Posted to commits@hive.apache.org by we...@apache.org on 2017/05/16 22:53:06 UTC

[01/18] hive git commit: HIVE-14879 : integrate MM tables into ACID: replace MM metastore calls and structures with ACID ones (Wei Zheng)

Repository: hive
Updated Branches:
  refs/heads/hive-14535 1ceaf357b -> 77511070d
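
Context for the diffs below: "MM" refers to micro-managed, i.e. insert-only ACID, tables. The updated golden files exercise statements of roughly the following shape; this is a minimal sketch, and the table name demo_mm is illustrative, not from the patch.

-- create an insert-only (MM) transactional table
create table demo_mm (key int) stored as orc
  tblproperties ("transactional"="true", "transactional_properties"="insert_only");

-- convert an existing table to MM and back again, as mm_conversions.q
-- does via set/unset of the same two properties
alter table demo_mm set tblproperties
  ("transactional"="true", "transactional_properties"="insert_only");
alter table demo_mm unset tblproperties
  ('transactional_properties', 'transactional');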


http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/test/results/clientpositive/mm_conversions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mm_conversions.q.out b/ql/src/test/results/clientpositive/mm_conversions.q.out
index 2cfa06d..1610672 100644
--- a/ql/src/test/results/clientpositive/mm_conversions.q.out
+++ b/ql/src/test/results/clientpositive/mm_conversions.q.out
@@ -37,55 +37,55 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@intermediate@p=457
 POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: drop table simple_from_mm
+PREHOOK: query: drop table simple_from_mm1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table simple_from_mm
+POSTHOOK: query: drop table simple_from_mm1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table simple_from_mm(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: query: create table simple_from_mm1(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: create table simple_from_mm(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: Output: default@simple_from_mm1
+POSTHOOK: query: create table simple_from_mm1(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@simple_from_mm
-PREHOOK: query: insert into table simple_from_mm select key from intermediate
+POSTHOOK: Output: default@simple_from_mm1
+PREHOOK: query: insert into table simple_from_mm1 select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: insert into table simple_from_mm select key from intermediate
+PREHOOK: Output: default@simple_from_mm1
+POSTHOOK: query: insert into table simple_from_mm1 select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@simple_from_mm
-POSTHOOK: Lineage: simple_from_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert into table simple_from_mm select key from intermediate
+POSTHOOK: Output: default@simple_from_mm1
+POSTHOOK: Lineage: simple_from_mm1.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: insert into table simple_from_mm1 select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: insert into table simple_from_mm select key from intermediate
+PREHOOK: Output: default@simple_from_mm1
+POSTHOOK: query: insert into table simple_from_mm1 select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@simple_from_mm
-POSTHOOK: Lineage: simple_from_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from simple_from_mm s1 order by key
+POSTHOOK: Output: default@simple_from_mm1
+POSTHOOK: Lineage: simple_from_mm1.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from simple_from_mm1 s1 order by key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@simple_from_mm
+PREHOOK: Input: default@simple_from_mm1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from simple_from_mm s1 order by key
+POSTHOOK: query: select * from simple_from_mm1 s1 order by key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@simple_from_mm
+POSTHOOK: Input: default@simple_from_mm1
 #### A masked pattern was here ####
 0
 0
@@ -93,21 +93,21 @@ POSTHOOK: Input: default@simple_from_mm
 98
 100
 100
-PREHOOK: query: alter table simple_from_mm unset tblproperties('transactional_properties', 'transactional')
+PREHOOK: query: alter table simple_from_mm1 unset tblproperties('transactional_properties', 'transactional')
 PREHOOK: type: ALTERTABLE_PROPERTIES
-PREHOOK: Input: default@simple_from_mm
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: alter table simple_from_mm unset tblproperties('transactional_properties', 'transactional')
+PREHOOK: Input: default@simple_from_mm1
+PREHOOK: Output: default@simple_from_mm1
+POSTHOOK: query: alter table simple_from_mm1 unset tblproperties('transactional_properties', 'transactional')
 POSTHOOK: type: ALTERTABLE_PROPERTIES
-POSTHOOK: Input: default@simple_from_mm
-POSTHOOK: Output: default@simple_from_mm
-PREHOOK: query: select * from simple_from_mm s2 order by key
+POSTHOOK: Input: default@simple_from_mm1
+POSTHOOK: Output: default@simple_from_mm1
+PREHOOK: query: select * from simple_from_mm1 s2 order by key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@simple_from_mm
+PREHOOK: Input: default@simple_from_mm1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from simple_from_mm s2 order by key
+POSTHOOK: query: select * from simple_from_mm1 s2 order by key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@simple_from_mm
+POSTHOOK: Input: default@simple_from_mm1
 #### A masked pattern was here ####
 0
 0
@@ -115,28 +115,28 @@ POSTHOOK: Input: default@simple_from_mm
 98
 100
 100
-PREHOOK: query: insert into table simple_from_mm select key from intermediate
+PREHOOK: query: insert into table simple_from_mm1 select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: insert into table simple_from_mm select key from intermediate
+PREHOOK: Output: default@simple_from_mm1
+POSTHOOK: query: insert into table simple_from_mm1 select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@simple_from_mm
-POSTHOOK: Lineage: simple_from_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from simple_from_mm s3 order by key
+POSTHOOK: Output: default@simple_from_mm1
+POSTHOOK: Lineage: simple_from_mm1.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from simple_from_mm1 s3 order by key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@simple_from_mm
+PREHOOK: Input: default@simple_from_mm1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from simple_from_mm s3 order by key
+POSTHOOK: query: select * from simple_from_mm1 s3 order by key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@simple_from_mm
+POSTHOOK: Input: default@simple_from_mm1
 #### A masked pattern was here ####
 0
 0
@@ -147,140 +147,132 @@ POSTHOOK: Input: default@simple_from_mm
 100
 100
 100
-PREHOOK: query: alter table simple_from_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: ALTERTABLE_PROPERTIES
-PREHOOK: Input: default@simple_from_mm
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: alter table simple_from_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: ALTERTABLE_PROPERTIES
-POSTHOOK: Input: default@simple_from_mm
-POSTHOOK: Output: default@simple_from_mm
-PREHOOK: query: select * from simple_from_mm s4 order by key
+PREHOOK: query: drop table simple_from_mm1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@simple_from_mm1
+PREHOOK: Output: default@simple_from_mm1
+POSTHOOK: query: drop table simple_from_mm1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@simple_from_mm1
+POSTHOOK: Output: default@simple_from_mm1
+PREHOOK: query: drop table simple_from_mm2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table simple_from_mm2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table simple_from_mm2(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@simple_from_mm2
+POSTHOOK: query: create table simple_from_mm2(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@simple_from_mm2
+PREHOOK: query: insert into table simple_from_mm2 select key from intermediate
 PREHOOK: type: QUERY
-PREHOOK: Input: default@simple_from_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from simple_from_mm s4 order by key
+PREHOOK: Input: default@intermediate
+PREHOOK: Input: default@intermediate@p=455
+PREHOOK: Input: default@intermediate@p=456
+PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Output: default@simple_from_mm2
+POSTHOOK: query: insert into table simple_from_mm2 select key from intermediate
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@simple_from_mm
-#### A masked pattern was here ####
-0
-0
-0
-98
-98
-98
-100
-100
-100
-PREHOOK: query: insert into table simple_from_mm select key from intermediate
+POSTHOOK: Input: default@intermediate
+POSTHOOK: Input: default@intermediate@p=455
+POSTHOOK: Input: default@intermediate@p=456
+POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Output: default@simple_from_mm2
+POSTHOOK: Lineage: simple_from_mm2.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: insert into table simple_from_mm2 select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: insert into table simple_from_mm select key from intermediate
+PREHOOK: Output: default@simple_from_mm2
+POSTHOOK: query: insert into table simple_from_mm2 select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@simple_from_mm
-POSTHOOK: Lineage: simple_from_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from simple_from_mm s5 order by key
+POSTHOOK: Output: default@simple_from_mm2
+POSTHOOK: Lineage: simple_from_mm2.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from simple_from_mm2 s1 order by key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@simple_from_mm
+PREHOOK: Input: default@simple_from_mm2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from simple_from_mm s5 order by key
+POSTHOOK: query: select * from simple_from_mm2 s1 order by key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@simple_from_mm
+POSTHOOK: Input: default@simple_from_mm2
 #### A masked pattern was here ####
 0
 0
-0
-0
-98
-98
 98
 98
 100
 100
-100
-100
-PREHOOK: query: alter table simple_from_mm set tblproperties("transactional"="false", 'transactional_properties'='false')
+PREHOOK: query: alter table simple_from_mm2 set tblproperties("transactional"="false", 'transactional_properties'='false')
 PREHOOK: type: ALTERTABLE_PROPERTIES
-PREHOOK: Input: default@simple_from_mm
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: alter table simple_from_mm set tblproperties("transactional"="false", 'transactional_properties'='false')
+PREHOOK: Input: default@simple_from_mm2
+PREHOOK: Output: default@simple_from_mm2
+POSTHOOK: query: alter table simple_from_mm2 set tblproperties("transactional"="false", 'transactional_properties'='false')
 POSTHOOK: type: ALTERTABLE_PROPERTIES
-POSTHOOK: Input: default@simple_from_mm
-POSTHOOK: Output: default@simple_from_mm
-PREHOOK: query: select * from simple_from_mm s6 order by key
+POSTHOOK: Input: default@simple_from_mm2
+POSTHOOK: Output: default@simple_from_mm2
+PREHOOK: query: select * from simple_from_mm2 s2 order by key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@simple_from_mm
+PREHOOK: Input: default@simple_from_mm2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from simple_from_mm s6 order by key
+POSTHOOK: query: select * from simple_from_mm2 s2 order by key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@simple_from_mm
+POSTHOOK: Input: default@simple_from_mm2
 #### A masked pattern was here ####
 0
 0
-0
-0
 98
 98
-98
-98
-100
 100
 100
-100
-PREHOOK: query: insert into table simple_from_mm select key from intermediate
+PREHOOK: query: insert into table simple_from_mm2 select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: insert into table simple_from_mm select key from intermediate
+PREHOOK: Output: default@simple_from_mm2
+POSTHOOK: query: insert into table simple_from_mm2 select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@simple_from_mm
-POSTHOOK: Lineage: simple_from_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from simple_from_mm s7 order by key
+POSTHOOK: Output: default@simple_from_mm2
+POSTHOOK: Lineage: simple_from_mm2.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from simple_from_mm2 s3 order by key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@simple_from_mm
+PREHOOK: Input: default@simple_from_mm2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from simple_from_mm s7 order by key
+POSTHOOK: query: select * from simple_from_mm2 s3 order by key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@simple_from_mm
+POSTHOOK: Input: default@simple_from_mm2
 #### A masked pattern was here ####
 0
 0
 0
-0
-0
-98
 98
 98
 98
-98
-100
 100
 100
 100
-100
-PREHOOK: query: drop table simple_from_mm
+PREHOOK: query: drop table simple_from_mm2
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@simple_from_mm
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: drop table simple_from_mm
+PREHOOK: Input: default@simple_from_mm2
+PREHOOK: Output: default@simple_from_mm2
+POSTHOOK: query: drop table simple_from_mm2
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@simple_from_mm
-POSTHOOK: Output: default@simple_from_mm
+POSTHOOK: Input: default@simple_from_mm2
+POSTHOOK: Output: default@simple_from_mm2
 PREHOOK: query: drop table simple_to_mm
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table simple_to_mm
@@ -308,21 +300,6 @@ POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
 POSTHOOK: Output: default@simple_to_mm
 POSTHOOK: Lineage: simple_to_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert into table simple_to_mm select key from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@simple_to_mm
-POSTHOOK: query: insert into table simple_to_mm select key from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@simple_to_mm
-POSTHOOK: Lineage: simple_to_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select * from simple_to_mm s1 order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@simple_to_mm
@@ -332,15 +309,13 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@simple_to_mm
 #### A masked pattern was here ####
 0
-0
-98
 98
 100
-100
 PREHOOK: query: alter table simple_to_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
 PREHOOK: type: ALTERTABLE_PROPERTIES
 PREHOOK: Input: default@simple_to_mm
 PREHOOK: Output: default@simple_to_mm
+FAILED: Error in acquiring locks: Transaction already opened. txnid:30
 POSTHOOK: query: alter table simple_to_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
 POSTHOOK: type: ALTERTABLE_PROPERTIES
 POSTHOOK: Input: default@simple_to_mm
@@ -354,11 +329,8 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@simple_to_mm
 #### A masked pattern was here ####
 0
-0
-98
 98
 100
-100
 PREHOOK: query: insert into table simple_to_mm select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
@@ -400,12 +372,9 @@ POSTHOOK: Input: default@simple_to_mm
 0
 0
 0
-0
 98
 98
 98
-98
-100
 100
 100
 100
@@ -417,74 +386,74 @@ POSTHOOK: query: drop table simple_to_mm
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@simple_to_mm
 POSTHOOK: Output: default@simple_to_mm
-PREHOOK: query: drop table part_from_mm
+PREHOOK: query: drop table part_from_mm1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table part_from_mm
+POSTHOOK: query: drop table part_from_mm1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table part_from_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: query: create table part_from_mm1(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@part_from_mm
-POSTHOOK: query: create table part_from_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: Output: default@part_from_mm1
+POSTHOOK: query: create table part_from_mm1(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part_from_mm
-PREHOOK: query: insert into table part_from_mm partition(key_mm='455') select key from intermediate
+POSTHOOK: Output: default@part_from_mm1
+PREHOOK: query: insert into table part_from_mm1 partition(key_mm='455') select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@part_from_mm@key_mm=455
-POSTHOOK: query: insert into table part_from_mm partition(key_mm='455') select key from intermediate
+PREHOOK: Output: default@part_from_mm1@key_mm=455
+POSTHOOK: query: insert into table part_from_mm1 partition(key_mm='455') select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@part_from_mm@key_mm=455
-POSTHOOK: Lineage: part_from_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert into table part_from_mm partition(key_mm='455') select key from intermediate
+POSTHOOK: Output: default@part_from_mm1@key_mm=455
+POSTHOOK: Lineage: part_from_mm1 PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: insert into table part_from_mm1 partition(key_mm='455') select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@part_from_mm@key_mm=455
-POSTHOOK: query: insert into table part_from_mm partition(key_mm='455') select key from intermediate
+PREHOOK: Output: default@part_from_mm1@key_mm=455
+POSTHOOK: query: insert into table part_from_mm1 partition(key_mm='455') select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@part_from_mm@key_mm=455
-POSTHOOK: Lineage: part_from_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert into table part_from_mm partition(key_mm='456') select key from intermediate
+POSTHOOK: Output: default@part_from_mm1@key_mm=455
+POSTHOOK: Lineage: part_from_mm1 PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: insert into table part_from_mm1 partition(key_mm='456') select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@part_from_mm@key_mm=456
-POSTHOOK: query: insert into table part_from_mm partition(key_mm='456') select key from intermediate
+PREHOOK: Output: default@part_from_mm1@key_mm=456
+POSTHOOK: query: insert into table part_from_mm1 partition(key_mm='456') select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@part_from_mm@key_mm=456
-POSTHOOK: Lineage: part_from_mm PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from part_from_mm s1 order by key, key_mm
+POSTHOOK: Output: default@part_from_mm1@key_mm=456
+POSTHOOK: Lineage: part_from_mm1 PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from part_from_mm1 s1 order by key, key_mm
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Input: default@part_from_mm@key_mm=455
-PREHOOK: Input: default@part_from_mm@key_mm=456
+PREHOOK: Input: default@part_from_mm1
+PREHOOK: Input: default@part_from_mm1@key_mm=455
+PREHOOK: Input: default@part_from_mm1@key_mm=456
 #### A masked pattern was here ####
-POSTHOOK: query: select * from part_from_mm s1 order by key, key_mm
+POSTHOOK: query: select * from part_from_mm1 s1 order by key, key_mm
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Input: default@part_from_mm@key_mm=455
-POSTHOOK: Input: default@part_from_mm@key_mm=456
+POSTHOOK: Input: default@part_from_mm1
+POSTHOOK: Input: default@part_from_mm1@key_mm=455
+POSTHOOK: Input: default@part_from_mm1@key_mm=456
 #### A masked pattern was here ####
 0	455
 0	455
@@ -495,25 +464,25 @@ POSTHOOK: Input: default@part_from_mm@key_mm=456
 100	455
 100	455
 100	456
-PREHOOK: query: alter table part_from_mm unset tblproperties('transactional_properties', 'transactional')
+PREHOOK: query: alter table part_from_mm1 unset tblproperties('transactional_properties', 'transactional')
 PREHOOK: type: ALTERTABLE_PROPERTIES
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Output: default@part_from_mm
-POSTHOOK: query: alter table part_from_mm unset tblproperties('transactional_properties', 'transactional')
+PREHOOK: Input: default@part_from_mm1
+PREHOOK: Output: default@part_from_mm1
+POSTHOOK: query: alter table part_from_mm1 unset tblproperties('transactional_properties', 'transactional')
 POSTHOOK: type: ALTERTABLE_PROPERTIES
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Output: default@part_from_mm
-PREHOOK: query: select * from part_from_mm s2 order by key, key_mm
+POSTHOOK: Input: default@part_from_mm1
+POSTHOOK: Output: default@part_from_mm1
+PREHOOK: query: select * from part_from_mm1 s2 order by key, key_mm
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Input: default@part_from_mm@key_mm=455
-PREHOOK: Input: default@part_from_mm@key_mm=456
+PREHOOK: Input: default@part_from_mm1
+PREHOOK: Input: default@part_from_mm1@key_mm=455
+PREHOOK: Input: default@part_from_mm1@key_mm=456
 #### A masked pattern was here ####
-POSTHOOK: query: select * from part_from_mm s2 order by key, key_mm
+POSTHOOK: query: select * from part_from_mm1 s2 order by key, key_mm
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Input: default@part_from_mm@key_mm=455
-POSTHOOK: Input: default@part_from_mm@key_mm=456
+POSTHOOK: Input: default@part_from_mm1
+POSTHOOK: Input: default@part_from_mm1@key_mm=455
+POSTHOOK: Input: default@part_from_mm1@key_mm=456
 #### A masked pattern was here ####
 0	455
 0	455
@@ -524,49 +493,49 @@ POSTHOOK: Input: default@part_from_mm@key_mm=456
 100	455
 100	455
 100	456
-PREHOOK: query: insert into table part_from_mm partition(key_mm='456') select key from intermediate
+PREHOOK: query: insert into table part_from_mm1 partition(key_mm='456') select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@part_from_mm@key_mm=456
-POSTHOOK: query: insert into table part_from_mm partition(key_mm='456') select key from intermediate
+PREHOOK: Output: default@part_from_mm1@key_mm=456
+POSTHOOK: query: insert into table part_from_mm1 partition(key_mm='456') select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@part_from_mm@key_mm=456
-POSTHOOK: Lineage: part_from_mm PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert into table part_from_mm partition(key_mm='457') select key from intermediate
+POSTHOOK: Output: default@part_from_mm1@key_mm=456
+POSTHOOK: Lineage: part_from_mm1 PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: insert into table part_from_mm1 partition(key_mm='457') select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@part_from_mm@key_mm=457
-POSTHOOK: query: insert into table part_from_mm partition(key_mm='457') select key from intermediate
+PREHOOK: Output: default@part_from_mm1@key_mm=457
+POSTHOOK: query: insert into table part_from_mm1 partition(key_mm='457') select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@part_from_mm@key_mm=457
-POSTHOOK: Lineage: part_from_mm PARTITION(key_mm=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from part_from_mm s3 order by key, key_mm
+POSTHOOK: Output: default@part_from_mm1@key_mm=457
+POSTHOOK: Lineage: part_from_mm1 PARTITION(key_mm=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from part_from_mm1 s3 order by key, key_mm
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Input: default@part_from_mm@key_mm=455
-PREHOOK: Input: default@part_from_mm@key_mm=456
-PREHOOK: Input: default@part_from_mm@key_mm=457
+PREHOOK: Input: default@part_from_mm1
+PREHOOK: Input: default@part_from_mm1@key_mm=455
+PREHOOK: Input: default@part_from_mm1@key_mm=456
+PREHOOK: Input: default@part_from_mm1@key_mm=457
 #### A masked pattern was here ####
-POSTHOOK: query: select * from part_from_mm s3 order by key, key_mm
+POSTHOOK: query: select * from part_from_mm1 s3 order by key, key_mm
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Input: default@part_from_mm@key_mm=455
-POSTHOOK: Input: default@part_from_mm@key_mm=456
-POSTHOOK: Input: default@part_from_mm@key_mm=457
+POSTHOOK: Input: default@part_from_mm1
+POSTHOOK: Input: default@part_from_mm1@key_mm=455
+POSTHOOK: Input: default@part_from_mm1@key_mm=456
+POSTHOOK: Input: default@part_from_mm1@key_mm=457
 #### A masked pattern was here ####
 0	455
 0	455
@@ -583,212 +552,148 @@ POSTHOOK: Input: default@part_from_mm@key_mm=457
 100	456
 100	456
 100	457
-PREHOOK: query: alter table part_from_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: ALTERTABLE_PROPERTIES
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Output: default@part_from_mm
-POSTHOOK: query: alter table part_from_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: ALTERTABLE_PROPERTIES
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Output: default@part_from_mm
-PREHOOK: query: select * from part_from_mm s4 order by key, key_mm
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Input: default@part_from_mm@key_mm=455
-PREHOOK: Input: default@part_from_mm@key_mm=456
-PREHOOK: Input: default@part_from_mm@key_mm=457
-#### A masked pattern was here ####
-POSTHOOK: query: select * from part_from_mm s4 order by key, key_mm
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Input: default@part_from_mm@key_mm=455
-POSTHOOK: Input: default@part_from_mm@key_mm=456
-POSTHOOK: Input: default@part_from_mm@key_mm=457
-#### A masked pattern was here ####
-0	455
-0	455
-0	456
-0	456
-0	457
-98	455
-98	455
-98	456
-98	456
-98	457
-100	455
-100	455
-100	456
-100	456
-100	457
-PREHOOK: query: insert into table part_from_mm partition(key_mm='456') select key from intermediate
+PREHOOK: query: drop table part_from_mm1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@part_from_mm1
+PREHOOK: Output: default@part_from_mm1
+POSTHOOK: query: drop table part_from_mm1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@part_from_mm1
+POSTHOOK: Output: default@part_from_mm1
+PREHOOK: query: drop table part_from_mm2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table part_from_mm2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table part_from_mm2(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@part_from_mm2
+POSTHOOK: query: create table part_from_mm2(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@part_from_mm2
+PREHOOK: query: insert into table part_from_mm2 partition(key_mm='456') select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@part_from_mm@key_mm=456
-POSTHOOK: query: insert into table part_from_mm partition(key_mm='456') select key from intermediate
+PREHOOK: Output: default@part_from_mm2@key_mm=456
+POSTHOOK: query: insert into table part_from_mm2 partition(key_mm='456') select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@part_from_mm@key_mm=456
-POSTHOOK: Lineage: part_from_mm PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert into table part_from_mm partition(key_mm='455') select key from intermediate
+POSTHOOK: Output: default@part_from_mm2@key_mm=456
+POSTHOOK: Lineage: part_from_mm2 PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: --fails here
+insert into table part_from_mm2 partition(key_mm='455') select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@part_from_mm@key_mm=455
-POSTHOOK: query: insert into table part_from_mm partition(key_mm='455') select key from intermediate
+PREHOOK: Output: default@part_from_mm2@key_mm=455
+POSTHOOK: query: --fails here
+insert into table part_from_mm2 partition(key_mm='455') select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@part_from_mm@key_mm=455
-POSTHOOK: Lineage: part_from_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from part_from_mm s5 order by key, key_mm
+POSTHOOK: Output: default@part_from_mm2@key_mm=455
+POSTHOOK: Lineage: part_from_mm2 PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from part_from_mm2 s1 order by key, key_mm
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Input: default@part_from_mm@key_mm=455
-PREHOOK: Input: default@part_from_mm@key_mm=456
-PREHOOK: Input: default@part_from_mm@key_mm=457
+PREHOOK: Input: default@part_from_mm2
+PREHOOK: Input: default@part_from_mm2@key_mm=455
+PREHOOK: Input: default@part_from_mm2@key_mm=456
 #### A masked pattern was here ####
-POSTHOOK: query: select * from part_from_mm s5 order by key, key_mm
+POSTHOOK: query: select * from part_from_mm2 s1 order by key, key_mm
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Input: default@part_from_mm@key_mm=455
-POSTHOOK: Input: default@part_from_mm@key_mm=456
-POSTHOOK: Input: default@part_from_mm@key_mm=457
+POSTHOOK: Input: default@part_from_mm2
+POSTHOOK: Input: default@part_from_mm2@key_mm=455
+POSTHOOK: Input: default@part_from_mm2@key_mm=456
 #### A masked pattern was here ####
 0	455
-0	455
-0	455
-0	456
 0	456
-0	456
-0	457
 98	455
-98	455
-98	455
-98	456
-98	456
 98	456
-98	457
-100	455
-100	455
 100	455
 100	456
-100	456
-100	456
-100	457
-PREHOOK: query: alter table part_from_mm set tblproperties("transactional"="false", 'transactional_properties'='false')
+PREHOOK: query: alter table part_from_mm2 set tblproperties("transactional"="false", 'transactional_properties'='false')
 PREHOOK: type: ALTERTABLE_PROPERTIES
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Output: default@part_from_mm
-POSTHOOK: query: alter table part_from_mm set tblproperties("transactional"="false", 'transactional_properties'='false')
+PREHOOK: Input: default@part_from_mm2
+PREHOOK: Output: default@part_from_mm2
+POSTHOOK: query: alter table part_from_mm2 set tblproperties("transactional"="false", 'transactional_properties'='false')
 POSTHOOK: type: ALTERTABLE_PROPERTIES
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Output: default@part_from_mm
-PREHOOK: query: select * from part_from_mm s6 order by key, key_mm
+POSTHOOK: Input: default@part_from_mm2
+POSTHOOK: Output: default@part_from_mm2
+PREHOOK: query: select * from part_from_mm2 s2 order by key, key_mm
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Input: default@part_from_mm@key_mm=455
-PREHOOK: Input: default@part_from_mm@key_mm=456
-PREHOOK: Input: default@part_from_mm@key_mm=457
+PREHOOK: Input: default@part_from_mm2
+PREHOOK: Input: default@part_from_mm2@key_mm=455
+PREHOOK: Input: default@part_from_mm2@key_mm=456
 #### A masked pattern was here ####
-POSTHOOK: query: select * from part_from_mm s6 order by key, key_mm
+POSTHOOK: query: select * from part_from_mm2 s2 order by key, key_mm
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Input: default@part_from_mm@key_mm=455
-POSTHOOK: Input: default@part_from_mm@key_mm=456
-POSTHOOK: Input: default@part_from_mm@key_mm=457
+POSTHOOK: Input: default@part_from_mm2
+POSTHOOK: Input: default@part_from_mm2@key_mm=455
+POSTHOOK: Input: default@part_from_mm2@key_mm=456
 #### A masked pattern was here ####
 0	455
-0	455
-0	455
-0	456
 0	456
-0	456
-0	457
-98	455
 98	455
-98	455
-98	456
 98	456
-98	456
-98	457
-100	455
-100	455
 100	455
 100	456
-100	456
-100	456
-100	457
-PREHOOK: query: insert into table part_from_mm partition(key_mm='457') select key from intermediate
+PREHOOK: query: insert into table part_from_mm2 partition(key_mm='457') select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@part_from_mm@key_mm=457
-POSTHOOK: query: insert into table part_from_mm partition(key_mm='457') select key from intermediate
+PREHOOK: Output: default@part_from_mm2@key_mm=457
+POSTHOOK: query: insert into table part_from_mm2 partition(key_mm='457') select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@part_from_mm@key_mm=457
-POSTHOOK: Lineage: part_from_mm PARTITION(key_mm=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from part_from_mm s7 order by key, key_mm
+POSTHOOK: Output: default@part_from_mm2@key_mm=457
+POSTHOOK: Lineage: part_from_mm2 PARTITION(key_mm=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from part_from_mm2 s3 order by key, key_mm
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Input: default@part_from_mm@key_mm=455
-PREHOOK: Input: default@part_from_mm@key_mm=456
-PREHOOK: Input: default@part_from_mm@key_mm=457
+PREHOOK: Input: default@part_from_mm2
+PREHOOK: Input: default@part_from_mm2@key_mm=455
+PREHOOK: Input: default@part_from_mm2@key_mm=456
+PREHOOK: Input: default@part_from_mm2@key_mm=457
 #### A masked pattern was here ####
-POSTHOOK: query: select * from part_from_mm s7 order by key, key_mm
+POSTHOOK: query: select * from part_from_mm2 s3 order by key, key_mm
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Input: default@part_from_mm@key_mm=455
-POSTHOOK: Input: default@part_from_mm@key_mm=456
-POSTHOOK: Input: default@part_from_mm@key_mm=457
+POSTHOOK: Input: default@part_from_mm2
+POSTHOOK: Input: default@part_from_mm2@key_mm=455
+POSTHOOK: Input: default@part_from_mm2@key_mm=456
+POSTHOOK: Input: default@part_from_mm2@key_mm=457
 #### A masked pattern was here ####
 0	455
-0	455
-0	455
-0	456
-0	456
 0	456
 0	457
-0	457
-98	455
 98	455
-98	455
-98	456
-98	456
 98	456
 98	457
-98	457
-100	455
 100	455
-100	455
-100	456
 100	456
-100	456
-100	457
 100	457
-PREHOOK: query: drop table part_from_mm
+PREHOOK: query: drop table part_from_mm2
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Output: default@part_from_mm
-POSTHOOK: query: drop table part_from_mm
+PREHOOK: Input: default@part_from_mm2
+PREHOOK: Output: default@part_from_mm2
+POSTHOOK: query: drop table part_from_mm2
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Output: default@part_from_mm
+POSTHOOK: Input: default@part_from_mm2
+POSTHOOK: Output: default@part_from_mm2
 PREHOOK: query: drop table part_to_mm
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table part_to_mm
@@ -853,6 +758,7 @@ PREHOOK: query: alter table part_to_mm set tblproperties("transactional"="true",
 PREHOOK: type: ALTERTABLE_PROPERTIES
 PREHOOK: Input: default@part_to_mm
 PREHOOK: Output: default@part_to_mm
+FAILED: Error in acquiring locks: Transaction already opened. txnid:63
 POSTHOOK: query: alter table part_to_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
 POSTHOOK: type: ALTERTABLE_PROPERTIES
 POSTHOOK: Input: default@part_to_mm
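
The next file, mm_exim.q.out, is newly added and covers EXPORT/IMPORT for MM tables. A rough sketch of the round-trip it verifies follows; the table names demo_src and demo_import_mm and the export path are hypothetical stand-ins for the intermediate/intermmediate tables and paths used below.

-- export a source table, then import its data into an MM table
export table demo_src to 'ql/test/data/exports/demo_src';
create table demo_import_mm (key int, p int)
  tblproperties ("transactional"="true", "transactional_properties"="insert_only");
import table demo_import_mm from 'ql/test/data/exports/demo_src';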

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/test/results/clientpositive/mm_exim.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mm_exim.q.out b/ql/src/test/results/clientpositive/mm_exim.q.out
new file mode 100644
index 0000000..6a6e549
--- /dev/null
+++ b/ql/src/test/results/clientpositive/mm_exim.q.out
@@ -0,0 +1,457 @@
+PREHOOK: query: drop table intermediate
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table intermediate
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@intermediate
+POSTHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@intermediate
+PREHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@intermediate@p=455
+POSTHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@intermediate@p=455
+POSTHOOK: Lineage: intermediate PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@intermediate@p=456
+POSTHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@intermediate@p=456
+POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@intermediate@p=457
+POSTHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@intermediate@p=457
+POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: drop table intermediate_nonpart
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table intermediate_nonpart
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table intermmediate_part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table intermmediate_part
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table intermmediate_nonpart
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table intermmediate_nonpart
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table intermediate_nonpart(key int, p int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@intermediate_nonpart
+POSTHOOK: query: create table intermediate_nonpart(key int, p int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@intermediate_nonpart
+PREHOOK: query: insert into intermediate_nonpart select * from intermediate
+PREHOOK: type: QUERY
+PREHOOK: Input: default@intermediate
+PREHOOK: Input: default@intermediate@p=455
+PREHOOK: Input: default@intermediate@p=456
+PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Output: default@intermediate_nonpart
+POSTHOOK: query: insert into intermediate_nonpart select * from intermediate
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@intermediate
+POSTHOOK: Input: default@intermediate@p=455
+POSTHOOK: Input: default@intermediate@p=456
+POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Output: default@intermediate_nonpart
+POSTHOOK: Lineage: intermediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: intermediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+PREHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@intermmediate_nonpart
+POSTHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@intermmediate_nonpart
+PREHOOK: query: insert into intermmediate_nonpart select * from intermediate
+PREHOOK: type: QUERY
+PREHOOK: Input: default@intermediate
+PREHOOK: Input: default@intermediate@p=455
+PREHOOK: Input: default@intermediate@p=456
+PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Output: default@intermmediate_nonpart
+POSTHOOK: query: insert into intermmediate_nonpart select * from intermediate
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@intermediate
+POSTHOOK: Input: default@intermediate@p=455
+POSTHOOK: Input: default@intermediate@p=456
+POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Output: default@intermmediate_nonpart
+POSTHOOK: Lineage: intermmediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: intermmediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+PREHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@intermmediate
+POSTHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@intermmediate
+PREHOOK: query: insert into table intermmediate partition(p) select key, p from intermediate
+PREHOOK: type: QUERY
+PREHOOK: Input: default@intermediate
+PREHOOK: Input: default@intermediate@p=455
+PREHOOK: Input: default@intermediate@p=456
+PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Output: default@intermmediate
+POSTHOOK: query: insert into table intermmediate partition(p) select key, p from intermediate
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@intermediate
+POSTHOOK: Input: default@intermediate@p=455
+POSTHOOK: Input: default@intermediate@p=456
+POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Output: default@intermmediate@p=455
+POSTHOOK: Output: default@intermmediate@p=456
+POSTHOOK: Output: default@intermmediate@p=457
+POSTHOOK: Lineage: intermmediate PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: intermmediate PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: intermmediate PARTITION(p=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart'
+PREHOOK: type: EXPORT
+PREHOOK: Input: default@intermediate_nonpart
+#### A masked pattern was here ####
+POSTHOOK: query: export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart'
+POSTHOOK: type: EXPORT
+POSTHOOK: Input: default@intermediate_nonpart
+#### A masked pattern was here ####
+PREHOOK: query: export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart'
+PREHOOK: type: EXPORT
+PREHOOK: Input: default@intermmediate_nonpart
+#### A masked pattern was here ####
+POSTHOOK: query: export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart'
+POSTHOOK: type: EXPORT
+POSTHOOK: Input: default@intermmediate_nonpart
+#### A masked pattern was here ####
+PREHOOK: query: export table intermediate to 'ql/test/data/exports/intermediate_part'
+PREHOOK: type: EXPORT
+PREHOOK: Input: default@intermediate@p=455
+PREHOOK: Input: default@intermediate@p=456
+PREHOOK: Input: default@intermediate@p=457
+#### A masked pattern was here ####
+POSTHOOK: query: export table intermediate to 'ql/test/data/exports/intermediate_part'
+POSTHOOK: type: EXPORT
+POSTHOOK: Input: default@intermediate@p=455
+POSTHOOK: Input: default@intermediate@p=456
+POSTHOOK: Input: default@intermediate@p=457
+#### A masked pattern was here ####
+PREHOOK: query: export table intermmediate to 'ql/test/data/exports/intermmediate_part'
+PREHOOK: type: EXPORT
+PREHOOK: Input: default@intermmediate@p=455
+PREHOOK: Input: default@intermmediate@p=456
+PREHOOK: Input: default@intermmediate@p=457
+#### A masked pattern was here ####
+POSTHOOK: query: export table intermmediate to 'ql/test/data/exports/intermmediate_part'
+POSTHOOK: type: EXPORT
+POSTHOOK: Input: default@intermmediate@p=455
+POSTHOOK: Input: default@intermmediate@p=456
+POSTHOOK: Input: default@intermmediate@p=457
+#### A masked pattern was here ####
+PREHOOK: query: drop table intermediate_nonpart
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@intermediate_nonpart
+PREHOOK: Output: default@intermediate_nonpart
+POSTHOOK: query: drop table intermediate_nonpart
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@intermediate_nonpart
+POSTHOOK: Output: default@intermediate_nonpart
+PREHOOK: query: drop table intermmediate_part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table intermmediate_part
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table intermmediate_nonpart
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@intermmediate_nonpart
+PREHOOK: Output: default@intermmediate_nonpart
+POSTHOOK: query: drop table intermmediate_nonpart
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@intermmediate_nonpart
+POSTHOOK: Output: default@intermmediate_nonpart
+PREHOOK: query: drop table import0_mm
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table import0_mm
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table import0_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@import0_mm
+POSTHOOK: query: create table import0_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@import0_mm
+PREHOOK: query: import table import0_mm from 'ql/test/data/exports/intermediate_nonpart'
+PREHOOK: type: IMPORT
+#### A masked pattern was here ####
+PREHOOK: Output: default@import0_mm
+POSTHOOK: query: import table import0_mm from 'ql/test/data/exports/intermediate_nonpart'
+POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
+POSTHOOK: Output: default@import0_mm
+PREHOOK: query: select * from import0_mm order by key, p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@import0_mm
+#### A masked pattern was here ####
+POSTHOOK: query: select * from import0_mm order by key, p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@import0_mm
+#### A masked pattern was here ####
+0	456
+10	456
+97	455
+98	455
+100	457
+103	457
+PREHOOK: query: drop table import0_mm
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@import0_mm
+PREHOOK: Output: default@import0_mm
+POSTHOOK: query: drop table import0_mm
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@import0_mm
+POSTHOOK: Output: default@import0_mm
+PREHOOK: query: drop table import1_mm
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table import1_mm
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table import1_mm(key int) partitioned by (p int)
+  stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@import1_mm
+POSTHOOK: query: create table import1_mm(key int) partitioned by (p int)
+  stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@import1_mm
+PREHOOK: query: import table import1_mm from 'ql/test/data/exports/intermediate_part'
+PREHOOK: type: IMPORT
+#### A masked pattern was here ####
+PREHOOK: Output: default@import1_mm
+POSTHOOK: query: import table import1_mm from 'ql/test/data/exports/intermediate_part'
+POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
+POSTHOOK: Output: default@import1_mm
+POSTHOOK: Output: default@import1_mm@p=455
+POSTHOOK: Output: default@import1_mm@p=456
+POSTHOOK: Output: default@import1_mm@p=457
+PREHOOK: query: select * from import1_mm order by key, p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@import1_mm
+PREHOOK: Input: default@import1_mm@p=455
+PREHOOK: Input: default@import1_mm@p=456
+PREHOOK: Input: default@import1_mm@p=457
+#### A masked pattern was here ####
+POSTHOOK: query: select * from import1_mm order by key, p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@import1_mm
+POSTHOOK: Input: default@import1_mm@p=455
+POSTHOOK: Input: default@import1_mm@p=456
+POSTHOOK: Input: default@import1_mm@p=457
+#### A masked pattern was here ####
+0	456
+10	456
+97	455
+98	455
+100	457
+103	457
+PREHOOK: query: drop table import1_mm
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@import1_mm
+PREHOOK: Output: default@import1_mm
+POSTHOOK: query: drop table import1_mm
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@import1_mm
+POSTHOOK: Output: default@import1_mm
+PREHOOK: query: drop table import4_mm
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table import4_mm
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table import4_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@import4_mm
+POSTHOOK: query: create table import4_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@import4_mm
+PREHOOK: query: import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart'
+PREHOOK: type: IMPORT
+#### A masked pattern was here ####
+PREHOOK: Output: default@import4_mm
+POSTHOOK: query: import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart'
+POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
+POSTHOOK: Output: default@import4_mm
+PREHOOK: query: select * from import4_mm order by key, p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@import4_mm
+#### A masked pattern was here ####
+POSTHOOK: query: select * from import4_mm order by key, p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@import4_mm
+#### A masked pattern was here ####
+0	456
+10	456
+97	455
+98	455
+100	457
+103	457
+PREHOOK: query: drop table import4_mm
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@import4_mm
+PREHOOK: Output: default@import4_mm
+POSTHOOK: query: drop table import4_mm
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@import4_mm
+POSTHOOK: Output: default@import4_mm
+PREHOOK: query: drop table import5_mm
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table import5_mm
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table import5_mm(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@import5_mm
+POSTHOOK: query: create table import5_mm(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@import5_mm
+PREHOOK: query: import table import5_mm partition(p=455) from 'ql/test/data/exports/intermmediate_part'
+PREHOOK: type: IMPORT
+#### A masked pattern was here ####
+PREHOOK: Output: default@import5_mm
+POSTHOOK: query: import table import5_mm partition(p=455) from 'ql/test/data/exports/intermmediate_part'
+POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
+POSTHOOK: Output: default@import5_mm
+POSTHOOK: Output: default@import5_mm@p=455
+PREHOOK: query: select * from import5_mm order by key, p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@import5_mm
+PREHOOK: Input: default@import5_mm@p=455
+#### A masked pattern was here ####
+POSTHOOK: query: select * from import5_mm order by key, p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@import5_mm
+POSTHOOK: Input: default@import5_mm@p=455
+#### A masked pattern was here ####
+97	455
+98	455
+PREHOOK: query: drop table import5_mm
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@import5_mm
+PREHOOK: Output: default@import5_mm
+POSTHOOK: query: drop table import5_mm
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@import5_mm
+POSTHOOK: Output: default@import5_mm
+PREHOOK: query: drop table import6_mm
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table import6_mm
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table import6_mm(key int, p int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@import6_mm
+POSTHOOK: query: create table import6_mm(key int, p int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@import6_mm
+PREHOOK: query: import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart'
+PREHOOK: type: IMPORT
+#### A masked pattern was here ####
+PREHOOK: Output: default@import6_mm
+POSTHOOK: query: import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart'
+POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
+POSTHOOK: Output: default@import6_mm
+PREHOOK: query: select * from import6_mm order by key, p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@import6_mm
+#### A masked pattern was here ####
+POSTHOOK: query: select * from import6_mm order by key, p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@import6_mm
+#### A masked pattern was here ####
+0	456
+10	456
+97	455
+98	455
+100	457
+103	457
+PREHOOK: query: drop table import6_mm
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@import6_mm
+PREHOOK: Output: default@import6_mm
+POSTHOOK: query: drop table import6_mm
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@import6_mm
+POSTHOOK: Output: default@import6_mm
+PREHOOK: query: drop table import7_mm
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table import7_mm
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table import7_mm(key int) partitioned by (p int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@import7_mm
+POSTHOOK: query: create table import7_mm(key int) partitioned by (p int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@import7_mm
+PREHOOK: query: import table import7_mm from 'ql/test/data/exports/intermmediate_part'
+PREHOOK: type: IMPORT
+#### A masked pattern was here ####
+PREHOOK: Output: default@import7_mm
+POSTHOOK: query: import table import7_mm from 'ql/test/data/exports/intermmediate_part'
+POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
+POSTHOOK: Output: default@import7_mm
+POSTHOOK: Output: default@import7_mm@p=455
+POSTHOOK: Output: default@import7_mm@p=456
+POSTHOOK: Output: default@import7_mm@p=457
+PREHOOK: query: select * from import7_mm order by key, p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@import7_mm
+PREHOOK: Input: default@import7_mm@p=455
+PREHOOK: Input: default@import7_mm@p=456
+PREHOOK: Input: default@import7_mm@p=457
+#### A masked pattern was here ####
+POSTHOOK: query: select * from import7_mm order by key, p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@import7_mm
+POSTHOOK: Input: default@import7_mm@p=455
+POSTHOOK: Input: default@import7_mm@p=456
+POSTHOOK: Input: default@import7_mm@p=457
+#### A masked pattern was here ####
+0	456
+10	456
+97	455
+98	455
+100	457
+103	457
+PREHOOK: query: drop table import7_mm
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@import7_mm
+PREHOOK: Output: default@import7_mm
+POSTHOOK: query: drop table import7_mm
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@import7_mm
+POSTHOOK: Output: default@import7_mm
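
For readers following the q.out above: each import test records the same cycle of creating an insert-only (MM) table, IMPORTing a previously exported directory into it, SELECTing the rows back, and dropping the table. Below is a minimal sketch, not part of the patch, of driving that same cycle from Python. It assumes a HiveServer2 endpoint on localhost:10000 and the PyHive client, and it assumes the export directory was produced by an earlier EXPORT of the intermediate table, a step not shown in this hunk.

    from pyhive import hive

    # Sketch only: host, port, and username are assumptions, as is the
    # availability of PyHive; the SQL mirrors the statements recorded in
    # mm_conversions.q.out above.
    conn = hive.connect(host='localhost', port=10000, username='hive')
    cur = conn.cursor()

    cur.execute("drop table if exists import0_mm")
    cur.execute(
        "create table import0_mm(key int, p int) "
        "tblproperties('transactional'='true', "
        "'transactional_properties'='insert_only')")
    # The source path must already hold an EXPORT of the intermediate table.
    cur.execute(
        "import table import0_mm from 'ql/test/data/exports/intermediate_nonpart'")
    cur.execute("select * from import0_mm order by key, p")
    print(cur.fetchall())  # expected rows as recorded above: (0, 456), (10, 456), ...
    cur.execute("drop table import0_mm")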

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/test/results/clientpositive/mm_insertonly_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mm_insertonly_acid.q.out b/ql/src/test/results/clientpositive/mm_insertonly_acid.q.out
deleted file mode 100644
index 22bdc93..0000000
--- a/ql/src/test/results/clientpositive/mm_insertonly_acid.q.out
+++ /dev/null
@@ -1,115 +0,0 @@
-PREHOOK: query: drop table qtr_acid
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table qtr_acid
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table qtr_acid (key int) partitioned by (p int) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@qtr_acid
-POSTHOOK: query: create table qtr_acid (key int) partitioned by (p int) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@qtr_acid
-PREHOOK: query: insert into table qtr_acid partition(p='123') select distinct key from src where key > 0 order by key asc limit 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@qtr_acid@p=123
-POSTHOOK: query: insert into table qtr_acid partition(p='123') select distinct key from src where key > 0 order by key asc limit 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@qtr_acid@p=123
-POSTHOOK: Lineage: qtr_acid PARTITION(p=123).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: insert into table qtr_acid partition(p='456') select distinct key from src where key > 0 order by key desc limit 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@qtr_acid@p=456
-POSTHOOK: query: insert into table qtr_acid partition(p='456') select distinct key from src where key > 0 order by key desc limit 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@qtr_acid@p=456
-POSTHOOK: Lineage: qtr_acid PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: explain
-select * from qtr_acid order by key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-select * from qtr_acid order by key
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: qtr_acid
-            Statistics: Num rows: 20 Data size: 47 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), p (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 20 Data size: 47 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: int)
-                sort order: +
-                Statistics: Num rows: 20 Data size: 47 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col1 (type: int)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: int)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 20 Data size: 47 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 20 Data size: 47 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select * from qtr_acid order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@qtr_acid
-PREHOOK: Input: default@qtr_acid@p=123
-PREHOOK: Input: default@qtr_acid@p=456
-#### A masked pattern was here ####
-POSTHOOK: query: select * from qtr_acid order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@qtr_acid
-POSTHOOK: Input: default@qtr_acid@p=123
-POSTHOOK: Input: default@qtr_acid@p=456
-#### A masked pattern was here ####
-9	456
-10	123
-11	123
-85	456
-86	456
-87	456
-90	456
-92	456
-95	456
-96	456
-97	456
-98	456
-100	123
-103	123
-104	123
-105	123
-111	123
-113	123
-114	123
-116	123
-PREHOOK: query: drop table qtr_acid
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@qtr_acid
-PREHOOK: Output: default@qtr_acid
-POSTHOOK: query: drop table qtr_acid
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@qtr_acid
-POSTHOOK: Output: default@qtr_acid


[09/18] hive git commit: HIVE-14879 : integrate MM tables into ACID: replace MM metastore calls and structures with ACID ones (Wei Zheng)

Posted by we...@apache.org.
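
The generated-Python diff below drops the four MM write-id RPCs (get_next_write_id, finalize_write_id, heartbeat_write_id, get_valid_write_ids) from the Iface, Client, and Processor classes, matching the commit's goal of replacing the MM metastore calls with the ACID ones. For context, here is a minimal sketch of how a caller reached one of these RPCs through the generated client before this patch; the request struct name and its fields are assumptions taken from the Thrift IDL these bindings were generated from, which is not shown in this diff.

    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore import ThriftHiveMetastore
    # GetNextWriteIdRequest and its dbName/tblName fields are assumed from the
    # (removed) Thrift IDL; they do not appear in this generated-code diff.
    from hive_metastore.ttypes import GetNextWriteIdRequest

    socket = TSocket.TSocket('localhost', 9083)  # metastore host/port: assumption
    transport = TTransport.TBufferedTransport(socket)
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = ThriftHiveMetastore.Client(protocol)

    transport.open()
    try:
        req = GetNextWriteIdRequest(dbName='default', tblName='simple_from_mm1')
        # Internally this runs the send_get_next_write_id()/recv_get_next_write_id()
        # pair that the hunk below deletes.
        resp = client.get_next_write_id(req)
    finally:
        transport.close()

After this patch the call no longer exists on the client, and callers are expected to go through the ACID transaction and write-id machinery instead. The remaining hunks in this file are mechanical: deleting the four RPC structs shifts the counters used for the generated temporary names, so _elem/_iter/_key/_val/_size variables are renumbered without any behavior change.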
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 1de9056..8ee84af 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -1239,34 +1239,6 @@ class Iface(fb303.FacebookService.Iface):
     """
     pass
 
-  def get_next_write_id(self, req):
-    """
-    Parameters:
-     - req
-    """
-    pass
-
-  def finalize_write_id(self, req):
-    """
-    Parameters:
-     - req
-    """
-    pass
-
-  def heartbeat_write_id(self, req):
-    """
-    Parameters:
-     - req
-    """
-    pass
-
-  def get_valid_write_ids(self, req):
-    """
-    Parameters:
-     - req
-    """
-    pass
-
 
 class Client(fb303.FacebookService.Client, Iface):
   """
@@ -6832,130 +6804,6 @@ class Client(fb303.FacebookService.Client, Iface):
       return result.success
     raise TApplicationException(TApplicationException.MISSING_RESULT, "cache_file_metadata failed: unknown result")
 
-  def get_next_write_id(self, req):
-    """
-    Parameters:
-     - req
-    """
-    self.send_get_next_write_id(req)
-    return self.recv_get_next_write_id()
-
-  def send_get_next_write_id(self, req):
-    self._oprot.writeMessageBegin('get_next_write_id', TMessageType.CALL, self._seqid)
-    args = get_next_write_id_args()
-    args.req = req
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_get_next_write_id(self):
-    iprot = self._iprot
-    (fname, mtype, rseqid) = iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(iprot)
-      iprot.readMessageEnd()
-      raise x
-    result = get_next_write_id_result()
-    result.read(iprot)
-    iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_next_write_id failed: unknown result")
-
-  def finalize_write_id(self, req):
-    """
-    Parameters:
-     - req
-    """
-    self.send_finalize_write_id(req)
-    return self.recv_finalize_write_id()
-
-  def send_finalize_write_id(self, req):
-    self._oprot.writeMessageBegin('finalize_write_id', TMessageType.CALL, self._seqid)
-    args = finalize_write_id_args()
-    args.req = req
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_finalize_write_id(self):
-    iprot = self._iprot
-    (fname, mtype, rseqid) = iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(iprot)
-      iprot.readMessageEnd()
-      raise x
-    result = finalize_write_id_result()
-    result.read(iprot)
-    iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "finalize_write_id failed: unknown result")
-
-  def heartbeat_write_id(self, req):
-    """
-    Parameters:
-     - req
-    """
-    self.send_heartbeat_write_id(req)
-    return self.recv_heartbeat_write_id()
-
-  def send_heartbeat_write_id(self, req):
-    self._oprot.writeMessageBegin('heartbeat_write_id', TMessageType.CALL, self._seqid)
-    args = heartbeat_write_id_args()
-    args.req = req
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_heartbeat_write_id(self):
-    iprot = self._iprot
-    (fname, mtype, rseqid) = iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(iprot)
-      iprot.readMessageEnd()
-      raise x
-    result = heartbeat_write_id_result()
-    result.read(iprot)
-    iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "heartbeat_write_id failed: unknown result")
-
-  def get_valid_write_ids(self, req):
-    """
-    Parameters:
-     - req
-    """
-    self.send_get_valid_write_ids(req)
-    return self.recv_get_valid_write_ids()
-
-  def send_get_valid_write_ids(self, req):
-    self._oprot.writeMessageBegin('get_valid_write_ids', TMessageType.CALL, self._seqid)
-    args = get_valid_write_ids_args()
-    args.req = req
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_get_valid_write_ids(self):
-    iprot = self._iprot
-    (fname, mtype, rseqid) = iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(iprot)
-      iprot.readMessageEnd()
-      raise x
-    result = get_valid_write_ids_result()
-    result.read(iprot)
-    iprot.readMessageEnd()
-    if result.success is not None:
-      return result.success
-    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_valid_write_ids failed: unknown result")
-
 
 class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
   def __init__(self, handler):
@@ -7114,10 +6962,6 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
     self._processMap["put_file_metadata"] = Processor.process_put_file_metadata
     self._processMap["clear_file_metadata"] = Processor.process_clear_file_metadata
     self._processMap["cache_file_metadata"] = Processor.process_cache_file_metadata
-    self._processMap["get_next_write_id"] = Processor.process_get_next_write_id
-    self._processMap["finalize_write_id"] = Processor.process_finalize_write_id
-    self._processMap["heartbeat_write_id"] = Processor.process_heartbeat_write_id
-    self._processMap["get_valid_write_ids"] = Processor.process_get_valid_write_ids
 
   def process(self, iprot, oprot):
     (name, type, seqid) = iprot.readMessageBegin()
@@ -10888,82 +10732,6 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
-  def process_get_next_write_id(self, seqid, iprot, oprot):
-    args = get_next_write_id_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = get_next_write_id_result()
-    try:
-      result.success = self._handler.get_next_write_id(args.req)
-      msg_type = TMessageType.REPLY
-    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
-      raise
-    except Exception as ex:
-      msg_type = TMessageType.EXCEPTION
-      logging.exception(ex)
-      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
-    oprot.writeMessageBegin("get_next_write_id", msg_type, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_finalize_write_id(self, seqid, iprot, oprot):
-    args = finalize_write_id_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = finalize_write_id_result()
-    try:
-      result.success = self._handler.finalize_write_id(args.req)
-      msg_type = TMessageType.REPLY
-    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
-      raise
-    except Exception as ex:
-      msg_type = TMessageType.EXCEPTION
-      logging.exception(ex)
-      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
-    oprot.writeMessageBegin("finalize_write_id", msg_type, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_heartbeat_write_id(self, seqid, iprot, oprot):
-    args = heartbeat_write_id_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = heartbeat_write_id_result()
-    try:
-      result.success = self._handler.heartbeat_write_id(args.req)
-      msg_type = TMessageType.REPLY
-    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
-      raise
-    except Exception as ex:
-      msg_type = TMessageType.EXCEPTION
-      logging.exception(ex)
-      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
-    oprot.writeMessageBegin("heartbeat_write_id", msg_type, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
-  def process_get_valid_write_ids(self, seqid, iprot, oprot):
-    args = get_valid_write_ids_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = get_valid_write_ids_result()
-    try:
-      result.success = self._handler.get_valid_write_ids(args.req)
-      msg_type = TMessageType.REPLY
-    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
-      raise
-    except Exception as ex:
-      msg_type = TMessageType.EXCEPTION
-      logging.exception(ex)
-      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
-    oprot.writeMessageBegin("get_valid_write_ids", msg_type, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
 
 # HELPER FUNCTIONS AND STRUCTURES
 
@@ -11850,10 +11618,10 @@ class get_databases_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype604, _size601) = iprot.readListBegin()
-          for _i605 in xrange(_size601):
-            _elem606 = iprot.readString()
-            self.success.append(_elem606)
+          (_etype597, _size594) = iprot.readListBegin()
+          for _i598 in xrange(_size594):
+            _elem599 = iprot.readString()
+            self.success.append(_elem599)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -11876,8 +11644,8 @@ class get_databases_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter607 in self.success:
-        oprot.writeString(iter607)
+      for iter600 in self.success:
+        oprot.writeString(iter600)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -11982,10 +11750,10 @@ class get_all_databases_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype611, _size608) = iprot.readListBegin()
-          for _i612 in xrange(_size608):
-            _elem613 = iprot.readString()
-            self.success.append(_elem613)
+          (_etype604, _size601) = iprot.readListBegin()
+          for _i605 in xrange(_size601):
+            _elem606 = iprot.readString()
+            self.success.append(_elem606)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -12008,8 +11776,8 @@ class get_all_databases_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter614 in self.success:
-        oprot.writeString(iter614)
+      for iter607 in self.success:
+        oprot.writeString(iter607)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -12779,12 +12547,12 @@ class get_type_all_result:
       if fid == 0:
         if ftype == TType.MAP:
           self.success = {}
-          (_ktype616, _vtype617, _size615 ) = iprot.readMapBegin()
-          for _i619 in xrange(_size615):
-            _key620 = iprot.readString()
-            _val621 = Type()
-            _val621.read(iprot)
-            self.success[_key620] = _val621
+          (_ktype609, _vtype610, _size608 ) = iprot.readMapBegin()
+          for _i612 in xrange(_size608):
+            _key613 = iprot.readString()
+            _val614 = Type()
+            _val614.read(iprot)
+            self.success[_key613] = _val614
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -12807,9 +12575,9 @@ class get_type_all_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.MAP, 0)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success))
-      for kiter622,viter623 in self.success.items():
-        oprot.writeString(kiter622)
-        viter623.write(oprot)
+      for kiter615,viter616 in self.success.items():
+        oprot.writeString(kiter615)
+        viter616.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.o2 is not None:
@@ -12952,11 +12720,11 @@ class get_fields_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype627, _size624) = iprot.readListBegin()
-          for _i628 in xrange(_size624):
-            _elem629 = FieldSchema()
-            _elem629.read(iprot)
-            self.success.append(_elem629)
+          (_etype620, _size617) = iprot.readListBegin()
+          for _i621 in xrange(_size617):
+            _elem622 = FieldSchema()
+            _elem622.read(iprot)
+            self.success.append(_elem622)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -12991,8 +12759,8 @@ class get_fields_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter630 in self.success:
-        iter630.write(oprot)
+      for iter623 in self.success:
+        iter623.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -13159,11 +12927,11 @@ class get_fields_with_environment_context_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype634, _size631) = iprot.readListBegin()
-          for _i635 in xrange(_size631):
-            _elem636 = FieldSchema()
-            _elem636.read(iprot)
-            self.success.append(_elem636)
+          (_etype627, _size624) = iprot.readListBegin()
+          for _i628 in xrange(_size624):
+            _elem629 = FieldSchema()
+            _elem629.read(iprot)
+            self.success.append(_elem629)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -13198,8 +12966,8 @@ class get_fields_with_environment_context_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter637 in self.success:
-        iter637.write(oprot)
+      for iter630 in self.success:
+        iter630.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -13352,11 +13120,11 @@ class get_schema_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype641, _size638) = iprot.readListBegin()
-          for _i642 in xrange(_size638):
-            _elem643 = FieldSchema()
-            _elem643.read(iprot)
-            self.success.append(_elem643)
+          (_etype634, _size631) = iprot.readListBegin()
+          for _i635 in xrange(_size631):
+            _elem636 = FieldSchema()
+            _elem636.read(iprot)
+            self.success.append(_elem636)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -13391,8 +13159,8 @@ class get_schema_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter644 in self.success:
-        iter644.write(oprot)
+      for iter637 in self.success:
+        iter637.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -13559,11 +13327,11 @@ class get_schema_with_environment_context_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype648, _size645) = iprot.readListBegin()
-          for _i649 in xrange(_size645):
-            _elem650 = FieldSchema()
-            _elem650.read(iprot)
-            self.success.append(_elem650)
+          (_etype641, _size638) = iprot.readListBegin()
+          for _i642 in xrange(_size638):
+            _elem643 = FieldSchema()
+            _elem643.read(iprot)
+            self.success.append(_elem643)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -13598,8 +13366,8 @@ class get_schema_with_environment_context_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter651 in self.success:
-        iter651.write(oprot)
+      for iter644 in self.success:
+        iter644.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -14040,22 +13808,22 @@ class create_table_with_constraints_args:
       elif fid == 2:
         if ftype == TType.LIST:
           self.primaryKeys = []
-          (_etype655, _size652) = iprot.readListBegin()
-          for _i656 in xrange(_size652):
-            _elem657 = SQLPrimaryKey()
-            _elem657.read(iprot)
-            self.primaryKeys.append(_elem657)
+          (_etype648, _size645) = iprot.readListBegin()
+          for _i649 in xrange(_size645):
+            _elem650 = SQLPrimaryKey()
+            _elem650.read(iprot)
+            self.primaryKeys.append(_elem650)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 3:
         if ftype == TType.LIST:
           self.foreignKeys = []
-          (_etype661, _size658) = iprot.readListBegin()
-          for _i662 in xrange(_size658):
-            _elem663 = SQLForeignKey()
-            _elem663.read(iprot)
-            self.foreignKeys.append(_elem663)
+          (_etype654, _size651) = iprot.readListBegin()
+          for _i655 in xrange(_size651):
+            _elem656 = SQLForeignKey()
+            _elem656.read(iprot)
+            self.foreignKeys.append(_elem656)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -14076,15 +13844,15 @@ class create_table_with_constraints_args:
     if self.primaryKeys is not None:
       oprot.writeFieldBegin('primaryKeys', TType.LIST, 2)
       oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys))
-      for iter664 in self.primaryKeys:
-        iter664.write(oprot)
+      for iter657 in self.primaryKeys:
+        iter657.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.foreignKeys is not None:
       oprot.writeFieldBegin('foreignKeys', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys))
-      for iter665 in self.foreignKeys:
-        iter665.write(oprot)
+      for iter658 in self.foreignKeys:
+        iter658.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -15056,10 +14824,10 @@ class truncate_table_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.partNames = []
-          (_etype669, _size666) = iprot.readListBegin()
-          for _i670 in xrange(_size666):
-            _elem671 = iprot.readString()
-            self.partNames.append(_elem671)
+          (_etype662, _size659) = iprot.readListBegin()
+          for _i663 in xrange(_size659):
+            _elem664 = iprot.readString()
+            self.partNames.append(_elem664)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -15084,8 +14852,8 @@ class truncate_table_args:
     if self.partNames is not None:
       oprot.writeFieldBegin('partNames', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.partNames))
-      for iter672 in self.partNames:
-        oprot.writeString(iter672)
+      for iter665 in self.partNames:
+        oprot.writeString(iter665)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -15285,10 +15053,10 @@ class get_tables_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype676, _size673) = iprot.readListBegin()
-          for _i677 in xrange(_size673):
-            _elem678 = iprot.readString()
-            self.success.append(_elem678)
+          (_etype669, _size666) = iprot.readListBegin()
+          for _i670 in xrange(_size666):
+            _elem671 = iprot.readString()
+            self.success.append(_elem671)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -15311,8 +15079,8 @@ class get_tables_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter679 in self.success:
-        oprot.writeString(iter679)
+      for iter672 in self.success:
+        oprot.writeString(iter672)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -15462,10 +15230,10 @@ class get_tables_by_type_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype683, _size680) = iprot.readListBegin()
-          for _i684 in xrange(_size680):
-            _elem685 = iprot.readString()
-            self.success.append(_elem685)
+          (_etype676, _size673) = iprot.readListBegin()
+          for _i677 in xrange(_size673):
+            _elem678 = iprot.readString()
+            self.success.append(_elem678)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -15488,8 +15256,8 @@ class get_tables_by_type_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter686 in self.success:
-        oprot.writeString(iter686)
+      for iter679 in self.success:
+        oprot.writeString(iter679)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -15562,10 +15330,10 @@ class get_table_meta_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.tbl_types = []
-          (_etype690, _size687) = iprot.readListBegin()
-          for _i691 in xrange(_size687):
-            _elem692 = iprot.readString()
-            self.tbl_types.append(_elem692)
+          (_etype683, _size680) = iprot.readListBegin()
+          for _i684 in xrange(_size680):
+            _elem685 = iprot.readString()
+            self.tbl_types.append(_elem685)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -15590,8 +15358,8 @@ class get_table_meta_args:
     if self.tbl_types is not None:
       oprot.writeFieldBegin('tbl_types', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.tbl_types))
-      for iter693 in self.tbl_types:
-        oprot.writeString(iter693)
+      for iter686 in self.tbl_types:
+        oprot.writeString(iter686)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -15647,11 +15415,11 @@ class get_table_meta_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype697, _size694) = iprot.readListBegin()
-          for _i698 in xrange(_size694):
-            _elem699 = TableMeta()
-            _elem699.read(iprot)
-            self.success.append(_elem699)
+          (_etype690, _size687) = iprot.readListBegin()
+          for _i691 in xrange(_size687):
+            _elem692 = TableMeta()
+            _elem692.read(iprot)
+            self.success.append(_elem692)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -15674,8 +15442,8 @@ class get_table_meta_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter700 in self.success:
-        iter700.write(oprot)
+      for iter693 in self.success:
+        iter693.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -15799,10 +15567,10 @@ class get_all_tables_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype704, _size701) = iprot.readListBegin()
-          for _i705 in xrange(_size701):
-            _elem706 = iprot.readString()
-            self.success.append(_elem706)
+          (_etype697, _size694) = iprot.readListBegin()
+          for _i698 in xrange(_size694):
+            _elem699 = iprot.readString()
+            self.success.append(_elem699)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -15825,8 +15593,8 @@ class get_all_tables_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter707 in self.success:
-        oprot.writeString(iter707)
+      for iter700 in self.success:
+        oprot.writeString(iter700)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -16062,10 +15830,10 @@ class get_table_objects_by_name_args:
       elif fid == 2:
         if ftype == TType.LIST:
           self.tbl_names = []
-          (_etype711, _size708) = iprot.readListBegin()
-          for _i712 in xrange(_size708):
-            _elem713 = iprot.readString()
-            self.tbl_names.append(_elem713)
+          (_etype704, _size701) = iprot.readListBegin()
+          for _i705 in xrange(_size701):
+            _elem706 = iprot.readString()
+            self.tbl_names.append(_elem706)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -16086,8 +15854,8 @@ class get_table_objects_by_name_args:
     if self.tbl_names is not None:
       oprot.writeFieldBegin('tbl_names', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.tbl_names))
-      for iter714 in self.tbl_names:
-        oprot.writeString(iter714)
+      for iter707 in self.tbl_names:
+        oprot.writeString(iter707)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -16139,11 +15907,11 @@ class get_table_objects_by_name_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype718, _size715) = iprot.readListBegin()
-          for _i719 in xrange(_size715):
-            _elem720 = Table()
-            _elem720.read(iprot)
-            self.success.append(_elem720)
+          (_etype711, _size708) = iprot.readListBegin()
+          for _i712 in xrange(_size708):
+            _elem713 = Table()
+            _elem713.read(iprot)
+            self.success.append(_elem713)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -16160,8 +15928,8 @@ class get_table_objects_by_name_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter721 in self.success:
-        iter721.write(oprot)
+      for iter714 in self.success:
+        iter714.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -16644,10 +16412,10 @@ class get_table_names_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype725, _size722) = iprot.readListBegin()
-          for _i726 in xrange(_size722):
-            _elem727 = iprot.readString()
-            self.success.append(_elem727)
+          (_etype718, _size715) = iprot.readListBegin()
+          for _i719 in xrange(_size715):
+            _elem720 = iprot.readString()
+            self.success.append(_elem720)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -16682,8 +16450,8 @@ class get_table_names_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter728 in self.success:
-        oprot.writeString(iter728)
+      for iter721 in self.success:
+        oprot.writeString(iter721)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -17653,11 +17421,11 @@ class add_partitions_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype732, _size729) = iprot.readListBegin()
-          for _i733 in xrange(_size729):
-            _elem734 = Partition()
-            _elem734.read(iprot)
-            self.new_parts.append(_elem734)
+          (_etype725, _size722) = iprot.readListBegin()
+          for _i726 in xrange(_size722):
+            _elem727 = Partition()
+            _elem727.read(iprot)
+            self.new_parts.append(_elem727)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17674,8 +17442,8 @@ class add_partitions_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter735 in self.new_parts:
-        iter735.write(oprot)
+      for iter728 in self.new_parts:
+        iter728.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -17833,11 +17601,11 @@ class add_partitions_pspec_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype739, _size736) = iprot.readListBegin()
-          for _i740 in xrange(_size736):
-            _elem741 = PartitionSpec()
-            _elem741.read(iprot)
-            self.new_parts.append(_elem741)
+          (_etype732, _size729) = iprot.readListBegin()
+          for _i733 in xrange(_size729):
+            _elem734 = PartitionSpec()
+            _elem734.read(iprot)
+            self.new_parts.append(_elem734)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17854,8 +17622,8 @@ class add_partitions_pspec_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter742 in self.new_parts:
-        iter742.write(oprot)
+      for iter735 in self.new_parts:
+        iter735.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -18029,10 +17797,10 @@ class append_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype746, _size743) = iprot.readListBegin()
-          for _i747 in xrange(_size743):
-            _elem748 = iprot.readString()
-            self.part_vals.append(_elem748)
+          (_etype739, _size736) = iprot.readListBegin()
+          for _i740 in xrange(_size736):
+            _elem741 = iprot.readString()
+            self.part_vals.append(_elem741)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18057,8 +17825,8 @@ class append_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter749 in self.part_vals:
-        oprot.writeString(iter749)
+      for iter742 in self.part_vals:
+        oprot.writeString(iter742)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -18411,10 +18179,10 @@ class append_partition_with_environment_context_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype753, _size750) = iprot.readListBegin()
-          for _i754 in xrange(_size750):
-            _elem755 = iprot.readString()
-            self.part_vals.append(_elem755)
+          (_etype746, _size743) = iprot.readListBegin()
+          for _i747 in xrange(_size743):
+            _elem748 = iprot.readString()
+            self.part_vals.append(_elem748)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18445,8 +18213,8 @@ class append_partition_with_environment_context_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter756 in self.part_vals:
-        oprot.writeString(iter756)
+      for iter749 in self.part_vals:
+        oprot.writeString(iter749)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.environment_context is not None:
@@ -19041,10 +18809,10 @@ class drop_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype760, _size757) = iprot.readListBegin()
-          for _i761 in xrange(_size757):
-            _elem762 = iprot.readString()
-            self.part_vals.append(_elem762)
+          (_etype753, _size750) = iprot.readListBegin()
+          for _i754 in xrange(_size750):
+            _elem755 = iprot.readString()
+            self.part_vals.append(_elem755)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19074,8 +18842,8 @@ class drop_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter763 in self.part_vals:
-        oprot.writeString(iter763)
+      for iter756 in self.part_vals:
+        oprot.writeString(iter756)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.deleteData is not None:
@@ -19248,10 +19016,10 @@ class drop_partition_with_environment_context_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype767, _size764) = iprot.readListBegin()
-          for _i768 in xrange(_size764):
-            _elem769 = iprot.readString()
-            self.part_vals.append(_elem769)
+          (_etype760, _size757) = iprot.readListBegin()
+          for _i761 in xrange(_size757):
+            _elem762 = iprot.readString()
+            self.part_vals.append(_elem762)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19287,8 +19055,8 @@ class drop_partition_with_environment_context_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter770 in self.part_vals:
-        oprot.writeString(iter770)
+      for iter763 in self.part_vals:
+        oprot.writeString(iter763)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.deleteData is not None:
@@ -20025,10 +19793,10 @@ class get_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype774, _size771) = iprot.readListBegin()
-          for _i775 in xrange(_size771):
-            _elem776 = iprot.readString()
-            self.part_vals.append(_elem776)
+          (_etype767, _size764) = iprot.readListBegin()
+          for _i768 in xrange(_size764):
+            _elem769 = iprot.readString()
+            self.part_vals.append(_elem769)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20053,8 +19821,8 @@ class get_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter777 in self.part_vals:
-        oprot.writeString(iter777)
+      for iter770 in self.part_vals:
+        oprot.writeString(iter770)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -20213,11 +19981,11 @@ class exchange_partition_args:
       if fid == 1:
         if ftype == TType.MAP:
           self.partitionSpecs = {}
-          (_ktype779, _vtype780, _size778 ) = iprot.readMapBegin()
-          for _i782 in xrange(_size778):
-            _key783 = iprot.readString()
-            _val784 = iprot.readString()
-            self.partitionSpecs[_key783] = _val784
+          (_ktype772, _vtype773, _size771 ) = iprot.readMapBegin()
+          for _i775 in xrange(_size771):
+            _key776 = iprot.readString()
+            _val777 = iprot.readString()
+            self.partitionSpecs[_key776] = _val777
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -20254,9 +20022,9 @@ class exchange_partition_args:
     if self.partitionSpecs is not None:
       oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
-      for kiter785,viter786 in self.partitionSpecs.items():
-        oprot.writeString(kiter785)
-        oprot.writeString(viter786)
+      for kiter778,viter779 in self.partitionSpecs.items():
+        oprot.writeString(kiter778)
+        oprot.writeString(viter779)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.source_db is not None:
@@ -20461,11 +20229,11 @@ class exchange_partitions_args:
       if fid == 1:
         if ftype == TType.MAP:
           self.partitionSpecs = {}
-          (_ktype788, _vtype789, _size787 ) = iprot.readMapBegin()
-          for _i791 in xrange(_size787):
-            _key792 = iprot.readString()
-            _val793 = iprot.readString()
-            self.partitionSpecs[_key792] = _val793
+          (_ktype781, _vtype782, _size780 ) = iprot.readMapBegin()
+          for _i784 in xrange(_size780):
+            _key785 = iprot.readString()
+            _val786 = iprot.readString()
+            self.partitionSpecs[_key785] = _val786
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -20502,9 +20270,9 @@ class exchange_partitions_args:
     if self.partitionSpecs is not None:
       oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
-      for kiter794,viter795 in self.partitionSpecs.items():
-        oprot.writeString(kiter794)
-        oprot.writeString(viter795)
+      for kiter787,viter788 in self.partitionSpecs.items():
+        oprot.writeString(kiter787)
+        oprot.writeString(viter788)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.source_db is not None:
@@ -20587,11 +20355,11 @@ class exchange_partitions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype799, _size796) = iprot.readListBegin()
-          for _i800 in xrange(_size796):
-            _elem801 = Partition()
-            _elem801.read(iprot)
-            self.success.append(_elem801)
+          (_etype792, _size789) = iprot.readListBegin()
+          for _i793 in xrange(_size789):
+            _elem794 = Partition()
+            _elem794.read(iprot)
+            self.success.append(_elem794)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20632,8 +20400,8 @@ class exchange_partitions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter802 in self.success:
-        iter802.write(oprot)
+      for iter795 in self.success:
+        iter795.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -20727,10 +20495,10 @@ class get_partition_with_auth_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype806, _size803) = iprot.readListBegin()
-          for _i807 in xrange(_size803):
-            _elem808 = iprot.readString()
-            self.part_vals.append(_elem808)
+          (_etype799, _size796) = iprot.readListBegin()
+          for _i800 in xrange(_size796):
+            _elem801 = iprot.readString()
+            self.part_vals.append(_elem801)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20742,10 +20510,10 @@ class get_partition_with_auth_args:
       elif fid == 5:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype812, _size809) = iprot.readListBegin()
-          for _i813 in xrange(_size809):
-            _elem814 = iprot.readString()
-            self.group_names.append(_elem814)
+          (_etype805, _size802) = iprot.readListBegin()
+          for _i806 in xrange(_size802):
+            _elem807 = iprot.readString()
+            self.group_names.append(_elem807)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20770,8 +20538,8 @@ class get_partition_with_auth_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter815 in self.part_vals:
-        oprot.writeString(iter815)
+      for iter808 in self.part_vals:
+        oprot.writeString(iter808)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.user_name is not None:
@@ -20781,8 +20549,8 @@ class get_partition_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 5)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter816 in self.group_names:
-        oprot.writeString(iter816)
+      for iter809 in self.group_names:
+        oprot.writeString(iter809)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -21211,11 +20979,11 @@ class get_partitions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype820, _size817) = iprot.readListBegin()
-          for _i821 in xrange(_size817):
-            _elem822 = Partition()
-            _elem822.read(iprot)
-            self.success.append(_elem822)
+          (_etype813, _size810) = iprot.readListBegin()
+          for _i814 in xrange(_size810):
+            _elem815 = Partition()
+            _elem815.read(iprot)
+            self.success.append(_elem815)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21244,8 +21012,8 @@ class get_partitions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter823 in self.success:
-        iter823.write(oprot)
+      for iter816 in self.success:
+        iter816.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -21339,10 +21107,10 @@ class get_partitions_with_auth_args:
       elif fid == 5:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype827, _size824) = iprot.readListBegin()
-          for _i828 in xrange(_size824):
-            _elem829 = iprot.readString()
-            self.group_names.append(_elem829)
+          (_etype820, _size817) = iprot.readListBegin()
+          for _i821 in xrange(_size817):
+            _elem822 = iprot.readString()
+            self.group_names.append(_elem822)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21375,8 +21143,8 @@ class get_partitions_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 5)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter830 in self.group_names:
-        oprot.writeString(iter830)
+      for iter823 in self.group_names:
+        oprot.writeString(iter823)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -21437,11 +21205,11 @@ class get_partitions_with_auth_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype834, _size831) = iprot.readListBegin()
-          for _i835 in xrange(_size831):
-            _elem836 = Partition()
-            _elem836.read(iprot)
-            self.success.append(_elem836)
+          (_etype827, _size824) = iprot.readListBegin()
+          for _i828 in xrange(_size824):
+            _elem829 = Partition()
+            _elem829.read(iprot)
+            self.success.append(_elem829)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21470,8 +21238,8 @@ class get_partitions_with_auth_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter837 in self.success:
-        iter837.write(oprot)
+      for iter830 in self.success:
+        iter830.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -21629,11 +21397,11 @@ class get_partitions_pspec_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype841, _size838) = iprot.readListBegin()
-          for _i842 in xrange(_size838):
-            _elem843 = PartitionSpec()
-            _elem843.read(iprot)
-            self.success.append(_elem843)
+          (_etype834, _size831) = iprot.readListBegin()
+          for _i835 in xrange(_size831):
+            _elem836 = PartitionSpec()
+            _elem836.read(iprot)
+            self.success.append(_elem836)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21662,8 +21430,8 @@ class get_partitions_pspec_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter844 in self.success:
-        iter844.write(oprot)
+      for iter837 in self.success:
+        iter837.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -21818,10 +21586,10 @@ class get_partition_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype848, _size845) = iprot.readListBegin()
-          for _i849 in xrange(_size845):
-            _elem850 = iprot.readString()
-            self.success.append(_elem850)
+          (_etype841, _size838) = iprot.readListBegin()
+          for _i842 in xrange(_size838):
+            _elem843 = iprot.readString()
+            self.success.append(_elem843)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21844,8 +21612,8 @@ class get_partition_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter851 in self.success:
-        oprot.writeString(iter851)
+      for iter844 in self.success:
+        oprot.writeString(iter844)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o2 is not None:
@@ -21921,10 +21689,10 @@ class get_partitions_ps_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype855, _size852) = iprot.readListBegin()
-          for _i856 in xrange(_size852):
-            _elem857 = iprot.readString()
-            self.part_vals.append(_elem857)
+          (_etype848, _size845) = iprot.readListBegin()
+          for _i849 in xrange(_size845):
+            _elem850 = iprot.readString()
+            self.part_vals.append(_elem850)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21954,8 +21722,8 @@ class get_partitions_ps_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter858 in self.part_vals:
-        oprot.writeString(iter858)
+      for iter851 in self.part_vals:
+        oprot.writeString(iter851)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -22019,11 +21787,11 @@ class get_partitions_ps_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype862, _size859) = iprot.readListBegin()
-          for _i863 in xrange(_size859):
-            _elem864 = Partition()
-            _elem864.read(iprot)
-            self.success.append(_elem864)
+          (_etype855, _size852) = iprot.readListBegin()
+          for _i856 in xrange(_size852):
+            _elem857 = Partition()
+            _elem857.read(iprot)
+            self.success.append(_elem857)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22052,8 +21820,8 @@ class get_partitions_ps_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter865 in self.success:
-        iter865.write(oprot)
+      for iter858 in self.success:
+        iter858.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -22140,10 +21908,10 @@ class get_partitions_ps_with_auth_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype869, _size866) = iprot.readListBegin()
-          for _i870 in xrange(_size866):
-            _elem871 = iprot.readString()
-            self.part_vals.append(_elem871)
+          (_etype862, _size859) = iprot.readListBegin()
+          for _i863 in xrange(_size859):
+            _elem864 = iprot.readString()
+            self.part_vals.append(_elem864)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22160,10 +21928,10 @@ class get_partitions_ps_with_auth_args:
       elif fid == 6:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype875, _size872) = iprot.readListBegin()
-          for _i876 in xrange(_size872):
-            _elem877 = iprot.readString()
-            self.group_names.append(_elem877)
+          (_etype868, _size865) = iprot.readListBegin()
+          for _i869 in xrange(_size865):
+            _elem870 = iprot.readString()
+            self.group_names.append(_elem870)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22188,8 +21956,8 @@ class get_partitions_ps_with_auth_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter878 in self.part_vals:
-        oprot.writeString(iter878)
+      for iter871 in self.part_vals:
+        oprot.writeString(iter871)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -22203,8 +21971,8 @@ class get_partitions_ps_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 6)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter879 in self.group_names:
-        oprot.writeString(iter879)
+      for iter872 in self.group_names:
+        oprot.writeString(iter872)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -22266,11 +22034,11 @@ class get_partitions_ps_with_auth_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype883, _size880) = iprot.readListBegin()
-          for _i884 in xrange(_size880):
-            _elem885 = Partition()
-            _elem885.read(iprot)
-            self.success.append(_elem885)
+          (_etype876, _size873) = iprot.readListBegin()
+          for _i877 in xrange(_size873):
+            _elem878 = Partition()
+            _elem878.read(iprot)
+            self.success.append(_elem878)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22299,8 +22067,8 @@ class get_partitions_ps_with_auth_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter886 in self.success:
-        iter886.write(oprot)
+      for iter879 in self.success:
+        iter879.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -22381,10 +22149,10 @@ class get_partition_names_ps_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype890, _size887) = iprot.readListBegin()
-          for _i891 in xrange(_size887):
-            _elem892 = iprot.readString()
-            self.part_vals.append(_elem892)
+          (_etype883, _size880) = iprot.readListBegin()
+          for _i884 in xrange(_size880):
+            _elem885 = iprot.readString()
+            self.part_vals.append(_elem885)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22414,8 +22182,8 @@ class get_partition_names_ps_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter893 in self.part_vals:
-        oprot.writeString(iter893)
+      for iter886 in self.part_vals:
+        oprot.writeString(iter886)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -22479,10 +22247,10 @@ class get_partition_names_ps_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype897, _size894) = iprot.readListBegin()
-          for _i898 in xrange(_size894):
-            _elem899 = iprot.readString()
-            self.success.append(_elem899)
+          (_etype890, _size887) = iprot.readListBegin()
+          for _i891 in xrange(_size887):
+            _elem892 = iprot.readString()
+            self.success.append(_elem892)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22511,8 +22279,8 @@ class get_partition_names_ps_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter900 in self.success:
-        oprot.writeString(iter900)
+      for iter893 in self.success:
+        oprot.writeString(iter893)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -22683,11 +22451,11 @@ class get_partitions_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype904, _size901) = iprot.readListBegin()
-          for _i905 in xrange(_size901):
-            _elem906 = Partition()
-            _elem906.read(iprot)
-            self.success.append(_elem906)
+          (_etype897, _size894) = iprot.readListBegin()
+          for _i898 in xrange(_size894):
+            _elem899 = Partition()
+            _elem899.read(iprot)
+            self.success.append(_elem899)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22716,8 +22484,8 @@ class get_partitions_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter907 in self.success:
-        iter907.write(oprot)
+      for iter900 in self.success:
+        iter900.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -22888,11 +22656,11 @@ class get_part_specs_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype911, _size908) = iprot.readListBegin()
-          for _i912 in xrange(_size908):
-            _elem913 = PartitionSpec()
-            _elem913.read(iprot)
-            self.success.append(_elem913)
+          (_etype904, _size901) = iprot.readListBegin()
+          for _i905 in xrange(_size901):
+            _elem906 = PartitionSpec()
+            _elem906.read(iprot)
+            self.success.append(_elem906)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22921,8 +22689,8 @@ class get_part_specs_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter914 in self.success:
-        iter914.write(oprot)
+      for iter907 in self.success:
+        iter907.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -23342,10 +23110,10 @@ class get_partitions_by_names_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.names = []
-          (_etype918, _size915) = iprot.readListBegin()
-          for _i919 in xrange(_size915):
-            _elem920 = iprot.readString()
-            self.names.append(_elem920)
+          (_etype911, _size908) = iprot.readListBegin()
+          for _i912 in xrange(_size908):
+            _elem913 = iprot.readString()
+            self.names.append(_elem913)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23370,8 +23138,8 @@ class get_partitions_by_names_args:
     if self.names is not None:
       oprot.writeFieldBegin('names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.names))
-      for iter921 in self.names:
-        oprot.writeString(iter921)
+      for iter914 in self.names:
+        oprot.writeString(iter914)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -23430,11 +23198,11 @@ class get_partitions_by_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype925, _size922) = iprot.readListBegin()
-          for _i926 in xrange(_size922):
-            _elem927 = Partition()
-            _elem927.read(iprot)
-            self.success.append(_elem927)
+          (_etype918, _size915) = iprot.readListBegin()
+          for _i919 in xrange(_size915):
+            _elem920 = Partition()
+            _elem920.read(iprot)
+            self.success.append(_elem920)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23463,8 +23231,8 @@ class get_partitions_by_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter928 in self.success:
-        iter928.write(oprot)
+      for iter921 in self.success:
+        iter921.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -23714,11 +23482,11 @@ class alter_partitions_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype932, _size929) = iprot.readListBegin()
-          for _i933 in xrange(_size929):
-            _elem934 = Partition()
-            _elem934.read(iprot)
-            self.new_parts.append(_elem934)
+          (_etype925, _size922) = iprot.readListBegin()
+          for _i926 in xrange(_size922):
+            _elem927 = Partition()
+            _elem927.read(iprot)
+            self.new_parts.append(_elem927)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23743,8 +23511,8 @@ class alter_partitions_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter935 in self.new_parts:
-        iter935.write(oprot)
+      for iter928 in self.new_parts:
+        iter928.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -23897,11 +23665,11 @@ class alter_partitions_with_environment_context_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype939, _size936) = iprot.readListBegin()
-          for _i940 in xrange(_size936):
-            _elem941 = Partition()
-            _elem941.read(iprot)
-            self.new_parts.append(_elem941)
+          (_etype932, _size929) = iprot.readListBegin()
+          for _i933 in xrange(_size929):
+            _elem934 = Partition()
+            _elem934.read(iprot)
+            self.new_parts.append(_elem934)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23932,8 +23700,8 @@ class alter_partitions_with_environment_context_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter942 in self.new_parts:
-        iter942.write(oprot)
+      for iter935 in self.new_parts:
+        iter935.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.environment_context is not None:
@@ -24277,10 +24045,10 @@ class rename_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype946, _size943) = iprot.readListBegin()
-          for _i947 in xrange(_size943):
-            _elem948 = iprot.readString()
-            self.part_vals.append(_elem948)
+          (_etype939, _size936) = iprot.readListBegin()
+          for _i940 in xrange(_size936):
+            _elem941 = iprot.readString()
+            self.part_vals.append(_elem941)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24311,8 +24079,8 @@ class rename_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter949 in self.part_vals:
-        oprot.writeString(iter949)
+      for iter942 in self.part_vals:
+        oprot.writeString(iter942)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.new_part is not None:
@@ -24454,10 +24222,10 @@ class partition_name_has_valid_characters_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype953, _size950) = iprot.readListBegin()
-          for _i954 in xrange(_size950):
-            _elem955 = iprot.readString()
-            self.part_vals.append(_elem955)
+          (_etype946, _size943) = iprot.readListBegin()
+          for _i947 in xrange(_size943):
+            _elem948 = iprot.readString()
+            self.part_vals.append(_elem948)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24479,8 +24247,8 @@ class partition_name_has_valid_characters_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter956 in self.part_vals:
-        oprot.writeString(iter956)
+      for iter949 in self.part_vals:
+        oprot.writeString(iter949)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.throw_exception is not None:
@@ -24838,10 +24606,10 @@ class partition_name_to_vals_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype960, _size957) = iprot.readListBegin()
-          for _i961 in xrange(_size957):
-            _elem962 = iprot.readString()
-            self.success.append(_elem962)
+          (_etype953, _size950) = iprot.readListBegin()
+          for _i954 in xrange(_size950):
+            _elem955 = iprot.readString()
+            self.success.append(_elem955)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24864,8 +24632,8 @@ class partition_name_to_vals_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter963 in self.success:
-        oprot.writeString(iter963)
+      for iter956 in self.success:
+        oprot.writeString(iter956)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -24989,11 +24757,11 @@ class partition_name_to_spec_result:
       if fid == 0:
         if ftype == TType.MAP:
           self.success = {}
-          (_ktype965, _vtype966, _size964 ) = iprot.readMapBegin()
-          for _i968 in xrange(_size964):
-            _key969 = iprot.readString()
-            _val970 = iprot.readString()
-            self.success[_key969] = _val970
+          (_ktype958, _vtype959, _size957 ) = iprot.readMapBegin()
+          for _i961 in xrange(_size957):
+            _key962 = iprot.readString()
+            _val963 = iprot.readString()
+            self.success[_key962] = _val963
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -25016,9 +24784,9 @@ class partition_name_to_spec_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.MAP, 0)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
-      for kiter971,viter972 in self.success.items():
-        oprot.writeString(kiter971)
-        oprot.writeString(viter972)
+      for kiter964,viter965 in self.success.items():
+        oprot.writeString(kiter964)
+        oprot.writeString(viter965)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -25094,11 +24862,11 @@ class markPartitionForEvent_args:
       elif fid == 3:
         if ftype == TType.MAP:
           self.part_vals = {}
-          (_ktype974, _vtype975, _size973 ) = iprot.readMapBegin()
-          for _i977 in xrange(_size973):
-            _key978 = iprot.readString()
-            _val979 = iprot.readString()
-            self.part_vals[_key978] = _val979
+          (_ktype967, _vtype968, _size966 ) = iprot.readMapBegin()
+          for _i970 in xrange(_size966):
+            _key971 = iprot.readString()
+            _val972 = iprot.readString()
+            self.part_vals[_key971] = _val972
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -25128,9 +24896,9 @@ class markPartitionForEvent_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter980,viter981 in self.part_vals.items():
-        oprot.writeString(kiter980)
-        oprot.writeString(viter981)
+      for kiter973,viter974 in self.part_vals.items():
+        oprot.writeString(kiter973)
+        oprot.writeString(viter974)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -25344,11 +25112,11 @@ class isPartitionMarkedForEvent_args:
       elif fid == 3:
         if ftype == TType.MAP:
           self.part_vals = {}
-          (_ktype983, _vtype984, _size982 ) = iprot.readMapBegin()
-          for _i986 in xrange(_size982):
-            _key987 = iprot.readString()
-            _val988 = iprot.readString()
-            self.part_vals[_key987] = _val988
+          (_ktype976, _vtype977, _size975 ) = iprot.readMapBegin()
+          for _i979 in xrange(_size975):
+            _key980 = iprot.readString()
+            _val981 = iprot.readString()
+            self.part_vals[_key980] = _val981
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -25378,9 +25146,9 @@ class isPartitionMarkedForEvent_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter989,viter990 in self.part_vals.items():
-        oprot.writeString(kiter989)
-        oprot.writeString(viter990)
+      for kiter982,viter983 in self.part_vals.items():
+        oprot.writeString(kiter982)
+        oprot.writeString(viter983)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -26435,11 +26203,11 @@ class get_indexes_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype994, _size991) = iprot.readListBegin()
-          for _i995 in xrange(_size991):
-            _elem996 = Index()
-            _elem996.read(iprot)
-            self.success.append(_elem996)
+          (_etype987, _size984) = iprot.readListBegin()
+          for _i988 in xrange(_size984):
+            _elem989 = Index()
+            _elem989.read(iprot)
+            self.success.append(_elem989)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26468,8 +26236,8 @@ class get_indexes_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter997 in self.success:
-        iter997.write(oprot)
+      for iter990 in self.success:
+        iter990.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -26624,10 +26392,10 @@ class get_index_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1001, _size998) = iprot.readListBegin()
-          for _i1002 in xrange(_size998):
-            _elem1003 = iprot.readString()
-            self.success.append(_elem1003)
+          (_etype994, _size991) = iprot.readListBegin()
+          for _i995 in xrange(_size991):
+            _elem996 = iprot.readString()
+            self.success.append(_elem996)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26650,8 +26418,8 @@ class get_index_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1004 in self.success:
-        oprot.writeString(iter1004)
+      for iter997 in self.success:
+        oprot.writeString(iter997)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o2 is not None:
@@ -29517,10 +29285,10 @@ class get_functions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1008, _size1005) = iprot.readListBegin()
-          for _i1009 in xrange(_size1005):
-            _elem1010 = iprot.readString()
-            self.success.append(_elem1010)
+          (_etype1001, _size998) = iprot.readListBegin()
+          for _i1002 in xrange(_size998):
+            _elem1003 = iprot.readString()
+            self.success.append(_elem1003)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29543,8 +29311,8 @@ class get_functions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1011 in self.success:
-        oprot.writeString(iter1011)
+      for iter1004 in self.success:
+        oprot.writeString(iter1004)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -30232,10 +30000,10 @@ class get_role_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1015, _size1012) = iprot.readListBegin()
-          for _i1016 in xrange(_size1012):
-            _elem1017 = iprot.readString()
-            self.success.append(_elem1017)
+          (_etype1008, _size1005) = iprot.readListBegin()
+          for _i1009 in xrange(_size1005):
+            _elem1010 = iprot.readString()
+            self.success.append(_elem1010)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30258,8 +30026,8 @@ class get_role_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1018 in self.success:
-        oprot.writeString(iter1018)
+      for iter1011 in self.success:
+        oprot.writeString(iter1011)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -30773,11 +30541,11 @@ class list_roles_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1022, _size1019) = iprot.readListBegin()
-          for _i1023 in xrange(_size1019):
-            _elem1024 = Role()
-            _elem1024.read(iprot)
-            self.success.append(_elem1024)
+          (_etype1015, _size1012) = iprot.readListBegin()
+          for _i1016 in xrange(_size1012):
+            _elem1017 = Role()
+            _elem1017.read(iprot)
+            self.success.append(_elem1017)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30800,8 +30568,8 @@ class list_roles_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1025 in self.success:
-        iter1025.write(oprot)
+      for iter1018 in self.success:
+        iter1018.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -31310,10 +31078,10 @@ class get_privilege_set_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype1029, _size1026) = iprot.readListBegin()
-          for _i1030 in xrange(_size1026):
-            _elem1031 = iprot.readString()
-            self.group_names.append(_elem1031)
+          (_etype1022, _size1019) = iprot.readListBegin()
+          for _i1023 in xrange(_size1019):
+            _elem1024 = iprot.readString()
+            self.group_names.append(_elem1024)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -31338,8 +31106,8 @@ class get_privilege_set_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1032 in self.group_names:
-        oprot.writeString(iter1032)
+      for iter1025 in self.group_names:
+        oprot.writeString(iter1025)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -31566,11 +31334,11 @@ class list_privileges_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1036, _size1033) = iprot.readListBegin()
-          for _i1037 in xrange(_size1033):
-            _elem1038 = HiveObjectPrivilege()
-            _elem1038.read(iprot)
-            self.success.append(_elem1038)
+          (_etype1029, _size1026) = iprot.readListBegin()
+          for _i1030 in xrange(_size1026):
+            _elem1031 = HiveObjectPrivilege()
+            _elem1031.read(iprot)
+            self.success.append(_elem1031)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -31593,8 +31361,8 @@ class list_privileges_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1039 in self.success:
-        iter1039.write(oprot)
+      for iter1032 in self.success:
+        iter1032.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -32092,10 +31860,10 @@ class set_ugi_args:
       elif fid == 2:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype1043, _size1040) = iprot.readListBegin()
-          for _i1044 in xrange(_size1040):
-            _elem1045 = iprot.readString()
-            self.group_names.append(_elem1045)
+          (_etype1036, _size1033) = iprot.readListBegin()
+          for _i1037 in xrange(_size1033):
+            _elem1038 = iprot.readString()
+            self.group_names.append(_elem1038)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -32116,8 +31884,8 @@ class set_ugi_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1046 in self.group_names:
-        oprot.writeString(iter1046)
+      for iter1039 in self.group_names:
+        oprot.writeString(iter1039)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -32172,10 +31940,10 @@ class set_ugi_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1050, _size1047) = iprot.readListBegin()
-          for _i1051 in xrange(_size1047):
-            _elem1052 = iprot.readString()
-            self.success.append(_elem1052)
+          (_etype1043, _size1040) = iprot.readListBegin()
+          for _i1044 in xrange(_size1040):
+            _elem1045 = iprot.readString()
+            self.success.append(_elem1045)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -32198,8 +31966,8 @@ class set_ugi_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1053 in self.success:
-        oprot.writeString(iter1053)
+      for iter1046 in self.success:
+        oprot.writeString(iter1046)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -33131,10 +32899,10 @@ class get_all_token_identifiers_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1057, _size1054) = iprot.readListBegin()
-          for _i1058 in xrange(_size1054):
-            _elem1059 = iprot.readString()
-            self.success.append(_elem1059)
+          (_etype1050, _size1047) = iprot.readListBegin()
+          for _i1051 in xrange(_size1047):
+            _elem1052 = iprot.readString()
+            self.success.append(_elem1052)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -33151,8 +32919,8 @@ class get_all_token_identifiers_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1060 in self.success:
-        oprot.writeString(iter1060)
+      for iter1053 in self.success:
+        oprot.writeString(iter1053)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -33679,10 +33447,10 @@ class get_master_keys_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1064, _size1061) = iprot.readListBegin()
-          for _i1065 in xrange(_size1061):
-            _elem1066 = iprot.readString()
-            self.success.append(_elem1066)
+          (_etype1057, _size1054) = iprot.readListBegin()
+          for _i1058 in xrange(_size1054):
+            _elem1059 = iprot.readString()
+            self.success.append(_elem1059)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -33699,8 +33467,8 @@ class get_master_keys_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1067 in self.success:
-        oprot.writeString(iter1067)
+      for iter1060 in self.success:
+        oprot.writeString(iter1060)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -34145,596 +33913,19 @@ class abort_txn_args:
   def __ne__(self, other):
     return not (self == other)
 
-class abort_txn_result:
-  """
-  Attributes:
-   - o1
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1
-  )
-
-  def __init__(self, o1=None,):
-    self.o1 = o1
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRUCT:
-          self.o1 = NoSuchTxnException()
-          self.o1.read(iprot)
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('abort_txn_result')
-    if self.o1 is not None:
-      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
-      self.o1.write(oprot)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.o1)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class abort_txns_args:
-  """
-  Attributes:
-   - rqst
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRUCT, 'rqst', (AbortTxnsRequest, AbortTxnsRequest.thrift_spec), None, ), # 1
-  )
-
-  def __init__(self, rqst=None,):
-    self.rqst = rqst
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRUCT:
-          self.rqst = AbortTxnsRequest()
-          self.rqst.read(iprot)
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('abort_txns_args')
-    if self.rqst is not None:
-      oprot.writeFieldBegin('rqst', TType.STRUCT, 1)
-      self.rqst.write(oprot)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.rqst)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class abort_txns_result:
-  """
-  Attributes:
-   - o1
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1
-  )
-
-  def __init__(self, o1=None,):
-    self.o1 = o1
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRUCT:
-          self.o1 = NoSuchTxnException()
-          self.o1.read(iprot)
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('abort_txns_result')
-    if self.o1 is not None:
-      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
-      self.o1.write(oprot)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.o1)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class commit_txn_args:
-  """
-  Attributes:
-   - rqst
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRUCT, 'rqst', (CommitTxnRequest, CommitTxnRequest.thrift_spec), None, ), # 1
-  )
-
-  def __init__(self, rqst=None,):
-    self.rqst = rqst
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRUCT:
-          self.rqst = CommitTxnRequest()
-          self.rqst.read(iprot)
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('commit_txn_args')
-    if self.rqst is not None:
-      oprot.writeFieldBegin('rqst', TType.STRUCT, 1)
-      self.rqst.write(oprot)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.rqst)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class commit_txn_result:
-  """
-  Attributes:
-   - o1
-   - o2
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1
-    (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2
-  )
-
-  def __init__(self, o1=None, o2=None,):
-    self.o1 = o1
-    self.o2 = o2
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRUCT:
-          self.o1 = NoSuchTxnException()
-          self.o1.read(iprot)
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRUCT:
-          self.o2 = TxnAbortedException()
-          self.o2.read(iprot)
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('commit_txn_result')
-    if self.o1 is not None:
-      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
-      self.o1.write(oprot)
-      oprot.writeFieldEnd()
-    if self.o2 is not None:
-      oprot.writeFieldBegin('o2', TType.STRUCT, 2)
-      self.o2.write(oprot)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.o1)
-    value = (value * 31) ^ hash(self.o2)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self

<TRUNCATED>
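
The renumbered temporaries in the hunks above are an artifact of Thrift code generation, not a logic change: the compiler numbers its scratch variables (_etypeN, _sizeN, _elemN, _iN, iterN, _keyN, _valN) sequentially through the whole generated file, so removing MM-specific service methods earlier in ThriftHiveMetastore.py shifts every later suffix down by seven. Each hunk is the same generated list-read shape; for reference, a minimal Java rendering of that shape (identifiers are illustrative, not taken from the generated file):

import java.util.ArrayList;
import java.util.List;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;

// The shape the Thrift compiler emits for every LIST-typed field;
// Partition stands in for the generated metastore struct.
static List<Partition> readPartitionList(TProtocol iprot) throws TException {
  TList meta = iprot.readListBegin();      // element type and size
  List<Partition> out = new ArrayList<>(meta.size);
  for (int i = 0; i < meta.size; ++i) {
    Partition elem = new Partition();      // _elemNNN in the Python output
    elem.read(iprot);                      // recurse into the struct's fields
    out.add(elem);
  }
  iprot.readListEnd();
  return out;
}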

[06/18] hive git commit: HIVE-14879 : integrate MM tables into ACID: replace MM metastore calls and structures with ACID ones (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 777c119..03c50a7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -67,9 +67,6 @@ import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.zip.Deflater;
@@ -100,8 +97,8 @@ import org.apache.hadoop.hive.common.HiveInterruptUtils;
 import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.common.ValidWriteIds;
 import org.apache.hadoop.hive.common.StringInternUtils;
+import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
@@ -211,61 +208,9 @@ import org.apache.hive.common.util.ReflectionUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.esotericsoftware.kryo.Kryo;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import java.beans.DefaultPersistenceDelegate;
-import java.beans.Encoder;
-import java.beans.Expression;
-import java.beans.Statement;
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInput;
-import java.io.EOFException;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.Serializable;
-import java.net.URI;
-import java.net.URL;
-import java.net.URLClassLoader;
-import java.net.URLDecoder;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.sql.SQLFeatureNotSupportedException;
-import java.sql.SQLTransientException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Calendar;
-import java.util.Collection;
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Random;
-import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import java.util.zip.Deflater;
-import java.util.zip.DeflaterOutputStream;
-import java.util.zip.InflaterInputStream;
 
 
 /**
@@ -1592,7 +1537,7 @@ public final class Utilities {
     int dpLevels = dpCtx == null ? 0 : dpCtx.getNumDPCols(),
         numBuckets = (conf != null && conf.getTable() != null)
           ? conf.getTable().getNumBuckets() : 0;
-    return removeTempOrDuplicateFiles(fs, fileStats, dpLevels, numBuckets, hconf, null);
+    return removeTempOrDuplicateFiles(fs, fileStats, dpLevels, numBuckets, hconf, null, 0);
   }
   
   private static boolean removeEmptyDpDirectory(FileSystem fs, Path path) throws IOException {
@@ -1608,7 +1553,7 @@ public final class Utilities {
   }
 
   public static List<Path> removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] fileStats,
-      int dpLevels, int numBuckets, Configuration hconf, Long mmWriteId) throws IOException {
+      int dpLevels, int numBuckets, Configuration hconf, Long txnId, int stmtId) throws IOException {
     if (fileStats == null) {
       return null;
     }
@@ -1627,9 +1572,9 @@ public final class Utilities {
         }
         FileStatus[] items = fs.listStatus(path);
 
-        if (mmWriteId != null) {
+        if (txnId != null) {
           Path mmDir = parts[i].getPath();
-          if (!mmDir.getName().equals(ValidWriteIds.getMmFilePrefix(mmWriteId))) {
+          if (!mmDir.getName().equals(AcidUtils.deltaSubdir(txnId, txnId, stmtId))) {
             throw new IOException("Unexpected non-MM directory name " + mmDir);
           }
           Utilities.LOG14535.info("removeTempOrDuplicateFiles processing files in MM directory " + mmDir);
@@ -1644,14 +1589,14 @@ public final class Utilities {
       if (items.length == 0) {
         return result;
       }
-      if (mmWriteId == null) {
+      if (txnId == null) {
         taskIDToFile = removeTempOrDuplicateFilesNonMm(items, fs);
       } else {
         if (items.length > 1) {
           throw new IOException("Unexpected directories for non-DP MM: " + Arrays.toString(items));
         }
         Path mmDir = items[0].getPath();
-        if (!mmDir.getName().equals(ValidWriteIds.getMmFilePrefix(mmWriteId))) {
+        if (!mmDir.getName().equals(AcidUtils.deltaSubdir(txnId, txnId, stmtId))) {
           throw new IOException("Unexpected non-MM directory " + mmDir);
         }
         Utilities.LOG14535.info(
@@ -4003,10 +3948,10 @@ public final class Utilities {
   }
 
   public static Path[] getMmDirectoryCandidates(FileSystem fs, Path path, int dpLevels,
-      int lbLevels, PathFilter filter, long mmWriteId, Configuration conf) throws IOException {
+      int lbLevels, PathFilter filter, long txnId, int stmtId, Configuration conf) throws IOException {
     int skipLevels = dpLevels + lbLevels;
     if (filter == null) {
-      filter = new ValidWriteIds.IdPathFilter(mmWriteId, true);
+      filter = new JavaUtils.IdPathFilter(txnId, stmtId, true);
     }
     if (skipLevels == 0) {
       return statusToPath(fs.listStatus(path, filter));
@@ -4014,7 +3959,7 @@ public final class Utilities {
     if (HiveConf.getBoolVar(conf, ConfVars.HIVE_MM_AVOID_GLOBSTATUS_ON_S3) && isS3(fs)) {
       return getMmDirectoryCandidatesRecursive(fs, path, skipLevels, filter);
     }
-    return getMmDirectoryCandidatesGlobStatus(fs, path, skipLevels, filter, mmWriteId);
+    return getMmDirectoryCandidatesGlobStatus(fs, path, skipLevels, filter, txnId, stmtId);
   }
 
   private static boolean isS3(FileSystem fs) {
@@ -4082,22 +4027,22 @@ public final class Utilities {
   }
 
   private static Path[] getMmDirectoryCandidatesGlobStatus(FileSystem fs,
-      Path path, int skipLevels, PathFilter filter, long mmWriteId) throws IOException {
+      Path path, int skipLevels, PathFilter filter, long txnId, int stmtId) throws IOException {
     StringBuilder sb = new StringBuilder(path.toUri().getPath());
     for (int i = 0; i < skipLevels; i++) {
       sb.append(Path.SEPARATOR).append("*");
     }
-    sb.append(Path.SEPARATOR).append(ValidWriteIds.getMmFilePrefix(mmWriteId));
+    sb.append(Path.SEPARATOR).append(AcidUtils.deltaSubdir(txnId, txnId, stmtId));
     Path pathPattern = new Path(path, sb.toString());
     Utilities.LOG14535.info("Looking for files via: " + pathPattern);
     return statusToPath(fs.globStatus(pathPattern, filter));
   }
 
   private static void tryDeleteAllMmFiles(FileSystem fs, Path specPath, Path manifestDir,
-      int dpLevels, int lbLevels, String unionSuffix, ValidWriteIds.IdPathFilter filter,
-      long mmWriteId, Configuration conf) throws IOException {
+                                          int dpLevels, int lbLevels, JavaUtils.IdPathFilter filter,
+                                          long txnId, int stmtId, Configuration conf) throws IOException {
     Path[] files = getMmDirectoryCandidates(
-        fs, specPath, dpLevels, lbLevels, filter, mmWriteId, conf);
+        fs, specPath, dpLevels, lbLevels, filter, txnId, stmtId, conf);
     if (files != null) {
       for (Path path : files) {
         Utilities.LOG14535.info("Deleting " + path + " on failure");
@@ -4110,10 +4055,10 @@ public final class Utilities {
 
 
   public static void writeMmCommitManifest(List<Path> commitPaths, Path specPath, FileSystem fs,
-      String taskId, Long mmWriteId, String unionSuffix) throws HiveException {
+      String taskId, Long txnId, int stmtId, String unionSuffix) throws HiveException {
     if (commitPaths.isEmpty()) return;
     // We assume one FSOP per task (per specPath), so we create it in specPath.
-    Path manifestPath = getManifestDir(specPath, mmWriteId, unionSuffix);
+    Path manifestPath = getManifestDir(specPath, txnId, stmtId, unionSuffix);
     manifestPath = new Path(manifestPath, taskId + MANIFEST_EXTENSION);
     Utilities.LOG14535.info("Writing manifest to " + manifestPath + " with " + commitPaths);
     try {
@@ -4132,8 +4077,8 @@ public final class Utilities {
     }
   }
 
-  private static Path getManifestDir(Path specPath, long mmWriteId, String unionSuffix) {
-    Path manifestPath = new Path(specPath, "_tmp." + ValidWriteIds.getMmFilePrefix(mmWriteId));
+  private static Path getManifestDir(Path specPath, long txnId, int stmtId, String unionSuffix) {
+    Path manifestPath = new Path(specPath, "_tmp." + AcidUtils.deltaSubdir(txnId, txnId, stmtId));
     return (unionSuffix == null) ? manifestPath : new Path(manifestPath, unionSuffix);
   }
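
The substitution that repeats through this file -- ValidWriteIds.getMmFilePrefix(mmWriteId) giving way to AcidUtils.deltaSubdir(txnId, txnId, stmtId) -- is the core of the change: MM writes now land in standard ACID delta directories keyed by transaction id and statement id rather than in branch-specific MM-prefixed directories. A rough sketch of the resulting names (the zero-padding widths are an assumption based on the usual ACID delta layout, not taken from this diff):

// Hypothetical illustration of the directory naming the patch adopts.
long txnId = 17;
int stmtId = 0;
// AcidUtils.deltaSubdir(txnId, txnId, stmtId) produces an ACID-style name:
String deltaDir = String.format("delta_%07d_%07d_%04d", txnId, txnId, stmtId);
// -> "delta_0000017_0000017_0000"
// getManifestDir() above then prefixes it for the temporary manifest:
String manifestDir = "_tmp." + deltaDir;  // "_tmp.delta_0000017_0000017_0000"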
 
@@ -4149,18 +4094,18 @@ public final class Utilities {
   }
 
   public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Configuration hconf,
-      boolean success, int dpLevels, int lbLevels, MissingBucketsContext mbc, long mmWriteId,
+      boolean success, int dpLevels, int lbLevels, MissingBucketsContext mbc, long txnId, int stmtId,
       Reporter reporter, boolean isMmCtas) throws IOException, HiveException {
     FileSystem fs = specPath.getFileSystem(hconf);
-    Path manifestDir = getManifestDir(specPath, mmWriteId, unionSuffix);
+    Path manifestDir = getManifestDir(specPath, txnId, stmtId, unionSuffix);
     if (!success) {
-      ValidWriteIds.IdPathFilter filter = new ValidWriteIds.IdPathFilter(mmWriteId, true);
+      JavaUtils.IdPathFilter filter = new JavaUtils.IdPathFilter(txnId, stmtId, true);
       tryDeleteAllMmFiles(fs, specPath, manifestDir, dpLevels, lbLevels,
-          unionSuffix, filter, mmWriteId, hconf);
+          filter, txnId, stmtId, hconf);
       return;
     }
 
-    Utilities.LOG14535.info("Looking for manifests in: " + manifestDir + " (" + mmWriteId + ")");
+    Utilities.LOG14535.info("Looking for manifests in: " + manifestDir + " (" + txnId + ")");
     // TODO# may be wrong if there are no splits (empty insert/CTAS)
     List<Path> manifests = new ArrayList<>();
     if (fs.exists(manifestDir)) {
@@ -4180,14 +4125,14 @@ public final class Utilities {
     }
 
     Utilities.LOG14535.info("Looking for files in: " + specPath);
-    ValidWriteIds.IdPathFilter filter = new ValidWriteIds.IdPathFilter(mmWriteId, true);
+    JavaUtils.IdPathFilter filter = new JavaUtils.IdPathFilter(txnId, stmtId, true);
     if (isMmCtas && !fs.exists(specPath)) {
       // TODO: do we also need to do this when creating an empty partition from select?
       Utilities.LOG14535.info("Creating table directory for CTAS with no output at " + specPath);
       FileUtils.mkdir(fs, specPath, hconf);
     }
     Path[] files = getMmDirectoryCandidates(
-        fs, specPath, dpLevels, lbLevels, filter, mmWriteId, hconf);
+        fs, specPath, dpLevels, lbLevels, filter, txnId, stmtId, hconf);
     ArrayList<Path> mmDirectories = new ArrayList<>();
     if (files != null) {
       for (Path path : files) {
@@ -4243,7 +4188,7 @@ public final class Utilities {
       finalResults[i] = new PathOnlyFileStatus(mmDirectories.get(i));
     }
     List<Path> emptyBuckets = Utilities.removeTempOrDuplicateFiles(
-        fs, finalResults, dpLevels, mbc == null ? 0 : mbc.numBuckets, hconf, mmWriteId);
+        fs, finalResults, dpLevels, mbc == null ? 0 : mbc.numBuckets, hconf, txnId, stmtId);
     // create empty buckets if necessary
     if (emptyBuckets.size() > 0) {
       assert mbc != null;
@@ -4294,7 +4239,7 @@ public final class Utilities {
    * if the entire directory is valid (has no uncommitted/temporary files).
    */
   public static List<Path> getValidMmDirectoriesFromTableOrPart(Path path, Configuration conf,
-      ValidWriteIds ids, int lbLevels) throws IOException {
+      ValidTxnList validTxnList, int lbLevels) throws IOException {
     Utilities.LOG14535.info("Looking for valid MM paths under " + path);
     // NULL means this directory is entirely valid.
     List<Path> result = null;
@@ -4304,8 +4249,8 @@ public final class Utilities {
     for (int i = 0; i < children.length; ++i) {
       FileStatus file = children[i];
       Path childPath = file.getPath();
-      Long writeId = ValidWriteIds.extractWriteId(childPath);
-      if (!file.isDirectory() || writeId == null || !ids.isValid(writeId)) {
+      Long txnId = JavaUtils.extractTxnId(childPath);
+      if (!file.isDirectory() || txnId == null || !validTxnList.isTxnValid(txnId)) {
         Utilities.LOG14535.info("Skipping path " + childPath);
         if (result == null) {
           result = new ArrayList<>(children.length - 1);

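Note on the Utilities change above: the old write-ID file prefix is replaced by the ACID delta naming scheme. Below is a minimal sketch of how the manifest path is derived, assuming AcidUtils.deltaSubdir follows Hive's usual zero-padded delta layout (delta_<min>_<max>_<stmtId>); the local deltaSubdir helper is a stand-in for illustration, not the patched method:

    import org.apache.hadoop.fs.Path;

    public class ManifestDirSketch {
      // Stand-in for AcidUtils.deltaSubdir(txnId, txnId, stmtId); assumes the
      // conventional zero-padded delta name, e.g. delta_0000012_0000012_0000.
      static String deltaSubdir(long min, long max, int stmtId) {
        return String.format("delta_%07d_%07d_%04d", min, max, stmtId);
      }

      // Mirrors the patched getManifestDir: "_tmp." + delta name under specPath.
      static Path manifestDir(Path specPath, long txnId, int stmtId, String unionSuffix) {
        Path manifest = new Path(specPath, "_tmp." + deltaSubdir(txnId, txnId, stmtId));
        return unionSuffix == null ? manifest : new Path(manifest, unionSuffix);
      }

      public static void main(String[] args) {
        // Prints /warehouse/t/_tmp.delta_0000012_0000012_0000
        System.out.println(manifestDir(new Path("/warehouse/t"), 12, 0, null));
      }
    }
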
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 740488c..902caa3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -276,9 +276,8 @@ public class AcidUtils {
     return result;
   }
 
-  // INSERT_ONLY is a special operation which we only support INSERT operations, no UPDATE/DELETE
   public enum Operation {
-    NOT_ACID, INSERT, UPDATE, DELETE, INSERT_ONLY
+    NOT_ACID, INSERT, UPDATE, DELETE
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
index 9b83cb4..8bcf8c7 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
@@ -20,11 +20,9 @@ package org.apache.hadoop.hive.ql.io;
 
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
@@ -34,8 +32,11 @@ import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.Map.Entry;
 
-import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.StringInternUtils;
+import org.apache.hadoop.hive.common.ValidReadTxnList;
+import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -44,8 +45,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.common.ValidWriteIds;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil;
@@ -423,12 +422,11 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
    */
   private void addSplitsForGroup(List<Path> dirs, TableScanOperator tableScan, JobConf conf,
       InputFormat inputFormat, Class<? extends InputFormat> inputFormatClass, int splits,
-      TableDesc table, Map<String, ValidWriteIds> writeIdMap, List<InputSplit> result)
+      TableDesc table, List<InputSplit> result)
           throws IOException {
-    ValidWriteIds writeIds = extractWriteIds(writeIdMap, conf, table.getTableName());
-    if (writeIds != null) {
-      Utilities.LOG14535.info("Observing " + table.getTableName() + ": " + writeIds);
-    }
+    String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY);
+    ValidTxnList validTxnList = txnString == null ? new ValidReadTxnList() :
+        new ValidReadTxnList(txnString);
 
     Utilities.copyTablePropertiesToConf(table, conf);
 
@@ -436,7 +434,7 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
       pushFilters(conf, tableScan);
     }
 
-    Path[] finalDirs = processPathsForMmRead(dirs, conf, writeIds);
+    Path[] finalDirs = processPathsForMmRead(dirs, conf, validTxnList);
     if (finalDirs == null) {
       return; // No valid inputs.
     }
@@ -461,13 +459,13 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
   }
 
   public static Path[] processPathsForMmRead(List<Path> dirs, JobConf conf,
-      ValidWriteIds writeIds) throws IOException {
-    if (writeIds == null) {
+      ValidTxnList validTxnList) throws IOException {
+    if (validTxnList == null) {
       return dirs.toArray(new Path[dirs.size()]);
     } else {
       List<Path> finalPaths = new ArrayList<>(dirs.size());
       for (Path dir : dirs) {
-        processForWriteIds(dir, conf, writeIds, finalPaths);
+        processForWriteIds(dir, conf, validTxnList, finalPaths);
       }
       if (finalPaths.isEmpty()) {
         LOG.warn("No valid inputs found in " + dirs);
@@ -478,7 +476,7 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
   }
 
   private static void processForWriteIds(Path dir, JobConf conf,
-      ValidWriteIds writeIds, List<Path> finalPaths) throws IOException {
+      ValidTxnList validTxnList, List<Path> finalPaths) throws IOException {
     FileSystem fs = dir.getFileSystem(conf);
     Utilities.LOG14535.warn("Checking " + dir + " (root) for inputs");
     // Ignore nullscan-optimized paths.
@@ -489,17 +487,17 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
     FileStatus[] files = fs.listStatus(dir); // TODO: batch?
     LinkedList<Path> subdirs = new LinkedList<>();
     for (FileStatus file : files) {
-      handleNonMmDirChild(file, writeIds, subdirs, finalPaths);
+      handleNonMmDirChild(file, validTxnList, subdirs, finalPaths);
     }
     while (!subdirs.isEmpty()) {
       Path subdir = subdirs.poll();
       for (FileStatus file : fs.listStatus(subdir)) {
-        handleNonMmDirChild(file, writeIds, subdirs, finalPaths);
+        handleNonMmDirChild(file, validTxnList, subdirs, finalPaths);
       }
     }
   }
 
-  private static void handleNonMmDirChild(FileStatus file, ValidWriteIds writeIds,
+  private static void handleNonMmDirChild(FileStatus file, ValidTxnList validTxnList,
       LinkedList<Path> subdirs, List<Path> finalPaths) {
     Path path = file.getPath();
     Utilities.LOG14535.warn("Checking " + path + " for inputs");
@@ -507,12 +505,12 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
       Utilities.LOG14535.warn("Ignoring a file not in MM directory " + path);
       return;
     }
-    Long writeId = ValidWriteIds.extractWriteId(path);
-    if (writeId == null) {
+    Long txnId = JavaUtils.extractTxnId(path);
+    if (txnId == null) {
       subdirs.add(path);
       return;
     }
-    if (!writeIds.isValid(writeId)) {
+    if (!validTxnList.isTxnValid(txnId)) {
       Utilities.LOG14535.warn("Ignoring an uncommitted directory " + path);
       return;
     }
@@ -564,7 +562,6 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
     StringBuilder readColumnNamesBuffer = new StringBuilder(newjob.
       get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, ""));
     // for each dir, get the InputFormat, and do getSplits.
-    Map<String, ValidWriteIds> writeIdMap = new HashMap<>();
     for (Path dir : dirs) {
       PartitionDesc part = getPartitionDescFromPath(pathToPartitionInfo, dir);
       Class<? extends InputFormat> inputFormatClass = part.getInputFileFormatClass();
@@ -615,7 +612,7 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
         addSplitsForGroup(currentDirs, currentTableScan, newjob,
             getInputFormatFromCache(currentInputFormatClass, job),
             currentInputFormatClass, currentDirs.size()*(numSplits / dirs.length),
-            currentTable, writeIdMap, result);
+            currentTable, result);
       }
 
       currentDirs.clear();
@@ -637,7 +634,7 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
       addSplitsForGroup(currentDirs, currentTableScan, newjob,
           getInputFormatFromCache(currentInputFormatClass, job),
           currentInputFormatClass, currentDirs.size()*(numSplits / dirs.length),
-          currentTable, writeIdMap, result);
+          currentTable, result);
     }
 
     Utilities.clearWorkMapForConf(job);
@@ -648,19 +645,6 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
     return result.toArray(new HiveInputSplit[result.size()]);
   }
 
-  public static ValidWriteIds extractWriteIds(Map<String, ValidWriteIds> writeIdMap,
-      JobConf newjob, String tableName) {
-    if (StringUtils.isBlank(tableName)) return null;
-    ValidWriteIds writeIds = writeIdMap.get(tableName);
-    if (writeIds == null) {
-      writeIds = ValidWriteIds.createFromConf(newjob, tableName);
-      writeIdMap.put(tableName, writeIds != null ? writeIds : ValidWriteIds.NO_WRITE_IDS);
-    } else if (writeIds == ValidWriteIds.NO_WRITE_IDS) {
-      writeIds = null;
-    }
-    return writeIds;
-  }
-
   private void pushProjection(final JobConf newjob, final StringBuilder readColumnsBuffer,
       final StringBuilder readColumnNamesBuffer) {
     String readColIds = readColumnsBuffer.toString();

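Note on the HiveInputFormat change above: the per-table ValidWriteIds map is dropped in favor of the single ValidTxnList serialized in the job conf, and input directories are now kept or skipped by transaction validity. A minimal self-contained sketch of that decision follows; it models ValidTxnList.isTxnValid with a high-water mark plus an exception set, and the local extractTxnId stands in for the JavaUtils.extractTxnId helper this patch introduces:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class TxnFilterSketch {
      // Stand-in for JavaUtils.extractTxnId: parse the txn id from a delta dir
      // name, assuming the delta_<min>_<max>_<stmt> layout; null means "not a delta".
      static Long extractTxnId(String dirName) {
        if (!dirName.startsWith("delta_")) return null;
        try {
          return Long.parseLong(dirName.split("_")[1]);
        } catch (NumberFormatException e) {
          return null;
        }
      }

      public static void main(String[] args) {
        // Models ValidTxnList.isTxnValid: a txn is readable iff it is at or
        // below the high-water mark and not among the open/aborted exceptions.
        long highWatermark = 14;
        Set<Long> exceptions = new HashSet<>(Arrays.asList(11L, 13L));
        for (String dir : new String[] {"delta_0000010_0000010_0000",
                                        "delta_0000011_0000011_0000"}) {
          Long txnId = extractTxnId(dir);
          boolean keep = txnId != null && txnId <= highWatermark && !exceptions.contains(txnId);
          System.out.println(dir + " -> " + (keep ? "read" : "skip"));
        }
      }
    }
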
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 6498199..d793ccf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -32,7 +32,6 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -52,7 +51,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.ConcurrentHashMap;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
@@ -70,9 +68,9 @@ import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.common.BlobStorageUtils;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.HiveStatsUtils;
+import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.common.ValidWriteIds;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -104,7 +102,6 @@ import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
 import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
 import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
-import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResult;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 import org.apache.hadoop.hive.metastore.api.HiveObjectType;
@@ -157,7 +154,6 @@ import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
@@ -1624,26 +1620,13 @@ public class Hive {
   public void loadPartition(Path loadPath, String tableName,
       Map<String, String> partSpec, boolean replace, boolean inheritTableSpecs,
       boolean isSkewedStoreAsSubdir,  boolean isSrcLocal, boolean isAcid,
-      boolean hasFollowingStatsTask, Long mmWriteId, boolean isCommitMmWrite)
+      boolean hasFollowingStatsTask, Long txnId, int stmtId)
           throws HiveException {
     Table tbl = getTable(tableName);
-    boolean isMmTableWrite = (mmWriteId != null);
+    boolean isMmTableWrite = (txnId != null);
     Preconditions.checkState(isMmTableWrite == MetaStoreUtils.isInsertOnlyTable(tbl.getParameters()));
     loadPartition(loadPath, tbl, partSpec, replace, inheritTableSpecs,
-        isSkewedStoreAsSubdir, isSrcLocal, isAcid, hasFollowingStatsTask, mmWriteId);
-    if (isMmTableWrite && isCommitMmWrite) {
-      // The assumption behind committing here is that this partition is the only one outputted.
-      commitMmTableWrite(tbl, mmWriteId);
-    }
-  }
-
-  public void commitMmTableWrite(Table tbl, Long mmWriteId)
-      throws HiveException {
-    try {
-      getMSC().finalizeTableWrite(tbl.getDbName(), tbl.getTableName(), mmWriteId, true);
-    } catch (TException e) {
-      throw new HiveException(e);
-    }
+        isSkewedStoreAsSubdir, isSrcLocal, isAcid, hasFollowingStatsTask, txnId, stmtId);
   }
 
   /**
@@ -1673,7 +1656,7 @@ public class Hive {
    */
   public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> partSpec,
       boolean replace, boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir,
-      boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask, Long mmWriteId)
+      boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask, Long txnId, int stmtId)
           throws HiveException {
     Path tblDataLocationPath =  tbl.getDataLocation();
     try {
@@ -1722,34 +1705,34 @@ public class Hive {
         newFiles = Collections.synchronizedList(new ArrayList<Path>());
       }
       // TODO: this assumes both paths are qualified; which they are, currently.
-      if (mmWriteId != null && loadPath.equals(newPartPath)) {
+      if (txnId != null && loadPath.equals(newPartPath)) {
         // MM insert query, move itself is a no-op.
         Utilities.LOG14535.info("not moving " + loadPath + " to " + newPartPath + " (MM)");
         assert !isAcid;
         if (areEventsForDmlNeeded(tbl, oldPart)) {
-          newFiles = listFilesCreatedByQuery(loadPath, mmWriteId);
+          newFiles = listFilesCreatedByQuery(loadPath, txnId, stmtId);
         }
         Utilities.LOG14535.info("maybe deleting stuff from " + oldPartPath + " (new " + newPartPath + ") for replace");
         if (replace && oldPartPath != null) {
           boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
           deleteOldPathForReplace(newPartPath, oldPartPath, getConf(), isAutoPurge,
-              new ValidWriteIds.IdPathFilter(mmWriteId, false, true), mmWriteId != null,
+              new JavaUtils.IdPathFilter(txnId, stmtId, false, true), true,
               tbl.isStoredAsSubDirectories() ? tbl.getSkewedColNames().size() : 0);
         }
       } else {
         // Either a non-MM query, or a load into MM table from an external source.
         PathFilter filter = FileUtils.HIDDEN_FILES_PATH_FILTER;
         Path destPath = newPartPath;
-        if (mmWriteId != null) {
+        if (txnId != null) {
           // We will load into MM directory, and delete from the parent if needed.
-          destPath = new Path(destPath, ValidWriteIds.getMmFilePrefix(mmWriteId));
-          filter = replace ? new ValidWriteIds.IdPathFilter(mmWriteId, false, true) : filter;
+          destPath = new Path(destPath, AcidUtils.deltaSubdir(txnId, txnId, stmtId));
+          filter = replace ? new JavaUtils.IdPathFilter(txnId, stmtId, false, true) : filter;
         }
         Utilities.LOG14535.info("moving " + loadPath + " to " + destPath);
         if (replace || (oldPart == null && !isAcid)) {
           boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
           replaceFiles(tbl.getPath(), loadPath, destPath, oldPartPath, getConf(),
-              isSrcLocal, isAutoPurge, newFiles, filter, mmWriteId != null);
+              isSrcLocal, isAutoPurge, newFiles, filter, txnId != null);
         } else {
           FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
           Hive.copyFiles(conf, loadPath, destPath, fs, isSrcLocal, isAcid, newFiles);
@@ -1834,9 +1817,9 @@ public class Hive {
     return conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && oldPart != null;
   }
 
-  private List<Path> listFilesCreatedByQuery(Path loadPath, long mmWriteId) throws HiveException {
+  private List<Path> listFilesCreatedByQuery(Path loadPath, long txnId, int stmtId) throws HiveException {
     List<Path> newFiles = new ArrayList<Path>();
-    final String filePrefix = ValidWriteIds.getMmFilePrefix(mmWriteId);
+    final String filePrefix = AcidUtils.deltaSubdir(txnId, txnId, stmtId);
     FileStatus[] srcs;
     FileSystem srcFs;
     try {
@@ -1999,11 +1982,11 @@ private void constructOneLBLocationMap(FileStatus fSta,
    * @throws HiveException
    */
   private Set<Path> getValidPartitionsInPath(
-      int numDP, int numLB, Path loadPath, Long mmWriteId) throws HiveException {
+      int numDP, int numLB, Path loadPath, Long txnId, int stmtId) throws HiveException {
     Set<Path> validPartitions = new HashSet<Path>();
     try {
       FileSystem fs = loadPath.getFileSystem(conf);
-      if (mmWriteId == null) {
+      if (txnId == null) {
         FileStatus[] leafStatus = HiveStatsUtils.getFileStatusRecurse(loadPath, numDP, fs);
         // Check for empty partitions
         for (FileStatus s : leafStatus) {
@@ -2018,7 +2001,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
         // The non-MM path only finds new partitions, as it is looking at the temp path.
         // To produce the same effect, we will find all the partitions affected by this write ID.
         Path[] leafStatus = Utilities.getMmDirectoryCandidates(
-            fs, loadPath, numDP, numLB, null, mmWriteId, conf);
+            fs, loadPath, numDP, numLB, null, txnId, stmtId, conf);
         for (Path p : leafStatus) {
           Path dpPath = p.getParent(); // Skip the MM directory that we have found.
           for (int i = 0; i < numLB; ++i) {
@@ -2064,8 +2047,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
    */
   public Map<Map<String, String>, Partition> loadDynamicPartitions(final Path loadPath,
       final String tableName, final Map<String, String> partSpec, final boolean replace,
-      final int numDP, final int numLB, final boolean isAcid, final long txnId,
-      final boolean hasFollowingStatsTask, final AcidUtils.Operation operation, final Long mmWriteId)
+      final int numDP, final int numLB, final boolean isAcid, final long txnId, final int stmtId,
+      final boolean hasFollowingStatsTask, final AcidUtils.Operation operation)
       throws HiveException {
 
     final Map<Map<String, String>, Partition> partitionsMap =
@@ -2080,7 +2063,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
     // Get all valid partition paths and existing partitions for them (if any)
     final Table tbl = getTable(tableName);
-    final Set<Path> validPartitions = getValidPartitionsInPath(numDP, numLB, loadPath, mmWriteId);
+    final Set<Path> validPartitions = getValidPartitionsInPath(numDP, numLB, loadPath, txnId, stmtId);
 
     final int partsToLoad = validPartitions.size();
     final AtomicInteger partitionsLoaded = new AtomicInteger(0);
@@ -2114,7 +2097,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
               Utilities.LOG14535.info("loadPartition called for DPP from " + partPath + " to " + tbl.getTableName());
               Partition newPartition = loadPartition(partPath, tbl, fullPartSpec,
                   replace, true, numLB > 0,
-                  false, isAcid, hasFollowingStatsTask, mmWriteId);
+                  false, isAcid, hasFollowingStatsTask, txnId, stmtId);
               partitionsMap.put(fullPartSpec, newPartition);
 
               if (inPlaceEligible) {
@@ -2146,10 +2129,6 @@ private void constructOneLBLocationMap(FileStatus fSta,
       for (Future future : futures) {
         future.get();
       }
-      if (mmWriteId != null) {
-        // Commit after we have processed all the partitions.
-        commitMmTableWrite(tbl, mmWriteId);
-      }
     } catch (InterruptedException | ExecutionException e) {
       LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks");
       //cancel other futures
@@ -2200,8 +2179,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
    */
   public void loadTable(Path loadPath, String tableName, boolean replace, boolean isSrcLocal,
       boolean isSkewedStoreAsSubdir, boolean isAcid, boolean hasFollowingStatsTask,
-      Long mmWriteId) throws HiveException {
-
+      Long txnId, int stmtId) throws HiveException {
     List<Path> newFiles = null;
     Table tbl = getTable(tableName);
     HiveConf sessionConf = SessionState.getSessionConf();
@@ -2209,30 +2187,30 @@ private void constructOneLBLocationMap(FileStatus fSta,
       newFiles = Collections.synchronizedList(new ArrayList<Path>());
     }
     // TODO: this assumes both paths are qualified; which they are, currently.
-    if (mmWriteId != null && loadPath.equals(tbl.getPath())) {
+    if (txnId != null && loadPath.equals(tbl.getPath())) {
       Utilities.LOG14535.info("not moving " + loadPath + " to " + tbl.getPath());
       if (replace) {
         Path tableDest = tbl.getPath();
         boolean isAutopurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
         deleteOldPathForReplace(tableDest, tableDest, sessionConf, isAutopurge,
-            new ValidWriteIds.IdPathFilter(mmWriteId, false, true), mmWriteId != null,
+            new JavaUtils.IdPathFilter(txnId, stmtId, false, true), true,
             tbl.isStoredAsSubDirectories() ? tbl.getSkewedColNames().size() : 0);
       }
-      newFiles = listFilesCreatedByQuery(loadPath, mmWriteId);
+      newFiles = listFilesCreatedByQuery(loadPath, txnId, stmtId);
     } else {
       // Either a non-MM query, or a load into MM table from an external source.
       Path tblPath = tbl.getPath(), destPath = tblPath;
       PathFilter filter = FileUtils.HIDDEN_FILES_PATH_FILTER;
-      if (mmWriteId != null) {
+      if (txnId != null) {
         // We will load into MM directory, and delete from the parent if needed.
-        destPath = new Path(destPath, ValidWriteIds.getMmFilePrefix(mmWriteId));
-        filter = replace ? new ValidWriteIds.IdPathFilter(mmWriteId, false, true) : filter;
+        destPath = new Path(destPath, AcidUtils.deltaSubdir(txnId, txnId, stmtId));
+        filter = replace ? new JavaUtils.IdPathFilter(txnId, stmtId, false, true) : filter;
       }
       Utilities.LOG14535.info("moving " + loadPath + " to " + tblPath + " (replace = " + replace + ")");
       if (replace) {
         boolean isAutopurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
         replaceFiles(tblPath, loadPath, destPath, tblPath,
-            sessionConf, isSrcLocal, isAutopurge, newFiles, filter, mmWriteId != null);
+            sessionConf, isSrcLocal, isAutopurge, newFiles, filter, txnId != null);
       } else {
         try {
           FileSystem fs = tbl.getDataLocation().getFileSystem(sessionConf);
@@ -2274,10 +2252,6 @@ private void constructOneLBLocationMap(FileStatus fSta,
       throw new HiveException(e);
     }
 
-    if (mmWriteId != null) {
-      commitMmTableWrite(tbl, mmWriteId);
-    }
-
     fireInsertEvent(tbl, null, replace, newFiles);
   }
 
@@ -4337,25 +4311,4 @@ private void constructOneLBLocationMap(FileStatus fSta,
       throw new HiveException(e);
     }
   }
-
-  public long getNextTableWriteId(String dbName, String tableName) throws HiveException {
-    try {
-      return getMSC().getNextTableWriteId(dbName, tableName);
-    } catch (Exception e) {
-      throw new HiveException(e);
-    }
-  }
-
-  public ValidWriteIds getValidWriteIdsForTable(
-      String dbName, String tableName) throws HiveException {
-    try {
-      // TODO: decode ID ranges here if we use that optimization
-      GetValidWriteIdsResult result = getMSC().getValidWriteIds(dbName, tableName);
-      return new ValidWriteIds(result.getLowWatermarkId(), result.getHighWatermarkId(),
-          result.isSetAreIdsValid() && result.isAreIdsValid(),
-          result.isSetIds() ? new HashSet<Long>(result.getIds()) : null);
-    } catch (Exception e) {
-      throw new HiveException(e);
-    }
-  }
-};
+}

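Note on the Hive.java change above: an MM insert writes directly into its delta directory, so loadTable/loadPartition skip the move; on a replace (INSERT OVERWRITE), everything in the target except the current (txnId, stmtId) delta is removed via the new IdPathFilter. A minimal stand-in for that filter's accept decision, assuming the same delta naming as in the earlier sketch:

    public class IdPathFilterSketch {
      // Stand-in for the JavaUtils.IdPathFilter this patch introduces:
      // does a child directory belong to the current (txnId, stmtId) write?
      static boolean isCurrentWrite(String dirName, long txnId, int stmtId) {
        return dirName.equals(String.format("delta_%07d_%07d_%04d", txnId, txnId, stmtId));
      }

      public static void main(String[] args) {
        long txnId = 12; int stmtId = 0;
        // true: the delta this statement just wrote is kept.
        System.out.println(isCurrentWrite("delta_0000012_0000012_0000", txnId, stmtId));
        // false: data from an earlier transaction is deleted on replace.
        System.out.println(isCurrentWrite("delta_0000009_0000009_0000", txnId, stmtId));
      }
    }
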
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index 5efaf70..6282548 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -1026,8 +1026,4 @@ public class Table implements Serializable {
   public boolean hasDeserializer() {
     return deserializer != null;
   }
-
-  public void setMmNextWriteId(long writeId) {
-    this.tTable.setMmNextWriteId(writeId);
-  }
 };

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index 87fff3e..204e67d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -1636,7 +1636,9 @@ public final class GenMapRedUtils {
     } else {
       fmd = new OrcFileMergeDesc();
     }
-    fmd.setMmWriteId(fsInputDesc.getMmWriteId());
+    fmd.setTxnId(fsInputDesc.getMmWriteId());
+    int stmtId = fsInputDesc.getStatementId();
+    fmd.setStmtId(stmtId == -1 ? 0 : stmtId);
     fmd.setDpCtx(fsInputDesc.getDynPartCtx());
     fmd.setOutputPath(finalName);
     fmd.setHasDynamicPartitions(work.hasDynamicPartitions());

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java
index 64db005..b50f664 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java
@@ -86,7 +86,7 @@ public class SkewJoinResolver implements PhysicalPlanResolver {
       ParseContext pc = physicalContext.getParseContext();
       if (pc.getLoadTableWork() != null) {
         for (LoadTableDesc ltd : pc.getLoadTableWork()) {
-          if (ltd.getMmWriteId() == null) continue;
+          if (ltd.getTxnId() == null) continue;
           // See the path in FSOP that calls fs.exists on finalPath.
           LOG.debug("Not using skew join because the destination table "
               + ltd.getTable().getTableName() + " is an insert_only table");

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 41245c8..b9db582 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -116,7 +116,8 @@ public abstract class BaseSemanticAnalyzer {
    */
   protected Set<FileSinkDesc> acidFileSinks = new HashSet<FileSinkDesc>();
 
-  // whether any ACID table is involved in a query
+  // whether any ACID or insert-only (MM) table is involved in the query;
+  // both require DbTxnManager, and both need recordValidTxns when the Driver acquires locks
   protected boolean acidInQuery;
 
   public static final int HIVE_COLUMN_ORDER_ASC = 1;

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java
index deb51be..e534272 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java
@@ -18,15 +18,8 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-
-import org.apache.hadoop.hive.common.ValidWriteIds;
-
-import java.util.List;
-
-import org.apache.hadoop.hive.ql.exec.Utilities;
 
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -41,13 +34,17 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.ValidReadTxnList;
+import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.exec.ReplCopyTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.metadata.Hive;
@@ -214,8 +211,6 @@ public class ExportSemanticAnalyzer extends BaseSemanticAnalyzer {
 
       int lbLevels = isMmTable && ts.tableHandle.isStoredAsSubDirectories()
           ? ts.tableHandle.getSkewedColNames().size() : 0;
-      ValidWriteIds ids = isMmTable ? db.getValidWriteIdsForTable(
-          ts.tableHandle.getDbName(), ts.tableHandle.getTableName()) : null;
       if (ts.tableHandle.isPartitioned()) {
         for (Partition partition : partitions) {
           Path fromPath = partition.getDataLocation();
@@ -229,7 +224,7 @@ public class ExportSemanticAnalyzer extends BaseSemanticAnalyzer {
             }
             copyTask = ReplCopyTask.getDumpCopyTask(replicationSpec, fromPath, toPartPath, conf);
           } else {
-            CopyWork cw = createCopyWork(isMmTable, lbLevels, ids, fromPath, toPartPath, conf);
+            CopyWork cw = createCopyWork(isMmTable, lbLevels, new ValidReadTxnList(), fromPath, toPartPath, conf);
             copyTask = TaskFactory.get(cw, conf);
           }
           rootTasks.add(copyTask);
@@ -248,7 +243,7 @@ public class ExportSemanticAnalyzer extends BaseSemanticAnalyzer {
           copyTask = ReplCopyTask.getDumpCopyTask(replicationSpec, fromPath, toDataPath, conf);
         } else {
          // TODO# master merge - did master remove this path or did it never exist? we need it for MM
-          CopyWork cw = createCopyWork(isMmTable, lbLevels, ids, fromPath, toDataPath, conf);
+          CopyWork cw = createCopyWork(isMmTable, lbLevels, new ValidReadTxnList(), fromPath, toDataPath, conf);
           copyTask = TaskFactory.get(cw, conf);
         }
         rootTasks.add(copyTask);
@@ -260,14 +255,14 @@ public class ExportSemanticAnalyzer extends BaseSemanticAnalyzer {
     }
   }
 
-  private static CopyWork createCopyWork(boolean isMmTable, int lbLevels, ValidWriteIds ids,
+  private static CopyWork createCopyWork(boolean isMmTable, int lbLevels, ValidTxnList validTxnList,
       Path fromPath, Path toDataPath, Configuration conf) throws IOException {
     List<Path> validPaths = null;
     if (isMmTable) {
       fromPath = fromPath.getFileSystem(conf).makeQualified(fromPath);
-      validPaths = Utilities.getValidMmDirectoriesFromTableOrPart(fromPath, conf, ids, lbLevels);
+      validPaths = Utilities.getValidMmDirectoriesFromTableOrPart(fromPath, conf, validTxnList, lbLevels);
     }
-    if (validPaths == null) {
+    if (validPaths == null || validPaths.isEmpty()) {
       return new CopyWork(fromPath, toDataPath, false); // Not MM, or no need to skip anything.
     } else {
       return createCopyWorkForValidPaths(fromPath, toDataPath, validPaths);

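Note on the export path above: it relies on a small contract of getValidMmDirectoriesFromTableOrPart -- null means every child of the directory is committed (copy the whole tree), otherwise only the listed subdirectories are copied; the patched check additionally treats an empty list like null. A minimal sketch of that branch, with the returned strings as illustrative placeholders rather than real CopyWork construction:

    import java.util.Arrays;
    import java.util.List;

    public class ExportCopySketch {
      static String planCopy(List<String> validPaths) {
        // null or empty: nothing to skip, copy the directory as-is.
        if (validPaths == null || validPaths.isEmpty()) {
          return "copyAll";
        }
        // Otherwise copy only the committed delta subdirectories.
        return "copyOnly:" + validPaths;
      }

      public static void main(String[] args) {
        System.out.println(planCopy(null));                                       // copyAll
        System.out.println(planCopy(Arrays.asList("delta_0000003_0000003_0000"))); // copyOnly
      }
    }
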
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index 99a7392..a220d1a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -33,13 +33,11 @@ import java.util.TreeMap;
 
 import org.antlr.runtime.tree.Tree;
 import org.apache.commons.lang.ObjectUtils;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.common.ValidWriteIds;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
@@ -57,6 +55,7 @@ import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -303,31 +302,31 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
       tableExists = true;
     }
 
-    Long mmWriteId = null;
+    Long txnId = null;
+    int stmtId = 0;
     if (table != null && MetaStoreUtils.isInsertOnlyTable(table.getParameters())) {
-      mmWriteId = x.getHive().getNextTableWriteId(table.getDbName(), table.getTableName());
+      txnId = SessionState.get().getTxnMgr().getCurrentTxnId();
     } else if (table == null && isSourceMm) {
       // We could import everything as is - directories and IDs, but that won't work with ACID
       // txn ids in future. So, let's import everything into the new MM directory with ID == 0.
-      mmWriteId = 0l;
+      txnId = 0L;
     }
-    //todo due to master merge on May 4, tblDesc has been changed from CreateTableDesc to ImportTableDesc
-    // which may result in Import test failure
+    // TODO: due to the master merge, tblDesc is now an ImportTableDesc rather than a CreateTableDesc
     /*
-    if (mmWriteId != null) {
-      tblDesc.setInitialMmWriteId(mmWriteId);
+    if (txnId != null) {
+      tblDesc.setInitialMmWriteId(txnId);
     }
     */
     if (!replicationSpec.isInReplicationScope()) {
       createRegularImportTasks(
           tblDesc, partitionDescs,
           isPartSpecSet, replicationSpec, table,
-          fromURI, fs, wh, x, mmWriteId, isSourceMm);
+          fromURI, fs, wh, x, txnId, stmtId, isSourceMm);
     } else {
       createReplImportTasks(
           tblDesc, partitionDescs,
           isPartSpecSet, replicationSpec, waitOnPrecursor, table,
-          fromURI, fs, wh, x, mmWriteId, isSourceMm);
+          fromURI, fs, wh, x, txnId, stmtId, isSourceMm);
     }
     return tableExists;
   }
@@ -362,17 +361,17 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
 
   private static Task<?> loadTable(URI fromURI, Table table, boolean replace, Path tgtPath,
       ReplicationSpec replicationSpec, EximUtil.SemanticAnalyzerWrapperContext x,
-      Long mmWriteId, boolean isSourceMm) {
+      Long txnId, int stmtId, boolean isSourceMm) {
     Path dataPath = new Path(fromURI.toString(), EximUtil.DATA_PATH_NAME);
-    Path destPath = mmWriteId == null ? x.getCtx().getExternalTmpPath(tgtPath)
-        : new Path(tgtPath, ValidWriteIds.getMmFilePrefix(mmWriteId));
+    Path destPath = txnId == null ? x.getCtx().getExternalTmpPath(tgtPath)
+        : new Path(tgtPath, AcidUtils.deltaSubdir(txnId, txnId, stmtId));
     Utilities.LOG14535.info("adding import work for table with source location: "
         + dataPath + "; table: " + tgtPath + "; copy destination " + destPath + "; mm "
-        + mmWriteId + " (src " + isSourceMm + ") for " + (table == null ? "a new table" : table.getTableName()));
+        + txnId + " (src " + isSourceMm + ") for " + (table == null ? "a new table" : table.getTableName()));
 
     Task<?> copyTask = null;
     if (replicationSpec.isInReplicationScope()) {
-      if (isSourceMm || mmWriteId != null) {
+      if (isSourceMm || txnId != null) {
         // TODO: ReplCopyTask is completely screwed. Need to support when it's not as screwed.
         throw new RuntimeException(
             "Not supported right now because Replication is completely screwed");
@@ -385,7 +384,9 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
     }
 
     LoadTableDesc loadTableWork = new LoadTableDesc(destPath,
-        Utilities.getTableDesc(table), new TreeMap<String, String>(), replace, mmWriteId);
+        Utilities.getTableDesc(table), new TreeMap<String, String>(), replace, txnId);
+    loadTableWork.setTxnId(txnId);
+    loadTableWork.setStmtId(stmtId);
     MoveWork mv = new MoveWork(x.getInputs(), x.getOutputs(), loadTableWork, null, false);
     Task<?> loadTableTask = TaskFactory.get(mv, x.getConf());
     copyTask.addDependentTask(loadTableTask);
@@ -433,7 +434,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
 
  private static Task<?> addSinglePartition(URI fromURI, FileSystem fs, ImportTableDesc tblDesc,
       Table table, Warehouse wh, AddPartitionDesc addPartitionDesc, ReplicationSpec replicationSpec,
-      EximUtil.SemanticAnalyzerWrapperContext x, Long mmWriteId, boolean isSourceMm,
+      EximUtil.SemanticAnalyzerWrapperContext x, Long txnId, int stmtId, boolean isSourceMm,
       Task<?> commitTask)
       throws MetaException, IOException, HiveException {
     AddPartitionDesc.OnePartitionDesc partSpec = addPartitionDesc.getPartition(0);
@@ -452,17 +453,17 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
           + partSpecToString(partSpec.getPartSpec())
           + " with source location: " + srcLocation);
       Path tgtLocation = new Path(partSpec.getLocation());
-      Path destPath = mmWriteId == null ? x.getCtx().getExternalTmpPath(tgtLocation)
-          : new Path(tgtLocation, ValidWriteIds.getMmFilePrefix(mmWriteId));
-      Path moveTaskSrc =  mmWriteId == null ? destPath : tgtLocation;
+      Path destPath = txnId == null ? x.getCtx().getExternalTmpPath(tgtLocation)
+          : new Path(tgtLocation, AcidUtils.deltaSubdir(txnId, txnId, stmtId));
+      Path moveTaskSrc =  txnId == null ? destPath : tgtLocation;
       Utilities.LOG14535.info("adding import work for partition with source location: "
           + srcLocation + "; target: " + tgtLocation + "; copy dest " + destPath + "; mm "
-          + mmWriteId + " (src " + isSourceMm + ") for " + partSpecToString(partSpec.getPartSpec()));
+          + txnId + " (src " + isSourceMm + ") for " + partSpecToString(partSpec.getPartSpec()));
 
 
       Task<?> copyTask = null;
       if (replicationSpec.isInReplicationScope()) {
-        if (isSourceMm || mmWriteId != null) {
+        if (isSourceMm || txnId != null) {
           // TODO: ReplCopyTask is completely screwed. Need to support when it's not as screwed.
           throw new RuntimeException(
               "Not supported right now because Replication is completely screwed");
@@ -478,11 +479,13 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
       Task<?> addPartTask = TaskFactory.get(new DDLWork(x.getInputs(),
           x.getOutputs(), addPartitionDesc), x.getConf());
       LoadTableDesc loadTableWork = new LoadTableDesc(moveTaskSrc, Utilities.getTableDesc(table),
-          partSpec.getPartSpec(), replicationSpec.isReplace(), mmWriteId);
+          partSpec.getPartSpec(), replicationSpec.isReplace(), txnId);
+      loadTableWork.setTxnId(txnId);
+      loadTableWork.setStmtId(stmtId);
       loadTableWork.setInheritTableSpecs(false);
       // Do not commit the write ID from each task; need to commit once.
       // TODO: we should just change the import to use a single MoveTask, like dynparts.
-      loadTableWork.setIntermediateInMmWrite(mmWriteId != null);
+      loadTableWork.setIntermediateInMmWrite(txnId != null);
       Task<?> loadPartTask = TaskFactory.get(new MoveWork(
           x.getInputs(), x.getOutputs(), loadTableWork, null, false), x.getConf());
       copyTask.addDependentTask(loadPartTask);
@@ -778,21 +781,21 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
   private static void createRegularImportTasks(
       ImportTableDesc tblDesc, List<AddPartitionDesc> partitionDescs, boolean isPartSpecSet,
       ReplicationSpec replicationSpec, Table table, URI fromURI, FileSystem fs, Warehouse wh,
-      EximUtil.SemanticAnalyzerWrapperContext x, Long mmWriteId, boolean isSourceMm)
+      EximUtil.SemanticAnalyzerWrapperContext x, Long txnId, int stmtId, boolean isSourceMm)
       throws HiveException, URISyntaxException, IOException, MetaException {
 
     if (table != null) {
       if (table.isPartitioned()) {
         x.getLOG().debug("table partitioned");
         Task<?> ict = createImportCommitTask(
-            table.getDbName(), table.getTableName(), mmWriteId, x.getConf());
+            table.getDbName(), table.getTableName(), txnId, stmtId, x.getConf());
 
         for (AddPartitionDesc addPartitionDesc : partitionDescs) {
           Map<String, String> partSpec = addPartitionDesc.getPartition(0).getPartSpec();
           org.apache.hadoop.hive.ql.metadata.Partition ptn = null;
           if ((ptn = x.getHive().getPartition(table, partSpec, false)) == null) {
             x.getTasks().add(addSinglePartition(
-                fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec, x, mmWriteId, isSourceMm, ict));
+                fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec, x, txnId, stmtId, isSourceMm, ict));
           } else {
             throw new SemanticException(
                 ErrorMsg.PARTITION_EXISTS.getMsg(partSpecToString(partSpec)));
@@ -804,7 +807,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
         Path tgtPath = new Path(table.getDataLocation().toString());
         FileSystem tgtFs = FileSystem.get(tgtPath.toUri(), x.getConf());
         checkTargetLocationEmpty(tgtFs, tgtPath, replicationSpec, x);
-        loadTable(fromURI, table, false, tgtPath, replicationSpec, x, mmWriteId, isSourceMm);
+        loadTable(fromURI, table, false, tgtPath, replicationSpec, x, txnId, stmtId, isSourceMm);
       }
       // Set this to read because we can't overwrite any existing partitions
       x.getOutputs().add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
@@ -821,10 +824,10 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
 
       if (isPartitioned(tblDesc)) {
         Task<?> ict = createImportCommitTask(
-            tblDesc.getDatabaseName(), tblDesc.getTableName(), mmWriteId, x.getConf());
+            tblDesc.getDatabaseName(), tblDesc.getTableName(), txnId, stmtId, x.getConf());
         for (AddPartitionDesc addPartitionDesc : partitionDescs) {
           t.addDependentTask(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc,
-            replicationSpec, x, mmWriteId, isSourceMm, ict));
+            replicationSpec, x, txnId, stmtId, isSourceMm, ict));
         }
       } else {
         x.getLOG().debug("adding dependent CopyWork/MoveWork for table");
@@ -841,7 +844,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
           }
           FileSystem tgtFs = FileSystem.get(tablePath.toUri(), x.getConf());
           checkTargetLocationEmpty(tgtFs, tablePath, replicationSpec,x);
-          t.addDependentTask(loadTable(fromURI, table, false, tablePath, replicationSpec, x, mmWriteId, isSourceMm));
+          t.addDependentTask(loadTable(fromURI, table, false, tablePath, replicationSpec, x, txnId, stmtId, isSourceMm));
         }
       }
       x.getTasks().add(t);
@@ -849,10 +852,10 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   private static Task<?> createImportCommitTask(
-      String dbName, String tblName, Long mmWriteId, HiveConf conf) {
+      String dbName, String tblName, Long txnId, int stmtId, HiveConf conf) {
     @SuppressWarnings("unchecked")
-    Task<ImportCommitWork> ict = (mmWriteId == null) ? null : TaskFactory.get(
-        new ImportCommitWork(dbName, tblName, mmWriteId), conf);
+    Task<ImportCommitWork> ict = (txnId == null) ? null : TaskFactory.get(
+        new ImportCommitWork(dbName, tblName, txnId, stmtId), conf);
     return ict;
   }
 
@@ -864,7 +867,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
       List<AddPartitionDesc> partitionDescs,
       boolean isPartSpecSet, ReplicationSpec replicationSpec, boolean waitOnPrecursor,
       Table table, URI fromURI, FileSystem fs, Warehouse wh,
-      EximUtil.SemanticAnalyzerWrapperContext x, Long mmWriteId, boolean isSourceMm)
+      EximUtil.SemanticAnalyzerWrapperContext x, Long txnId, int stmtId, boolean isSourceMm)
       throws HiveException, URISyntaxException, IOException, MetaException {
 
     Task<?> dr = null;
@@ -933,15 +936,15 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
       if (!replicationSpec.isMetadataOnly()) {
         if (isPartitioned(tblDesc)) {
           Task<?> ict = createImportCommitTask(
-              tblDesc.getDatabaseName(), tblDesc.getTableName(), mmWriteId, x.getConf());
+              tblDesc.getDatabaseName(), tblDesc.getTableName(), txnId, stmtId, x.getConf());
           for (AddPartitionDesc addPartitionDesc : partitionDescs) {
             addPartitionDesc.setReplicationSpec(replicationSpec);
             t.addDependentTask(
-                addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec, x, mmWriteId, isSourceMm, ict));
+                addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec, x, txnId, stmtId, isSourceMm, ict));
           }
         } else {
           x.getLOG().debug("adding dependent CopyWork/MoveWork for table");
-          t.addDependentTask(loadTable(fromURI, table, true, new Path(tblDesc.getLocation()), replicationSpec, x, mmWriteId, isSourceMm));
+          t.addDependentTask(loadTable(fromURI, table, true, new Path(tblDesc.getLocation()), replicationSpec, x, txnId, stmtId, isSourceMm));
         }
       }
       if (dr == null){
@@ -961,11 +964,11 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
           Map<String, String> partSpec = addPartitionDesc.getPartition(0).getPartSpec();
           org.apache.hadoop.hive.ql.metadata.Partition ptn = null;
           Task<?> ict = replicationSpec.isMetadataOnly() ? null : createImportCommitTask(
-              tblDesc.getDatabaseName(), tblDesc.getTableName(), mmWriteId, x.getConf());
+              tblDesc.getDatabaseName(), tblDesc.getTableName(), txnId, stmtId, x.getConf());
           if ((ptn = x.getHive().getPartition(table, partSpec, false)) == null) {
             if (!replicationSpec.isMetadataOnly()){
               x.getTasks().add(addSinglePartition(
-                  fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec, x, mmWriteId, isSourceMm, ict));
+                  fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec, x, txnId, stmtId, isSourceMm, ict));
             }
           } else {
             // If replicating, then the partition already existing means we need to replace, maybe, if
@@ -973,7 +976,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
             if (replicationSpec.allowReplacementInto(ptn)){
               if (!replicationSpec.isMetadataOnly()){
                 x.getTasks().add(addSinglePartition(
-                    fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec, x, mmWriteId, isSourceMm, ict));
+                    fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec, x, txnId, stmtId, isSourceMm, ict));
               } else {
                 x.getTasks().add(alterSinglePartition(
                     fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec, ptn, x));
@@ -1002,7 +1005,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
         if (!replicationSpec.isMetadataOnly()) {
           // repl-imports are replace-into unless the event is insert-into
           loadTable(fromURI, table, replicationSpec.isReplace(), new Path(fromURI),
-            replicationSpec, x, mmWriteId, isSourceMm);
+            replicationSpec, x, txnId, stmtId, isSourceMm);
         } else {
           x.getTasks().add(alterTableTask(tblDesc, x, replicationSpec));
         }

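Note on the import changes above: both a copy destination and a MoveTask source are picked depending on whether the target is MM. Non-MM imports stage data in an external temp path and move from there; MM imports copy straight into the target's delta directory, and the "move" source is the target itself so the move becomes a no-op. A minimal sketch, assuming the delta naming from the earlier sketches and using plain strings in place of Path objects:

    public class ImportDestSketch {
      static String deltaSubdir(long txnId, int stmtId) {
        return String.format("delta_%07d_%07d_%04d", txnId, txnId, stmtId);
      }

      // Mirrors the destPath / moveTaskSrc choice in addSinglePartition above.
      static String[] pickPaths(String tgtLocation, String externalTmp, Long txnId, int stmtId) {
        String destPath = (txnId == null)
            ? externalTmp                                     // non-MM: stage, then move
            : tgtLocation + "/" + deltaSubdir(txnId, stmtId); // MM: copy in place
        String moveTaskSrc = (txnId == null) ? destPath : tgtLocation;
        return new String[] { destPath, moveTaskSrc };
      }

      public static void main(String[] args) {
        String[] mm = pickPaths("/warehouse/t/p=1", "/tmp/ext", 12L, 0);
        System.out.println("MM:     dest=" + mm[0] + " moveSrc=" + mm[1]);
        String[] nonMm = pickPaths("/warehouse/t/p=1", "/tmp/ext", null, 0);
        System.out.println("non-MM: dest=" + nonMm[0] + " moveSrc=" + nonMm[1]);
      }
    }
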
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java
index d3b4da1..f31775e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.parse;
 
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.common.ValidWriteIds;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.ql.Driver;
@@ -44,7 +43,6 @@ import java.util.Set;
 public class IndexUpdater {
   private List<LoadTableDesc> loadTableWork;
   private HiveConf conf;
-  private Configuration parentConf;
   // Assumes one instance of this + single-threaded compilation for each query.
   private Hive hive;
   private List<Task<? extends Serializable>> tasks;
@@ -54,7 +52,6 @@ public class IndexUpdater {
   public IndexUpdater(List<LoadTableDesc> loadTableWork, Set<ReadEntity> inputs, Configuration conf) {
     this.loadTableWork = loadTableWork;
     this.inputs = inputs;
-    this.parentConf = conf;
     this.conf = new HiveConf(conf, IndexUpdater.class);
     this.tasks = new LinkedList<Task<? extends Serializable>>();
   }
@@ -63,7 +60,6 @@ public class IndexUpdater {
       Configuration conf) {
     this.loadTableWork = new LinkedList<LoadTableDesc>();
     this.loadTableWork.add(loadTableWork);
-    this.parentConf = conf;
     this.conf = new HiveConf(conf, IndexUpdater.class);
     this.tasks = new LinkedList<Task<? extends Serializable>>();
     this.inputs = inputs;
@@ -79,15 +75,15 @@ public class IndexUpdater {
       Map<String, String> partSpec = ltd.getPartitionSpec();
       if (partSpec == null || partSpec.size() == 0) {
         //unpartitioned table, update whole index
-        doIndexUpdate(tblIndexes, ltd.getMmWriteId());
+        doIndexUpdate(tblIndexes);
       } else {
-        doIndexUpdate(tblIndexes, partSpec, ltd.getMmWriteId());
+        doIndexUpdate(tblIndexes, partSpec);
       }
     }
     return tasks;
   }
 
-  private void doIndexUpdate(List<Index> tblIndexes, Long mmWriteId) throws HiveException {
+  private void doIndexUpdate(List<Index> tblIndexes) throws HiveException {
     for (Index idx : tblIndexes) {
       StringBuilder sb = new StringBuilder();
       sb.append("ALTER INDEX ");
@@ -96,21 +92,20 @@ public class IndexUpdater {
       sb.append(idx.getDbName()).append('.');
       sb.append(idx.getOrigTableName());
       sb.append(" REBUILD");
-      compileRebuild(sb.toString(), idx, mmWriteId);
+      compileRebuild(sb.toString());
     }
   }
 
   private void doIndexUpdate(List<Index> tblIndexes, Map<String, String>
-      partSpec, Long mmWriteId) throws HiveException {
+      partSpec) throws HiveException {
     for (Index index : tblIndexes) {
       if (containsPartition(index, partSpec)) {
-        doIndexUpdate(index, partSpec, mmWriteId);
+        doIndexUpdate(index, partSpec);
       }
     }
   }
 
-  private void doIndexUpdate(Index index, Map<String, String> partSpec, Long mmWriteId)
-      throws HiveException {
+  private void doIndexUpdate(Index index, Map<String, String> partSpec) {
     StringBuilder ps = new StringBuilder();
     boolean first = true;
     ps.append("(");
@@ -134,18 +129,12 @@ public class IndexUpdater {
     sb.append(" PARTITION ");
     sb.append(ps.toString());
     sb.append(" REBUILD");
-    compileRebuild(sb.toString(), index, mmWriteId);
+    compileRebuild(sb.toString());
   }
 
-  private void compileRebuild(String query, Index index, Long mmWriteId)
-      throws HiveException {
+  private void compileRebuild(String query) {
     Driver driver = new Driver(this.conf);
     driver.compile(query, false);
-    if (mmWriteId != null) {
-      // TODO: this is rather fragile
-      ValidWriteIds.addCurrentToConf(
-          parentConf, index.getDbName(), index.getOrigTableName(), mmWriteId);
-    }
     tasks.addAll(driver.getPlan().getRootTasks());
     inputs.addAll(driver.getPlan().getInputs());
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
index 04e8cac..5ef77f5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hive.ql.parse;
 
 import org.apache.hadoop.hive.conf.HiveConf.StrictChecks;
 
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-
 import java.io.IOException;
 import java.io.Serializable;
 import java.net.URI;
@@ -271,19 +269,18 @@ public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer {
       }
     }
 
-    Long mmWriteId = null;
+    Long txnId = null;
+    int stmtId = 0;
     Table tbl = ts.tableHandle;
     if (MetaStoreUtils.isInsertOnlyTable(tbl.getParameters())) {
-      try {
-        mmWriteId = db.getNextTableWriteId(tbl.getDbName(), tbl.getTableName());
-      } catch (HiveException e) {
-        throw new SemanticException(e);
-      }
+      txnId = 0L; // TODO: to be replaced with the real txnId from the Driver
     }
 
     LoadTableDesc loadTableWork;
     loadTableWork = new LoadTableDesc(new Path(fromURI),
-      Utilities.getTableDesc(ts.tableHandle), partSpec, isOverWrite, mmWriteId);
+      Utilities.getTableDesc(ts.tableHandle), partSpec, isOverWrite, txnId);
+    loadTableWork.setTxnId(txnId);
+    loadTableWork.setStmtId(stmtId);
     if (preservePartitionSpecs){
       // Note : preservePartitionSpecs=true implies inheritTableSpecs=false but
       // but preservePartitionSpecs=false(default) here is not sufficient enough
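
The MetaStoreUtils.isInsertOnlyTable check used above is what gates the new txnId path. A minimal sketch of that predicate, assuming the standard table properties; this is not the exact metastore implementation:

import java.util.Map;

class MmTableCheckSketch {
  // Sketch only: an insert-only (MM) table is a transactional table whose
  // transactional_properties value is insert_only.
  static boolean isInsertOnlyTable(Map<String, String> params) {
    return params != null
        && "true".equalsIgnoreCase(params.get("transactional"))
        && "insert_only".equalsIgnoreCase(params.get("transactional_properties"));
  }
}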

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 1bd4f26..29bc183 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -74,7 +74,6 @@ import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -6707,7 +6706,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       }
       input = genReduceSinkPlan(input, partnCols, sortCols, order.toString(), nullOrder.toString(),
               maxReducers, (AcidUtils.isFullAcidTable(dest_tab) ?
-              getAcidType(dest_tab, table_desc.getOutputFileFormatClass(), dest) : AcidUtils.Operation.NOT_ACID));
+              getAcidType(table_desc.getOutputFileFormatClass(), dest) : AcidUtils.Operation.NOT_ACID));
       reduceSinkOperatorsAddedByEnforceBucketingSorting.add((ReduceSinkOperator)input.getParentOperators().get(0));
       ctx.setMultiFileSpray(multiFileSpray);
       ctx.setNumFiles(numFiles);
@@ -6786,7 +6785,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     ListBucketingCtx lbCtx = null;
     Map<String, String> partSpec = null;
     boolean isMmTable = false, isMmCtas = false;
-    Long mmWriteId = null;
+    Long txnId = null;
 
     switch (dest_type.intValue()) {
     case QBMetaData.DEST_TABLE: {
@@ -6840,17 +6839,18 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       if (!isNonNativeTable) {
         AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
         if (destTableIsAcid) {
-          acidOp = getAcidType(dest_tab, table_desc.getOutputFileFormatClass(), dest);
+          acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest);
           checkAcidConstraints(qb, table_desc, dest_tab, acidOp);
         }
-        try {
-          mmWriteId = getMmWriteId(dest_tab, isMmTable);
-        } catch (HiveException e) {
-          throw new SemanticException(e);
+        if (MetaStoreUtils.isInsertOnlyTable(table_desc.getProperties())) {
+          acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest);
+        }
+        if (isMmTable) {
+          txnId = SessionState.get().getTxnMgr().getCurrentTxnId();
         }
         boolean isReplace = !qb.getParseInfo().isInsertIntoTable(
             dest_tab.getDbName(), dest_tab.getTableName());
-        ltd = new LoadTableDesc(queryTmpdir, table_desc, dpCtx, acidOp, isReplace, mmWriteId);
+        ltd = new LoadTableDesc(queryTmpdir, table_desc, dpCtx, acidOp, isReplace, txnId);
         ltd.setLbCtx(lbCtx);
         loadTableWork.add(ltd);
       } else {
@@ -6903,16 +6903,16 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           dest_part.isStoredAsSubDirectories(), conf);
       AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
       if (destTableIsAcid) {
-        acidOp = getAcidType(dest_tab, table_desc.getOutputFileFormatClass(), dest);
+        acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest);
         checkAcidConstraints(qb, table_desc, dest_tab, acidOp);
       }
-      try {
-        mmWriteId = getMmWriteId(dest_tab, isMmTable);
-      } catch (HiveException e) {
-        // How is this a semantic exception? Stupid Java and signatures.
-        throw new SemanticException(e);
+      if (MetaStoreUtils.isInsertOnlyTable(dest_part.getTable().getParameters())) {
+        acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest);
       }
-      ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), acidOp, mmWriteId);
+      if (isMmTable) {
+        txnId = SessionState.get().getTxnMgr().getCurrentTxnId();
+      }
+      ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), acidOp, txnId);
       ltd.setReplace(!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),
           dest_tab.getTableName()));
       ltd.setLbCtx(lbCtx);
@@ -6946,10 +6946,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         destTableIsMaterialization = tblDesc.isMaterialization();
         if (!destTableIsTemporary && MetaStoreUtils.isInsertOnlyTable(tblDesc.getTblProps(), true)) {
           isMmTable = isMmCtas = true;
-          // TODO# this should really get current ACID txn; assuming ACID works correctly the txn
-          //       should have been opened to create the ACID table. For now use the first ID.
-          mmWriteId = 0l;
-          tblDesc.setInitialMmWriteId(mmWriteId);
+          txnId = SessionState.get().getTxnMgr().getCurrentTxnId();
+          tblDesc.setInitialMmWriteId(txnId);
         }
       } else if (viewDesc != null) {
         field_schemas = new ArrayList<FieldSchema>();
@@ -7078,11 +7076,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       genPartnCols(dest, input, qb, table_desc, dest_tab, rsCtx);
     }
 
-    assert isMmTable == (mmWriteId != null);
+    assert isMmTable == (txnId != null);
     FileSinkDesc fileSinkDesc = createFileSinkDesc(dest, table_desc, dest_part,
         dest_path, currentTableId, destTableIsAcid, destTableIsTemporary,
         destTableIsMaterialization, queryTmpdir, rsCtx, dpCtx, lbCtx, fsRS,
-        canBeMerged, mmWriteId, isMmCtas);
+        canBeMerged, txnId, isMmCtas);
     if (isMmCtas) {
       // Add FSD so that the LoadTask compilation could fix up its path to avoid the move.
       tableDesc.setWriter(fileSinkDesc);
@@ -7185,12 +7183,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     return result;
   }
 
-  private static Long getMmWriteId(Table tbl, boolean isMmTable) throws HiveException {
-    if (!isMmTable) return null;
-    // Get the next write ID for this table. We will prefix files with this write ID.
-    return Hive.get().getNextTableWriteId(tbl.getDbName(), tbl.getTableName());
-  }
-
   private FileSinkDesc createFileSinkDesc(String dest, TableDesc table_desc,
       Partition dest_part, Path dest_path, int currentTableId,
       boolean destTableIsAcid, boolean destTableIsTemporary,
@@ -7210,7 +7202,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         MetaStoreUtils.isInsertOnlyTable(dest_part.getTable().getParameters()))
         || (table_desc != null && MetaStoreUtils.isInsertOnlyTable(table_desc.getProperties()));
 
-    if (destTableIsAcid && !isDestInsertOnly) {
+    if (isDestInsertOnly) {
+      fileSinkDesc.setWriteType(Operation.INSERT);
+      acidFileSinks.add(fileSinkDesc);
+    }
+
+    if (destTableIsAcid) {
       AcidUtils.Operation wt = updating(dest) ? AcidUtils.Operation.UPDATE :
           (deleting(dest) ? AcidUtils.Operation.DELETE : AcidUtils.Operation.INSERT);
       fileSinkDesc.setWriteType(wt);
@@ -7422,7 +7419,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   private void checkAcidConstraints(QB qb, TableDesc tableDesc,
                                     Table table, AcidUtils.Operation acidOp) throws SemanticException {
     String tableName = tableDesc.getTableName();
-    if (!qb.getParseInfo().isInsertIntoTable(tableName) && !Operation.INSERT_ONLY.equals(acidOp)) {
+    if (!qb.getParseInfo().isInsertIntoTable(tableName)) {
       LOG.debug("Couldn't find table " + tableName + " in insertIntoTable");
       throw new SemanticException(ErrorMsg.NO_INSERT_OVERWRITE_WITH_ACID, tableName);
     }
@@ -7437,7 +7434,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     */
     conf.set(AcidUtils.CONF_ACID_KEY, "true");
 
-    if (!Operation.NOT_ACID.equals(acidOp) && !Operation.INSERT_ONLY.equals(acidOp)) {
+    if (!Operation.NOT_ACID.equals(acidOp)) {
       if (table.getNumBuckets() < 1) {
         throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, table.getTableName());
       }
@@ -11875,7 +11872,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       if (p != null) {
         tbl = p.getTable();
       }
-      if (tbl != null && AcidUtils.isFullAcidTable(tbl)) {
+      if (tbl != null && (AcidUtils.isFullAcidTable(tbl) || MetaStoreUtils.isInsertOnlyTable(tbl.getParameters()))) {
         acidInQuery = true;
         checkAcidTxnManager(tbl);
       }
@@ -11938,7 +11935,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         tbl = writeEntity.getTable();
       }
 
-      if (tbl != null && AcidUtils.isFullAcidTable(tbl)) {
+      if (tbl != null && (AcidUtils.isFullAcidTable(tbl) || MetaStoreUtils.isInsertOnlyTable(tbl.getParameters()))) {
         acidInQuery = true;
         checkAcidTxnManager(tbl);
       }
@@ -13603,12 +13600,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
             AcidUtils.Operation.INSERT);
   }
 
-  private AcidUtils.Operation getAcidType(
-      Table table, Class<? extends OutputFormat> of, String dest) {
+  private AcidUtils.Operation getAcidType(Class<? extends OutputFormat> of, String dest) {
     if (SessionState.get() == null || !SessionState.get().getTxnMgr().supportsAcid()) {
       return AcidUtils.Operation.NOT_ACID;
-    } else if (MetaStoreUtils.isInsertOnlyTable(table.getParameters())) {
-      return AcidUtils.Operation.INSERT_ONLY;
     } else if (isAcidOutputFormat(of)) {
       return getAcidType(dest);
     } else {
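
Two of the hunks above widen the transaction-manager gate from full-ACID tables to MM tables as well. The combined predicate, written out as a sketch (requiresAcidTxnManager is an illustrative name, not a method in the patch; it assumes the Hive classes already imported in this file, i.e. Table, AcidUtils, and MetaStoreUtils):

// Sketch: after this change, a query that touches either a full-ACID table
// or an insert-only (MM) table must run under an ACID-capable txn manager.
private static boolean requiresAcidTxnManager(Table tbl) {
  return tbl != null
      && (AcidUtils.isFullAcidTable(tbl)
          || MetaStoreUtils.isInsertOnlyTable(tbl.getParameters()));
}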

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index 6629a0c..356ab6f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -20,23 +20,18 @@ package org.apache.hadoop.hive.ql.parse;
 
 import java.io.Serializable;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashSet;
-import java.util.LinkedList;
 import java.util.List;
-import java.util.Queue;
 import java.util.Set;
-import java.util.Stack;
 
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.HiveStatsUtils;
-import org.apache.hadoop.hive.common.ValidWriteIds;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.ql.Context;
@@ -44,7 +39,6 @@ import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.exec.ColumnStatsTask;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
-import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.StatsTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
@@ -56,7 +50,6 @@ import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils;
-import org.apache.hadoop.hive.ql.optimizer.physical.AnnotateRunTimeStatsOptimizer;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.AnalyzeRewriteContext;
 import org.apache.hadoop.hive.ql.plan.ColumnStatsDesc;
 import org.apache.hadoop.hive.ql.plan.ColumnStatsWork;
@@ -319,21 +312,22 @@ public abstract class TaskCompiler {
   private void setLoadFileLocation(
       final ParseContext pCtx, LoadFileDesc lfd) throws SemanticException {
     // CTAS; make the movetask's destination directory the table's destination.
-    Long mmWriteIdForCtas = null;
+    Long txnIdForCtas = null;
+    int stmtId = 0; // a CTAS cannot be part of a multi-statement txn
     FileSinkDesc dataSinkForCtas = null;
     String loc = null;
     if (pCtx.getQueryProperties().isCTAS()) {
       CreateTableDesc ctd = pCtx.getCreateTable();
       dataSinkForCtas = ctd.getAndUnsetWriter();
-      mmWriteIdForCtas = ctd.getInitialMmWriteId();
+      txnIdForCtas = ctd.getInitialMmWriteId();
       loc = ctd.getLocation();
     } else {
       loc = pCtx.getCreateViewDesc().getLocation();
     }
     Path location = (loc == null) ? getDefaultCtasLocation(pCtx) : new Path(loc);
-    if (mmWriteIdForCtas != null) {
+    if (txnIdForCtas != null) {
       dataSinkForCtas.setDirName(location);
-      location = new Path(location, ValidWriteIds.getMmFilePrefix(mmWriteIdForCtas));
+      location = new Path(location, AcidUtils.deltaSubdir(txnIdForCtas, txnIdForCtas, stmtId));
       lfd.setSourcePath(location);
       Utilities.LOG14535.info("Setting MM CTAS to " + location);
     }
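
The ValidWriteIds.getMmFilePrefix naming is replaced here by the standard ACID delta directory. A sketch of the naming convention behind AcidUtils.deltaSubdir, with the zero-padding widths Hive's AcidUtils used at the time (verify against AcidUtils if the exact widths matter):

class DeltaNamingSketch {
  // Sketch of the delta subdirectory name: 7-digit zero-padded min/max txn
  // ids plus a 4-digit statement id.
  static String deltaSubdir(long minTxnId, long maxTxnId, int stmtId) {
    return String.format("delta_%07d_%07d_%04d", minTxnId, maxTxnId, stmtId);
  }

  public static void main(String[] args) {
    System.out.println(deltaSubdir(5, 5, 0));  // delta_0000005_0000005_0000
  }
}

So for the CTAS above, with txnIdForCtas = 5 and stmtId = 0, the load source becomes <tableLocation>/delta_0000005_0000005_0000.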

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java
index 615c63d..8f6166a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java
@@ -28,7 +28,8 @@ public class FileMergeDesc extends AbstractOperatorDesc {
   private int listBucketingDepth;
   private boolean hasDynamicPartitions;
   private boolean isListBucketingAlterTableConcatenate;
-  private Long mmWriteId;
+  private Long txnId;
+  private int stmtId;
 
   public FileMergeDesc(DynamicPartitionCtx dynPartCtx, Path outputDir) {
     this.dpCtx = dynPartCtx;
@@ -75,11 +76,19 @@ public class FileMergeDesc extends AbstractOperatorDesc {
     this.isListBucketingAlterTableConcatenate = isListBucketingAlterTableConcatenate;
   }
 
-  public Long getMmWriteId() {
-    return mmWriteId;
+  public Long getTxnId() {
+    return txnId;
   }
 
-  public void setMmWriteId(Long mmWriteId) {
-    this.mmWriteId = mmWriteId;
+  public void setTxnId(Long txnId) {
+    this.txnId = txnId;
+  }
+
+  public int getStmtId() {
+    return stmtId;
+  }
+
+  public void setStmtId(int stmtId) {
+    this.stmtId = stmtId;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
index d0d5acb..7f4cabe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
@@ -22,7 +22,6 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.ValidWriteIds;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
@@ -203,7 +202,7 @@ public class FileSinkDesc extends AbstractOperatorDesc {
   public Path getMergeInputDirName() {
     Path root = getFinalDirName();
     if (mmWriteId == null) return root;
-    return new Path(root, ValidWriteIds.getMmFilePrefix(mmWriteId));
+    return new Path(root, AcidUtils.deltaSubdir(txnId, txnId, 0));
   }
 
   @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
@@ -476,6 +475,7 @@ public class FileSinkDesc extends AbstractOperatorDesc {
   }
   public void setTransactionId(long id) {
     txnId = id;
+    setMmWriteId(id);
   }
   public long getTransactionId() {
     return txnId;
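
With setTransactionId now also seeding mmWriteId, getMergeInputDirName resolves under the final directory using the same delta naming. An illustrative resolution with hypothetical values, assuming org.apache.hadoop.fs.Path and AcidUtils as imported in this file:

// After setTransactionId(3), the merge input for a final dir of /warehouse/t:
Path root = new Path("/warehouse/t");
Path mergeInput = new Path(root, AcidUtils.deltaSubdir(3, 3, 0));
// -> /warehouse/t/delta_0000003_0000003_0000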

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadMultiFilesDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadMultiFilesDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadMultiFilesDesc.java
index 9d5c6b8..5e19729 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadMultiFilesDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadMultiFilesDesc.java
@@ -68,6 +68,14 @@ public class LoadMultiFilesDesc implements Serializable {
     return srcDirs;
   }
 
+  public void setSourceDirs(List<Path> srcs) {
+    this.srcDirs = srcs;
+  }
+
+  public void setTargetDirs(final List<Path> targetDir) {
+    this.targetDirs = targetDir;
+  }
+
   @Explain(displayName = "hdfs directory")
   public boolean getIsDfsDir() {
     return isDfsDir;


[10/18] hive git commit: HIVE-14879 : integrate MM tables into ACID: replace MM metastore calls and structures with ACID ones (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php
index a3201cc..acc541d 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -4569,14 +4569,6 @@ class Table {
    * @var bool
    */
   public $rewriteEnabled = null;
-  /**
-   * @var int
-   */
-  public $mmNextWriteId = null;
-  /**
-   * @var int
-   */
-  public $mmWatermarkWriteId = null;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
@@ -4656,14 +4648,6 @@ class Table {
           'var' => 'rewriteEnabled',
           'type' => TType::BOOL,
           ),
-        16 => array(
-          'var' => 'mmNextWriteId',
-          'type' => TType::I64,
-          ),
-        17 => array(
-          'var' => 'mmWatermarkWriteId',
-          'type' => TType::I64,
-          ),
         );
     }
     if (is_array($vals)) {
@@ -4712,12 +4696,6 @@ class Table {
       if (isset($vals['rewriteEnabled'])) {
         $this->rewriteEnabled = $vals['rewriteEnabled'];
       }
-      if (isset($vals['mmNextWriteId'])) {
-        $this->mmNextWriteId = $vals['mmNextWriteId'];
-      }
-      if (isset($vals['mmWatermarkWriteId'])) {
-        $this->mmWatermarkWriteId = $vals['mmWatermarkWriteId'];
-      }
     }
   }
 
@@ -4871,20 +4849,6 @@ class Table {
             $xfer += $input->skip($ftype);
           }
           break;
-        case 16:
-          if ($ftype == TType::I64) {
-            $xfer += $input->readI64($this->mmNextWriteId);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 17:
-          if ($ftype == TType::I64) {
-            $xfer += $input->readI64($this->mmWatermarkWriteId);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
         default:
           $xfer += $input->skip($ftype);
           break;
@@ -5004,16 +4968,6 @@ class Table {
       $xfer += $output->writeBool($this->rewriteEnabled);
       $xfer += $output->writeFieldEnd();
     }
-    if ($this->mmNextWriteId !== null) {
-      $xfer += $output->writeFieldBegin('mmNextWriteId', TType::I64, 16);
-      $xfer += $output->writeI64($this->mmNextWriteId);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->mmWatermarkWriteId !== null) {
-      $xfer += $output->writeFieldBegin('mmWatermarkWriteId', TType::I64, 17);
-      $xfer += $output->writeI64($this->mmWatermarkWriteId);
-      $xfer += $output->writeFieldEnd();
-    }
     $xfer += $output->writeFieldStop();
     $xfer += $output->writeStructEnd();
     return $xfer;
@@ -17795,43 +17749,37 @@ class CacheFileMetadataRequest {
 
 }
 
-class GetNextWriteIdRequest {
+class GetAllFunctionsResponse {
   static $_TSPEC;
 
   /**
-   * @var string
-   */
-  public $dbName = null;
-  /**
-   * @var string
+   * @var \metastore\Function[]
    */
-  public $tblName = null;
+  public $functions = null;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
       self::$_TSPEC = array(
         1 => array(
-          'var' => 'dbName',
-          'type' => TType::STRING,
-          ),
-        2 => array(
-          'var' => 'tblName',
-          'type' => TType::STRING,
+          'var' => 'functions',
+          'type' => TType::LST,
+          'etype' => TType::STRUCT,
+          'elem' => array(
+            'type' => TType::STRUCT,
+            'class' => '\metastore\Function',
+            ),
           ),
         );
     }
     if (is_array($vals)) {
-      if (isset($vals['dbName'])) {
-        $this->dbName = $vals['dbName'];
-      }
-      if (isset($vals['tblName'])) {
-        $this->tblName = $vals['tblName'];
+      if (isset($vals['functions'])) {
+        $this->functions = $vals['functions'];
       }
     }
   }
 
   public function getName() {
-    return 'GetNextWriteIdRequest';
+    return 'GetAllFunctionsResponse';
   }
 
   public function read($input)
@@ -17850,15 +17798,19 @@ class GetNextWriteIdRequest {
       switch ($fid)
       {
         case 1:
-          if ($ftype == TType::STRING) {
-            $xfer += $input->readString($this->dbName);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 2:
-          if ($ftype == TType::STRING) {
-            $xfer += $input->readString($this->tblName);
+          if ($ftype == TType::LST) {
+            $this->functions = array();
+            $_size568 = 0;
+            $_etype571 = 0;
+            $xfer += $input->readListBegin($_etype571, $_size568);
+            for ($_i572 = 0; $_i572 < $_size568; ++$_i572)
+            {
+              $elem573 = null;
+              $elem573 = new \metastore\Function();
+              $xfer += $elem573->read($input);
+              $this->functions []= $elem573;
+            }
+            $xfer += $input->readListEnd();
           } else {
             $xfer += $input->skip($ftype);
           }
@@ -17875,15 +17827,22 @@ class GetNextWriteIdRequest {
 
   public function write($output) {
     $xfer = 0;
-    $xfer += $output->writeStructBegin('GetNextWriteIdRequest');
-    if ($this->dbName !== null) {
-      $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1);
-      $xfer += $output->writeString($this->dbName);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->tblName !== null) {
-      $xfer += $output->writeFieldBegin('tblName', TType::STRING, 2);
-      $xfer += $output->writeString($this->tblName);
+    $xfer += $output->writeStructBegin('GetAllFunctionsResponse');
+    if ($this->functions !== null) {
+      if (!is_array($this->functions)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('functions', TType::LST, 1);
+      {
+        $output->writeListBegin(TType::STRUCT, count($this->functions));
+        {
+          foreach ($this->functions as $iter574)
+          {
+            $xfer += $iter574->write($output);
+          }
+        }
+        $output->writeListEnd();
+      }
       $xfer += $output->writeFieldEnd();
     }
     $xfer += $output->writeFieldStop();
@@ -17893,32 +17852,36 @@ class GetNextWriteIdRequest {
 
 }
 
-class GetNextWriteIdResult {
+class ClientCapabilities {
   static $_TSPEC;
 
   /**
-   * @var int
+   * @var int[]
    */
-  public $writeId = null;
+  public $values = null;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
       self::$_TSPEC = array(
         1 => array(
-          'var' => 'writeId',
-          'type' => TType::I64,
+          'var' => 'values',
+          'type' => TType::LST,
+          'etype' => TType::I32,
+          'elem' => array(
+            'type' => TType::I32,
+            ),
           ),
         );
     }
     if (is_array($vals)) {
-      if (isset($vals['writeId'])) {
-        $this->writeId = $vals['writeId'];
+      if (isset($vals['values'])) {
+        $this->values = $vals['values'];
       }
     }
   }
 
   public function getName() {
-    return 'GetNextWriteIdResult';
+    return 'ClientCapabilities';
   }
 
   public function read($input)
@@ -17937,8 +17900,18 @@ class GetNextWriteIdResult {
       switch ($fid)
       {
         case 1:
-          if ($ftype == TType::I64) {
-            $xfer += $input->readI64($this->writeId);
+          if ($ftype == TType::LST) {
+            $this->values = array();
+            $_size575 = 0;
+            $_etype578 = 0;
+            $xfer += $input->readListBegin($_etype578, $_size575);
+            for ($_i579 = 0; $_i579 < $_size575; ++$_i579)
+            {
+              $elem580 = null;
+              $xfer += $input->readI32($elem580);
+              $this->values []= $elem580;
+            }
+            $xfer += $input->readListEnd();
           } else {
             $xfer += $input->skip($ftype);
           }
@@ -17955,10 +17928,22 @@ class GetNextWriteIdResult {
 
   public function write($output) {
     $xfer = 0;
-    $xfer += $output->writeStructBegin('GetNextWriteIdResult');
-    if ($this->writeId !== null) {
-      $xfer += $output->writeFieldBegin('writeId', TType::I64, 1);
-      $xfer += $output->writeI64($this->writeId);
+    $xfer += $output->writeStructBegin('ClientCapabilities');
+    if ($this->values !== null) {
+      if (!is_array($this->values)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('values', TType::LST, 1);
+      {
+        $output->writeListBegin(TType::I32, count($this->values));
+        {
+          foreach ($this->values as $iter581)
+          {
+            $xfer += $output->writeI32($iter581);
+          }
+        }
+        $output->writeListEnd();
+      }
       $xfer += $output->writeFieldEnd();
     }
     $xfer += $output->writeFieldStop();
@@ -17968,7 +17953,7 @@ class GetNextWriteIdResult {
 
 }
 
-class FinalizeWriteIdRequest {
+class GetTableRequest {
   static $_TSPEC;
 
   /**
@@ -17980,13 +17965,9 @@ class FinalizeWriteIdRequest {
    */
   public $tblName = null;
   /**
-   * @var int
-   */
-  public $writeId = null;
-  /**
-   * @var bool
+   * @var \metastore\ClientCapabilities
    */
-  public $commit = null;
+  public $capabilities = null;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
@@ -18000,12 +17981,9 @@ class FinalizeWriteIdRequest {
           'type' => TType::STRING,
           ),
         3 => array(
-          'var' => 'writeId',
-          'type' => TType::I64,
-          ),
-        4 => array(
-          'var' => 'commit',
-          'type' => TType::BOOL,
+          'var' => 'capabilities',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\ClientCapabilities',
           ),
         );
     }
@@ -18016,17 +17994,14 @@ class FinalizeWriteIdRequest {
       if (isset($vals['tblName'])) {
         $this->tblName = $vals['tblName'];
       }
-      if (isset($vals['writeId'])) {
-        $this->writeId = $vals['writeId'];
-      }
-      if (isset($vals['commit'])) {
-        $this->commit = $vals['commit'];
+      if (isset($vals['capabilities'])) {
+        $this->capabilities = $vals['capabilities'];
       }
     }
   }
 
   public function getName() {
-    return 'FinalizeWriteIdRequest';
+    return 'GetTableRequest';
   }
 
   public function read($input)
@@ -18059,15 +18034,9 @@ class FinalizeWriteIdRequest {
           }
           break;
         case 3:
-          if ($ftype == TType::I64) {
-            $xfer += $input->readI64($this->writeId);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 4:
-          if ($ftype == TType::BOOL) {
-            $xfer += $input->readBool($this->commit);
+          if ($ftype == TType::STRUCT) {
+            $this->capabilities = new \metastore\ClientCapabilities();
+            $xfer += $this->capabilities->read($input);
           } else {
             $xfer += $input->skip($ftype);
           }
@@ -18084,7 +18053,7 @@ class FinalizeWriteIdRequest {
 
   public function write($output) {
     $xfer = 0;
-    $xfer += $output->writeStructBegin('FinalizeWriteIdRequest');
+    $xfer += $output->writeStructBegin('GetTableRequest');
     if ($this->dbName !== null) {
       $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1);
       $xfer += $output->writeString($this->dbName);
@@ -18095,14 +18064,12 @@ class FinalizeWriteIdRequest {
       $xfer += $output->writeString($this->tblName);
       $xfer += $output->writeFieldEnd();
     }
-    if ($this->writeId !== null) {
-      $xfer += $output->writeFieldBegin('writeId', TType::I64, 3);
-      $xfer += $output->writeI64($this->writeId);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->commit !== null) {
-      $xfer += $output->writeFieldBegin('commit', TType::BOOL, 4);
-      $xfer += $output->writeBool($this->commit);
+    if ($this->capabilities !== null) {
+      if (!is_object($this->capabilities)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('capabilities', TType::STRUCT, 3);
+      $xfer += $this->capabilities->write($output);
       $xfer += $output->writeFieldEnd();
     }
     $xfer += $output->writeFieldStop();
@@ -18112,19 +18079,33 @@ class FinalizeWriteIdRequest {
 
 }
 
-class FinalizeWriteIdResult {
+class GetTableResult {
   static $_TSPEC;
 
+  /**
+   * @var \metastore\Table
+   */
+  public $table = null;
 
-  public function __construct() {
+  public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
       self::$_TSPEC = array(
+        1 => array(
+          'var' => 'table',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\Table',
+          ),
         );
     }
+    if (is_array($vals)) {
+      if (isset($vals['table'])) {
+        $this->table = $vals['table'];
+      }
+    }
   }
 
   public function getName() {
-    return 'FinalizeWriteIdResult';
+    return 'GetTableResult';
   }
 
   public function read($input)
@@ -18142,6 +18123,14 @@ class FinalizeWriteIdResult {
       }
       switch ($fid)
       {
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->table = new \metastore\Table();
+            $xfer += $this->table->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
         default:
           $xfer += $input->skip($ftype);
           break;
@@ -18154,7 +18143,15 @@ class FinalizeWriteIdResult {
 
   public function write($output) {
     $xfer = 0;
-    $xfer += $output->writeStructBegin('FinalizeWriteIdResult');
+    $xfer += $output->writeStructBegin('GetTableResult');
+    if ($this->table !== null) {
+      if (!is_object($this->table)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('table', TType::STRUCT, 1);
+      $xfer += $this->table->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
     $xfer += $output->writeFieldStop();
     $xfer += $output->writeStructEnd();
     return $xfer;
@@ -18162,7 +18159,7 @@ class FinalizeWriteIdResult {
 
 }
 
-class HeartbeatWriteIdRequest {
+class GetTablesRequest {
   static $_TSPEC;
 
   /**
@@ -18170,13 +18167,13 @@ class HeartbeatWriteIdRequest {
    */
   public $dbName = null;
   /**
-   * @var string
+   * @var string[]
    */
-  public $tblName = null;
+  public $tblNames = null;
   /**
-   * @var int
+   * @var \metastore\ClientCapabilities
    */
-  public $writeId = null;
+  public $capabilities = null;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
@@ -18186,12 +18183,17 @@ class HeartbeatWriteIdRequest {
           'type' => TType::STRING,
           ),
         2 => array(
-          'var' => 'tblName',
-          'type' => TType::STRING,
+          'var' => 'tblNames',
+          'type' => TType::LST,
+          'etype' => TType::STRING,
+          'elem' => array(
+            'type' => TType::STRING,
+            ),
           ),
         3 => array(
-          'var' => 'writeId',
-          'type' => TType::I64,
+          'var' => 'capabilities',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\ClientCapabilities',
           ),
         );
     }
@@ -18199,17 +18201,17 @@ class HeartbeatWriteIdRequest {
       if (isset($vals['dbName'])) {
         $this->dbName = $vals['dbName'];
       }
-      if (isset($vals['tblName'])) {
-        $this->tblName = $vals['tblName'];
+      if (isset($vals['tblNames'])) {
+        $this->tblNames = $vals['tblNames'];
       }
-      if (isset($vals['writeId'])) {
-        $this->writeId = $vals['writeId'];
+      if (isset($vals['capabilities'])) {
+        $this->capabilities = $vals['capabilities'];
       }
     }
   }
 
   public function getName() {
-    return 'HeartbeatWriteIdRequest';
+    return 'GetTablesRequest';
   }
 
   public function read($input)
@@ -18235,15 +18237,26 @@ class HeartbeatWriteIdRequest {
           }
           break;
         case 2:
-          if ($ftype == TType::STRING) {
-            $xfer += $input->readString($this->tblName);
+          if ($ftype == TType::LST) {
+            $this->tblNames = array();
+            $_size582 = 0;
+            $_etype585 = 0;
+            $xfer += $input->readListBegin($_etype585, $_size582);
+            for ($_i586 = 0; $_i586 < $_size582; ++$_i586)
+            {
+              $elem587 = null;
+              $xfer += $input->readString($elem587);
+              $this->tblNames []= $elem587;
+            }
+            $xfer += $input->readListEnd();
           } else {
             $xfer += $input->skip($ftype);
           }
           break;
         case 3:
-          if ($ftype == TType::I64) {
-            $xfer += $input->readI64($this->writeId);
+          if ($ftype == TType::STRUCT) {
+            $this->capabilities = new \metastore\ClientCapabilities();
+            $xfer += $this->capabilities->read($input);
           } else {
             $xfer += $input->skip($ftype);
           }
@@ -18260,872 +18273,7 @@ class HeartbeatWriteIdRequest {
 
   public function write($output) {
     $xfer = 0;
-    $xfer += $output->writeStructBegin('HeartbeatWriteIdRequest');
-    if ($this->dbName !== null) {
-      $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1);
-      $xfer += $output->writeString($this->dbName);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->tblName !== null) {
-      $xfer += $output->writeFieldBegin('tblName', TType::STRING, 2);
-      $xfer += $output->writeString($this->tblName);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->writeId !== null) {
-      $xfer += $output->writeFieldBegin('writeId', TType::I64, 3);
-      $xfer += $output->writeI64($this->writeId);
-      $xfer += $output->writeFieldEnd();
-    }
-    $xfer += $output->writeFieldStop();
-    $xfer += $output->writeStructEnd();
-    return $xfer;
-  }
-
-}
-
-class HeartbeatWriteIdResult {
-  static $_TSPEC;
-
-
-  public function __construct() {
-    if (!isset(self::$_TSPEC)) {
-      self::$_TSPEC = array(
-        );
-    }
-  }
-
-  public function getName() {
-    return 'HeartbeatWriteIdResult';
-  }
-
-  public function read($input)
-  {
-    $xfer = 0;
-    $fname = null;
-    $ftype = 0;
-    $fid = 0;
-    $xfer += $input->readStructBegin($fname);
-    while (true)
-    {
-      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
-      if ($ftype == TType::STOP) {
-        break;
-      }
-      switch ($fid)
-      {
-        default:
-          $xfer += $input->skip($ftype);
-          break;
-      }
-      $xfer += $input->readFieldEnd();
-    }
-    $xfer += $input->readStructEnd();
-    return $xfer;
-  }
-
-  public function write($output) {
-    $xfer = 0;
-    $xfer += $output->writeStructBegin('HeartbeatWriteIdResult');
-    $xfer += $output->writeFieldStop();
-    $xfer += $output->writeStructEnd();
-    return $xfer;
-  }
-
-}
-
-class GetValidWriteIdsRequest {
-  static $_TSPEC;
-
-  /**
-   * @var string
-   */
-  public $dbName = null;
-  /**
-   * @var string
-   */
-  public $tblName = null;
-
-  public function __construct($vals=null) {
-    if (!isset(self::$_TSPEC)) {
-      self::$_TSPEC = array(
-        1 => array(
-          'var' => 'dbName',
-          'type' => TType::STRING,
-          ),
-        2 => array(
-          'var' => 'tblName',
-          'type' => TType::STRING,
-          ),
-        );
-    }
-    if (is_array($vals)) {
-      if (isset($vals['dbName'])) {
-        $this->dbName = $vals['dbName'];
-      }
-      if (isset($vals['tblName'])) {
-        $this->tblName = $vals['tblName'];
-      }
-    }
-  }
-
-  public function getName() {
-    return 'GetValidWriteIdsRequest';
-  }
-
-  public function read($input)
-  {
-    $xfer = 0;
-    $fname = null;
-    $ftype = 0;
-    $fid = 0;
-    $xfer += $input->readStructBegin($fname);
-    while (true)
-    {
-      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
-      if ($ftype == TType::STOP) {
-        break;
-      }
-      switch ($fid)
-      {
-        case 1:
-          if ($ftype == TType::STRING) {
-            $xfer += $input->readString($this->dbName);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 2:
-          if ($ftype == TType::STRING) {
-            $xfer += $input->readString($this->tblName);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        default:
-          $xfer += $input->skip($ftype);
-          break;
-      }
-      $xfer += $input->readFieldEnd();
-    }
-    $xfer += $input->readStructEnd();
-    return $xfer;
-  }
-
-  public function write($output) {
-    $xfer = 0;
-    $xfer += $output->writeStructBegin('GetValidWriteIdsRequest');
-    if ($this->dbName !== null) {
-      $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1);
-      $xfer += $output->writeString($this->dbName);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->tblName !== null) {
-      $xfer += $output->writeFieldBegin('tblName', TType::STRING, 2);
-      $xfer += $output->writeString($this->tblName);
-      $xfer += $output->writeFieldEnd();
-    }
-    $xfer += $output->writeFieldStop();
-    $xfer += $output->writeStructEnd();
-    return $xfer;
-  }
-
-}
-
-class GetValidWriteIdsResult {
-  static $_TSPEC;
-
-  /**
-   * @var int
-   */
-  public $lowWatermarkId = null;
-  /**
-   * @var int
-   */
-  public $highWatermarkId = null;
-  /**
-   * @var bool
-   */
-  public $areIdsValid = null;
-  /**
-   * @var int[]
-   */
-  public $ids = null;
-
-  public function __construct($vals=null) {
-    if (!isset(self::$_TSPEC)) {
-      self::$_TSPEC = array(
-        1 => array(
-          'var' => 'lowWatermarkId',
-          'type' => TType::I64,
-          ),
-        2 => array(
-          'var' => 'highWatermarkId',
-          'type' => TType::I64,
-          ),
-        3 => array(
-          'var' => 'areIdsValid',
-          'type' => TType::BOOL,
-          ),
-        4 => array(
-          'var' => 'ids',
-          'type' => TType::LST,
-          'etype' => TType::I64,
-          'elem' => array(
-            'type' => TType::I64,
-            ),
-          ),
-        );
-    }
-    if (is_array($vals)) {
-      if (isset($vals['lowWatermarkId'])) {
-        $this->lowWatermarkId = $vals['lowWatermarkId'];
-      }
-      if (isset($vals['highWatermarkId'])) {
-        $this->highWatermarkId = $vals['highWatermarkId'];
-      }
-      if (isset($vals['areIdsValid'])) {
-        $this->areIdsValid = $vals['areIdsValid'];
-      }
-      if (isset($vals['ids'])) {
-        $this->ids = $vals['ids'];
-      }
-    }
-  }
-
-  public function getName() {
-    return 'GetValidWriteIdsResult';
-  }
-
-  public function read($input)
-  {
-    $xfer = 0;
-    $fname = null;
-    $ftype = 0;
-    $fid = 0;
-    $xfer += $input->readStructBegin($fname);
-    while (true)
-    {
-      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
-      if ($ftype == TType::STOP) {
-        break;
-      }
-      switch ($fid)
-      {
-        case 1:
-          if ($ftype == TType::I64) {
-            $xfer += $input->readI64($this->lowWatermarkId);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 2:
-          if ($ftype == TType::I64) {
-            $xfer += $input->readI64($this->highWatermarkId);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 3:
-          if ($ftype == TType::BOOL) {
-            $xfer += $input->readBool($this->areIdsValid);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 4:
-          if ($ftype == TType::LST) {
-            $this->ids = array();
-            $_size568 = 0;
-            $_etype571 = 0;
-            $xfer += $input->readListBegin($_etype571, $_size568);
-            for ($_i572 = 0; $_i572 < $_size568; ++$_i572)
-            {
-              $elem573 = null;
-              $xfer += $input->readI64($elem573);
-              $this->ids []= $elem573;
-            }
-            $xfer += $input->readListEnd();
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        default:
-          $xfer += $input->skip($ftype);
-          break;
-      }
-      $xfer += $input->readFieldEnd();
-    }
-    $xfer += $input->readStructEnd();
-    return $xfer;
-  }
-
-  public function write($output) {
-    $xfer = 0;
-    $xfer += $output->writeStructBegin('GetValidWriteIdsResult');
-    if ($this->lowWatermarkId !== null) {
-      $xfer += $output->writeFieldBegin('lowWatermarkId', TType::I64, 1);
-      $xfer += $output->writeI64($this->lowWatermarkId);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->highWatermarkId !== null) {
-      $xfer += $output->writeFieldBegin('highWatermarkId', TType::I64, 2);
-      $xfer += $output->writeI64($this->highWatermarkId);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->areIdsValid !== null) {
-      $xfer += $output->writeFieldBegin('areIdsValid', TType::BOOL, 3);
-      $xfer += $output->writeBool($this->areIdsValid);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->ids !== null) {
-      if (!is_array($this->ids)) {
-        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
-      }
-      $xfer += $output->writeFieldBegin('ids', TType::LST, 4);
-      {
-        $output->writeListBegin(TType::I64, count($this->ids));
-        {
-          foreach ($this->ids as $iter574)
-          {
-            $xfer += $output->writeI64($iter574);
-          }
-        }
-        $output->writeListEnd();
-      }
-      $xfer += $output->writeFieldEnd();
-    }
-    $xfer += $output->writeFieldStop();
-    $xfer += $output->writeStructEnd();
-    return $xfer;
-  }
-
-}
-
-class GetAllFunctionsResponse {
-  static $_TSPEC;
-
-  /**
-   * @var \metastore\Function[]
-   */
-  public $functions = null;
-
-  public function __construct($vals=null) {
-    if (!isset(self::$_TSPEC)) {
-      self::$_TSPEC = array(
-        1 => array(
-          'var' => 'functions',
-          'type' => TType::LST,
-          'etype' => TType::STRUCT,
-          'elem' => array(
-            'type' => TType::STRUCT,
-            'class' => '\metastore\Function',
-            ),
-          ),
-        );
-    }
-    if (is_array($vals)) {
-      if (isset($vals['functions'])) {
-        $this->functions = $vals['functions'];
-      }
-    }
-  }
-
-  public function getName() {
-    return 'GetAllFunctionsResponse';
-  }
-
-  public function read($input)
-  {
-    $xfer = 0;
-    $fname = null;
-    $ftype = 0;
-    $fid = 0;
-    $xfer += $input->readStructBegin($fname);
-    while (true)
-    {
-      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
-      if ($ftype == TType::STOP) {
-        break;
-      }
-      switch ($fid)
-      {
-        case 1:
-          if ($ftype == TType::LST) {
-            $this->functions = array();
-            $_size575 = 0;
-            $_etype578 = 0;
-            $xfer += $input->readListBegin($_etype578, $_size575);
-            for ($_i579 = 0; $_i579 < $_size575; ++$_i579)
-            {
-              $elem580 = null;
-              $elem580 = new \metastore\Function();
-              $xfer += $elem580->read($input);
-              $this->functions []= $elem580;
-            }
-            $xfer += $input->readListEnd();
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        default:
-          $xfer += $input->skip($ftype);
-          break;
-      }
-      $xfer += $input->readFieldEnd();
-    }
-    $xfer += $input->readStructEnd();
-    return $xfer;
-  }
-
-  public function write($output) {
-    $xfer = 0;
-    $xfer += $output->writeStructBegin('GetAllFunctionsResponse');
-    if ($this->functions !== null) {
-      if (!is_array($this->functions)) {
-        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
-      }
-      $xfer += $output->writeFieldBegin('functions', TType::LST, 1);
-      {
-        $output->writeListBegin(TType::STRUCT, count($this->functions));
-        {
-          foreach ($this->functions as $iter581)
-          {
-            $xfer += $iter581->write($output);
-          }
-        }
-        $output->writeListEnd();
-      }
-      $xfer += $output->writeFieldEnd();
-    }
-    $xfer += $output->writeFieldStop();
-    $xfer += $output->writeStructEnd();
-    return $xfer;
-  }
-
-}
-
-class ClientCapabilities {
-  static $_TSPEC;
-
-  /**
-   * @var int[]
-   */
-  public $values = null;
-
-  public function __construct($vals=null) {
-    if (!isset(self::$_TSPEC)) {
-      self::$_TSPEC = array(
-        1 => array(
-          'var' => 'values',
-          'type' => TType::LST,
-          'etype' => TType::I32,
-          'elem' => array(
-            'type' => TType::I32,
-            ),
-          ),
-        );
-    }
-    if (is_array($vals)) {
-      if (isset($vals['values'])) {
-        $this->values = $vals['values'];
-      }
-    }
-  }
-
-  public function getName() {
-    return 'ClientCapabilities';
-  }
-
-  public function read($input)
-  {
-    $xfer = 0;
-    $fname = null;
-    $ftype = 0;
-    $fid = 0;
-    $xfer += $input->readStructBegin($fname);
-    while (true)
-    {
-      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
-      if ($ftype == TType::STOP) {
-        break;
-      }
-      switch ($fid)
-      {
-        case 1:
-          if ($ftype == TType::LST) {
-            $this->values = array();
-            $_size582 = 0;
-            $_etype585 = 0;
-            $xfer += $input->readListBegin($_etype585, $_size582);
-            for ($_i586 = 0; $_i586 < $_size582; ++$_i586)
-            {
-              $elem587 = null;
-              $xfer += $input->readI32($elem587);
-              $this->values []= $elem587;
-            }
-            $xfer += $input->readListEnd();
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        default:
-          $xfer += $input->skip($ftype);
-          break;
-      }
-      $xfer += $input->readFieldEnd();
-    }
-    $xfer += $input->readStructEnd();
-    return $xfer;
-  }
-
-  public function write($output) {
-    $xfer = 0;
-    $xfer += $output->writeStructBegin('ClientCapabilities');
-    if ($this->values !== null) {
-      if (!is_array($this->values)) {
-        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
-      }
-      $xfer += $output->writeFieldBegin('values', TType::LST, 1);
-      {
-        $output->writeListBegin(TType::I32, count($this->values));
-        {
-          foreach ($this->values as $iter588)
-          {
-            $xfer += $output->writeI32($iter588);
-          }
-        }
-        $output->writeListEnd();
-      }
-      $xfer += $output->writeFieldEnd();
-    }
-    $xfer += $output->writeFieldStop();
-    $xfer += $output->writeStructEnd();
-    return $xfer;
-  }
-
-}
-
-class GetTableRequest {
-  static $_TSPEC;
-
-  /**
-   * @var string
-   */
-  public $dbName = null;
-  /**
-   * @var string
-   */
-  public $tblName = null;
-  /**
-   * @var \metastore\ClientCapabilities
-   */
-  public $capabilities = null;
-
-  public function __construct($vals=null) {
-    if (!isset(self::$_TSPEC)) {
-      self::$_TSPEC = array(
-        1 => array(
-          'var' => 'dbName',
-          'type' => TType::STRING,
-          ),
-        2 => array(
-          'var' => 'tblName',
-          'type' => TType::STRING,
-          ),
-        3 => array(
-          'var' => 'capabilities',
-          'type' => TType::STRUCT,
-          'class' => '\metastore\ClientCapabilities',
-          ),
-        );
-    }
-    if (is_array($vals)) {
-      if (isset($vals['dbName'])) {
-        $this->dbName = $vals['dbName'];
-      }
-      if (isset($vals['tblName'])) {
-        $this->tblName = $vals['tblName'];
-      }
-      if (isset($vals['capabilities'])) {
-        $this->capabilities = $vals['capabilities'];
-      }
-    }
-  }
-
-  public function getName() {
-    return 'GetTableRequest';
-  }
-
-  public function read($input)
-  {
-    $xfer = 0;
-    $fname = null;
-    $ftype = 0;
-    $fid = 0;
-    $xfer += $input->readStructBegin($fname);
-    while (true)
-    {
-      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
-      if ($ftype == TType::STOP) {
-        break;
-      }
-      switch ($fid)
-      {
-        case 1:
-          if ($ftype == TType::STRING) {
-            $xfer += $input->readString($this->dbName);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 2:
-          if ($ftype == TType::STRING) {
-            $xfer += $input->readString($this->tblName);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 3:
-          if ($ftype == TType::STRUCT) {
-            $this->capabilities = new \metastore\ClientCapabilities();
-            $xfer += $this->capabilities->read($input);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        default:
-          $xfer += $input->skip($ftype);
-          break;
-      }
-      $xfer += $input->readFieldEnd();
-    }
-    $xfer += $input->readStructEnd();
-    return $xfer;
-  }
-
-  public function write($output) {
-    $xfer = 0;
-    $xfer += $output->writeStructBegin('GetTableRequest');
-    if ($this->dbName !== null) {
-      $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1);
-      $xfer += $output->writeString($this->dbName);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->tblName !== null) {
-      $xfer += $output->writeFieldBegin('tblName', TType::STRING, 2);
-      $xfer += $output->writeString($this->tblName);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->capabilities !== null) {
-      if (!is_object($this->capabilities)) {
-        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
-      }
-      $xfer += $output->writeFieldBegin('capabilities', TType::STRUCT, 3);
-      $xfer += $this->capabilities->write($output);
-      $xfer += $output->writeFieldEnd();
-    }
-    $xfer += $output->writeFieldStop();
-    $xfer += $output->writeStructEnd();
-    return $xfer;
-  }
-
-}
-
-class GetTableResult {
-  static $_TSPEC;
-
-  /**
-   * @var \metastore\Table
-   */
-  public $table = null;
-
-  public function __construct($vals=null) {
-    if (!isset(self::$_TSPEC)) {
-      self::$_TSPEC = array(
-        1 => array(
-          'var' => 'table',
-          'type' => TType::STRUCT,
-          'class' => '\metastore\Table',
-          ),
-        );
-    }
-    if (is_array($vals)) {
-      if (isset($vals['table'])) {
-        $this->table = $vals['table'];
-      }
-    }
-  }
-
-  public function getName() {
-    return 'GetTableResult';
-  }
-
-  public function read($input)
-  {
-    $xfer = 0;
-    $fname = null;
-    $ftype = 0;
-    $fid = 0;
-    $xfer += $input->readStructBegin($fname);
-    while (true)
-    {
-      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
-      if ($ftype == TType::STOP) {
-        break;
-      }
-      switch ($fid)
-      {
-        case 1:
-          if ($ftype == TType::STRUCT) {
-            $this->table = new \metastore\Table();
-            $xfer += $this->table->read($input);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        default:
-          $xfer += $input->skip($ftype);
-          break;
-      }
-      $xfer += $input->readFieldEnd();
-    }
-    $xfer += $input->readStructEnd();
-    return $xfer;
-  }
-
-  public function write($output) {
-    $xfer = 0;
-    $xfer += $output->writeStructBegin('GetTableResult');
-    if ($this->table !== null) {
-      if (!is_object($this->table)) {
-        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
-      }
-      $xfer += $output->writeFieldBegin('table', TType::STRUCT, 1);
-      $xfer += $this->table->write($output);
-      $xfer += $output->writeFieldEnd();
-    }
-    $xfer += $output->writeFieldStop();
-    $xfer += $output->writeStructEnd();
-    return $xfer;
-  }
-
-}
-
-class GetTablesRequest {
-  static $_TSPEC;
-
-  /**
-   * @var string
-   */
-  public $dbName = null;
-  /**
-   * @var string[]
-   */
-  public $tblNames = null;
-  /**
-   * @var \metastore\ClientCapabilities
-   */
-  public $capabilities = null;
-
-  public function __construct($vals=null) {
-    if (!isset(self::$_TSPEC)) {
-      self::$_TSPEC = array(
-        1 => array(
-          'var' => 'dbName',
-          'type' => TType::STRING,
-          ),
-        2 => array(
-          'var' => 'tblNames',
-          'type' => TType::LST,
-          'etype' => TType::STRING,
-          'elem' => array(
-            'type' => TType::STRING,
-            ),
-          ),
-        3 => array(
-          'var' => 'capabilities',
-          'type' => TType::STRUCT,
-          'class' => '\metastore\ClientCapabilities',
-          ),
-        );
-    }
-    if (is_array($vals)) {
-      if (isset($vals['dbName'])) {
-        $this->dbName = $vals['dbName'];
-      }
-      if (isset($vals['tblNames'])) {
-        $this->tblNames = $vals['tblNames'];
-      }
-      if (isset($vals['capabilities'])) {
-        $this->capabilities = $vals['capabilities'];
-      }
-    }
-  }
-
-  public function getName() {
-    return 'GetTablesRequest';
-  }
-
-  public function read($input)
-  {
-    $xfer = 0;
-    $fname = null;
-    $ftype = 0;
-    $fid = 0;
-    $xfer += $input->readStructBegin($fname);
-    while (true)
-    {
-      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
-      if ($ftype == TType::STOP) {
-        break;
-      }
-      switch ($fid)
-      {
-        case 1:
-          if ($ftype == TType::STRING) {
-            $xfer += $input->readString($this->dbName);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 2:
-          if ($ftype == TType::LST) {
-            $this->tblNames = array();
-            $_size589 = 0;
-            $_etype592 = 0;
-            $xfer += $input->readListBegin($_etype592, $_size589);
-            for ($_i593 = 0; $_i593 < $_size589; ++$_i593)
-            {
-              $elem594 = null;
-              $xfer += $input->readString($elem594);
-              $this->tblNames []= $elem594;
-            }
-            $xfer += $input->readListEnd();
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 3:
-          if ($ftype == TType::STRUCT) {
-            $this->capabilities = new \metastore\ClientCapabilities();
-            $xfer += $this->capabilities->read($input);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        default:
-          $xfer += $input->skip($ftype);
-          break;
-      }
-      $xfer += $input->readFieldEnd();
-    }
-    $xfer += $input->readStructEnd();
-    return $xfer;
-  }
-
-  public function write($output) {
-    $xfer = 0;
-    $xfer += $output->writeStructBegin('GetTablesRequest');
+    $xfer += $output->writeStructBegin('GetTablesRequest');
     if ($this->dbName !== null) {
       $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1);
       $xfer += $output->writeString($this->dbName);
@@ -19139,9 +18287,9 @@ class GetTablesRequest {
       {
         $output->writeListBegin(TType::STRING, count($this->tblNames));
         {
-          foreach ($this->tblNames as $iter595)
+          foreach ($this->tblNames as $iter588)
           {
-            $xfer += $output->writeString($iter595);
+            $xfer += $output->writeString($iter588);
           }
         }
         $output->writeListEnd();
@@ -19214,15 +18362,15 @@ class GetTablesResult {
         case 1:
           if ($ftype == TType::LST) {
             $this->tables = array();
-            $_size596 = 0;
-            $_etype599 = 0;
-            $xfer += $input->readListBegin($_etype599, $_size596);
-            for ($_i600 = 0; $_i600 < $_size596; ++$_i600)
+            $_size589 = 0;
+            $_etype592 = 0;
+            $xfer += $input->readListBegin($_etype592, $_size589);
+            for ($_i593 = 0; $_i593 < $_size589; ++$_i593)
             {
-              $elem601 = null;
-              $elem601 = new \metastore\Table();
-              $xfer += $elem601->read($input);
-              $this->tables []= $elem601;
+              $elem594 = null;
+              $elem594 = new \metastore\Table();
+              $xfer += $elem594->read($input);
+              $this->tables []= $elem594;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -19250,9 +18398,9 @@ class GetTablesResult {
       {
         $output->writeListBegin(TType::STRUCT, count($this->tables));
         {
-          foreach ($this->tables as $iter602)
+          foreach ($this->tables as $iter595)
           {
-            $xfer += $iter602->write($output);
+            $xfer += $iter595->write($output);
           }
         }
         $output->writeListEnd();
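
The only change in the surviving GetTablesRequest/GetTablesResult code is the renumbering of the generated temporaries: the Thrift compiler numbers the $_size/$_etype/$elem/$iter counters globally across the file, so deleting the four write-id structs shifts every later index down. For orientation, here is a minimal sketch of how a caller populates this request through the generated Java beans; the field names and types come from the _TSPEC above, while the database name, table names, and the TEST_CAPABILITY value are illustrative assumptions, not taken from this patch.

  import java.util.Arrays;

  import org.apache.hadoop.hive.metastore.api.ClientCapabilities;
  import org.apache.hadoop.hive.metastore.api.ClientCapability;
  import org.apache.hadoop.hive.metastore.api.GetTablesRequest;

  public class GetTablesRequestSketch {
    public static void main(String[] args) {
      // Fields 1-3 mirror the PHP _TSPEC above: dbName (STRING),
      // tblNames (LIST<STRING>), capabilities (STRUCT).
      GetTablesRequest req = new GetTablesRequest();
      req.setDbName("default");                    // assumed database
      req.setTblNames(Arrays.asList("t1", "t2"));  // assumed table names
      // The capability value is illustrative; any ClientCapability enum
      // constant serializes the same way (as a list of I32 values).
      req.setCapabilities(new ClientCapabilities(
          Arrays.asList(ClientCapability.TEST_CAPABILITY)));
      System.out.println(req);
    }
  }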

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index c1c3393..f2a9799 100755
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@ -178,10 +178,6 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
   print('  PutFileMetadataResult put_file_metadata(PutFileMetadataRequest req)')
   print('  ClearFileMetadataResult clear_file_metadata(ClearFileMetadataRequest req)')
   print('  CacheFileMetadataResult cache_file_metadata(CacheFileMetadataRequest req)')
-  print('  GetNextWriteIdResult get_next_write_id(GetNextWriteIdRequest req)')
-  print('  FinalizeWriteIdResult finalize_write_id(FinalizeWriteIdRequest req)')
-  print('  HeartbeatWriteIdResult heartbeat_write_id(HeartbeatWriteIdRequest req)')
-  print('  GetValidWriteIdsResult get_valid_write_ids(GetValidWriteIdsRequest req)')
   print('  string getName()')
   print('  string getVersion()')
   print('  fb_status getStatus()')
@@ -1175,30 +1171,6 @@ elif cmd == 'cache_file_metadata':
     sys.exit(1)
   pp.pprint(client.cache_file_metadata(eval(args[0]),))
 
-elif cmd == 'get_next_write_id':
-  if len(args) != 1:
-    print('get_next_write_id requires 1 args')
-    sys.exit(1)
-  pp.pprint(client.get_next_write_id(eval(args[0]),))
-
-elif cmd == 'finalize_write_id':
-  if len(args) != 1:
-    print('finalize_write_id requires 1 args')
-    sys.exit(1)
-  pp.pprint(client.finalize_write_id(eval(args[0]),))
-
-elif cmd == 'heartbeat_write_id':
-  if len(args) != 1:
-    print('heartbeat_write_id requires 1 args')
-    sys.exit(1)
-  pp.pprint(client.heartbeat_write_id(eval(args[0]),))
-
-elif cmd == 'get_valid_write_ids':
-  if len(args) != 1:
-    print('get_valid_write_ids requires 1 args')
-    sys.exit(1)
-  pp.pprint(client.get_valid_write_ids(eval(args[0]),))
-
 elif cmd == 'getName':
   if len(args) != 0:
     print('getName requires 0 args')
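
The four write-id commands disappear from the generated remote CLI because the corresponding service methods were dropped from the Thrift IDL. The surviving commands, such as getName above, simply drive the generated client stub over a socket; a minimal, self-contained Java sketch of that call path follows. The host, port, and choice of the plain binary protocol are assumptions for illustration, not taken from this patch.

  import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
  import org.apache.thrift.protocol.TBinaryProtocol;
  import org.apache.thrift.transport.TSocket;
  import org.apache.thrift.transport.TTransport;

  public class RemoteCliSketch {
    public static void main(String[] args) throws Exception {
      // Rough equivalent of `ThriftHiveMetastore-remote -h host:9083 getName`:
      // open a socket transport, wrap it in a protocol, and call the stub.
      TTransport transport = new TSocket("localhost", 9083);
      transport.open();
      ThriftHiveMetastore.Client client =
          new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
      System.out.println(client.getName());
      transport.close();
    }
  }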


[14/18] hive git commit: HIVE-14879 : integrate MM tables into ACID: replace MM metastore calls and structures with ACID ones (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java
index d7ab0cf..81534fe 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java
@@ -354,13 +354,13 @@ public class ClientCapabilities implements org.apache.thrift.TBase<ClientCapabil
           case 1: // VALUES
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list658 = iprot.readListBegin();
-                struct.values = new ArrayList<ClientCapability>(_list658.size);
-                ClientCapability _elem659;
-                for (int _i660 = 0; _i660 < _list658.size; ++_i660)
+                org.apache.thrift.protocol.TList _list650 = iprot.readListBegin();
+                struct.values = new ArrayList<ClientCapability>(_list650.size);
+                ClientCapability _elem651;
+                for (int _i652 = 0; _i652 < _list650.size; ++_i652)
                 {
-                  _elem659 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32());
-                  struct.values.add(_elem659);
+                  _elem651 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32());
+                  struct.values.add(_elem651);
                 }
                 iprot.readListEnd();
               }
@@ -386,9 +386,9 @@ public class ClientCapabilities implements org.apache.thrift.TBase<ClientCapabil
         oprot.writeFieldBegin(VALUES_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.values.size()));
-          for (ClientCapability _iter661 : struct.values)
+          for (ClientCapability _iter653 : struct.values)
           {
-            oprot.writeI32(_iter661.getValue());
+            oprot.writeI32(_iter653.getValue());
           }
           oprot.writeListEnd();
         }
@@ -413,9 +413,9 @@ public class ClientCapabilities implements org.apache.thrift.TBase<ClientCapabil
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.values.size());
-        for (ClientCapability _iter662 : struct.values)
+        for (ClientCapability _iter654 : struct.values)
         {
-          oprot.writeI32(_iter662.getValue());
+          oprot.writeI32(_iter654.getValue());
         }
       }
     }
@@ -424,13 +424,13 @@ public class ClientCapabilities implements org.apache.thrift.TBase<ClientCapabil
     public void read(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
-        org.apache.thrift.protocol.TList _list663 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32());
-        struct.values = new ArrayList<ClientCapability>(_list663.size);
-        ClientCapability _elem664;
-        for (int _i665 = 0; _i665 < _list663.size; ++_i665)
+        org.apache.thrift.protocol.TList _list655 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32());
+        struct.values = new ArrayList<ClientCapability>(_list655.size);
+        ClientCapability _elem656;
+        for (int _i657 = 0; _i657 < _list655.size; ++_i657)
         {
-          _elem664 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32());
-          struct.values.add(_elem664);
+          _elem656 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32());
+          struct.values.add(_elem656);
         }
       }
       struct.setValuesIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdRequest.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdRequest.java
deleted file mode 100644
index f474602..0000000
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdRequest.java
+++ /dev/null
@@ -1,684 +0,0 @@
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package org.apache.hadoop.hive.metastore.api;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class FinalizeWriteIdRequest implements org.apache.thrift.TBase<FinalizeWriteIdRequest, FinalizeWriteIdRequest._Fields>, java.io.Serializable, Cloneable, Comparable<FinalizeWriteIdRequest> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FinalizeWriteIdRequest");
-
-  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
-  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
-  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)3);
-  private static final org.apache.thrift.protocol.TField COMMIT_FIELD_DESC = new org.apache.thrift.protocol.TField("commit", org.apache.thrift.protocol.TType.BOOL, (short)4);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new FinalizeWriteIdRequestStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new FinalizeWriteIdRequestTupleSchemeFactory());
-  }
-
-  private String dbName; // required
-  private String tblName; // required
-  private long writeId; // required
-  private boolean commit; // required
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    DB_NAME((short)1, "dbName"),
-    TBL_NAME((short)2, "tblName"),
-    WRITE_ID((short)3, "writeId"),
-    COMMIT((short)4, "commit");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // DB_NAME
-          return DB_NAME;
-        case 2: // TBL_NAME
-          return TBL_NAME;
-        case 3: // WRITE_ID
-          return WRITE_ID;
-        case 4: // COMMIT
-          return COMMIT;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  private static final int __WRITEID_ISSET_ID = 0;
-  private static final int __COMMIT_ISSET_ID = 1;
-  private byte __isset_bitfield = 0;
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    tmpMap.put(_Fields.COMMIT, new org.apache.thrift.meta_data.FieldMetaData("commit", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FinalizeWriteIdRequest.class, metaDataMap);
-  }
-
-  public FinalizeWriteIdRequest() {
-  }
-
-  public FinalizeWriteIdRequest(
-    String dbName,
-    String tblName,
-    long writeId,
-    boolean commit)
-  {
-    this();
-    this.dbName = dbName;
-    this.tblName = tblName;
-    this.writeId = writeId;
-    setWriteIdIsSet(true);
-    this.commit = commit;
-    setCommitIsSet(true);
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public FinalizeWriteIdRequest(FinalizeWriteIdRequest other) {
-    __isset_bitfield = other.__isset_bitfield;
-    if (other.isSetDbName()) {
-      this.dbName = other.dbName;
-    }
-    if (other.isSetTblName()) {
-      this.tblName = other.tblName;
-    }
-    this.writeId = other.writeId;
-    this.commit = other.commit;
-  }
-
-  public FinalizeWriteIdRequest deepCopy() {
-    return new FinalizeWriteIdRequest(this);
-  }
-
-  @Override
-  public void clear() {
-    this.dbName = null;
-    this.tblName = null;
-    setWriteIdIsSet(false);
-    this.writeId = 0;
-    setCommitIsSet(false);
-    this.commit = false;
-  }
-
-  public String getDbName() {
-    return this.dbName;
-  }
-
-  public void setDbName(String dbName) {
-    this.dbName = dbName;
-  }
-
-  public void unsetDbName() {
-    this.dbName = null;
-  }
-
-  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
-  public boolean isSetDbName() {
-    return this.dbName != null;
-  }
-
-  public void setDbNameIsSet(boolean value) {
-    if (!value) {
-      this.dbName = null;
-    }
-  }
-
-  public String getTblName() {
-    return this.tblName;
-  }
-
-  public void setTblName(String tblName) {
-    this.tblName = tblName;
-  }
-
-  public void unsetTblName() {
-    this.tblName = null;
-  }
-
-  /** Returns true if field tblName is set (has been assigned a value) and false otherwise */
-  public boolean isSetTblName() {
-    return this.tblName != null;
-  }
-
-  public void setTblNameIsSet(boolean value) {
-    if (!value) {
-      this.tblName = null;
-    }
-  }
-
-  public long getWriteId() {
-    return this.writeId;
-  }
-
-  public void setWriteId(long writeId) {
-    this.writeId = writeId;
-    setWriteIdIsSet(true);
-  }
-
-  public void unsetWriteId() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID);
-  }
-
-  /** Returns true if field writeId is set (has been assigned a value) and false otherwise */
-  public boolean isSetWriteId() {
-    return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID);
-  }
-
-  public void setWriteIdIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value);
-  }
-
-  public boolean isCommit() {
-    return this.commit;
-  }
-
-  public void setCommit(boolean commit) {
-    this.commit = commit;
-    setCommitIsSet(true);
-  }
-
-  public void unsetCommit() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __COMMIT_ISSET_ID);
-  }
-
-  /** Returns true if field commit is set (has been assigned a value) and false otherwise */
-  public boolean isSetCommit() {
-    return EncodingUtils.testBit(__isset_bitfield, __COMMIT_ISSET_ID);
-  }
-
-  public void setCommitIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __COMMIT_ISSET_ID, value);
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case DB_NAME:
-      if (value == null) {
-        unsetDbName();
-      } else {
-        setDbName((String)value);
-      }
-      break;
-
-    case TBL_NAME:
-      if (value == null) {
-        unsetTblName();
-      } else {
-        setTblName((String)value);
-      }
-      break;
-
-    case WRITE_ID:
-      if (value == null) {
-        unsetWriteId();
-      } else {
-        setWriteId((Long)value);
-      }
-      break;
-
-    case COMMIT:
-      if (value == null) {
-        unsetCommit();
-      } else {
-        setCommit((Boolean)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case DB_NAME:
-      return getDbName();
-
-    case TBL_NAME:
-      return getTblName();
-
-    case WRITE_ID:
-      return getWriteId();
-
-    case COMMIT:
-      return isCommit();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case DB_NAME:
-      return isSetDbName();
-    case TBL_NAME:
-      return isSetTblName();
-    case WRITE_ID:
-      return isSetWriteId();
-    case COMMIT:
-      return isSetCommit();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof FinalizeWriteIdRequest)
-      return this.equals((FinalizeWriteIdRequest)that);
-    return false;
-  }
-
-  public boolean equals(FinalizeWriteIdRequest that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_dbName = true && this.isSetDbName();
-    boolean that_present_dbName = true && that.isSetDbName();
-    if (this_present_dbName || that_present_dbName) {
-      if (!(this_present_dbName && that_present_dbName))
-        return false;
-      if (!this.dbName.equals(that.dbName))
-        return false;
-    }
-
-    boolean this_present_tblName = true && this.isSetTblName();
-    boolean that_present_tblName = true && that.isSetTblName();
-    if (this_present_tblName || that_present_tblName) {
-      if (!(this_present_tblName && that_present_tblName))
-        return false;
-      if (!this.tblName.equals(that.tblName))
-        return false;
-    }
-
-    boolean this_present_writeId = true;
-    boolean that_present_writeId = true;
-    if (this_present_writeId || that_present_writeId) {
-      if (!(this_present_writeId && that_present_writeId))
-        return false;
-      if (this.writeId != that.writeId)
-        return false;
-    }
-
-    boolean this_present_commit = true;
-    boolean that_present_commit = true;
-    if (this_present_commit || that_present_commit) {
-      if (!(this_present_commit && that_present_commit))
-        return false;
-      if (this.commit != that.commit)
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_dbName = true && (isSetDbName());
-    list.add(present_dbName);
-    if (present_dbName)
-      list.add(dbName);
-
-    boolean present_tblName = true && (isSetTblName());
-    list.add(present_tblName);
-    if (present_tblName)
-      list.add(tblName);
-
-    boolean present_writeId = true;
-    list.add(present_writeId);
-    if (present_writeId)
-      list.add(writeId);
-
-    boolean present_commit = true;
-    list.add(present_commit);
-    if (present_commit)
-      list.add(commit);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(FinalizeWriteIdRequest other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetDbName()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetTblName()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetWriteId()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(isSetCommit()).compareTo(other.isSetCommit());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetCommit()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.commit, other.commit);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("FinalizeWriteIdRequest(");
-    boolean first = true;
-
-    sb.append("dbName:");
-    if (this.dbName == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.dbName);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("tblName:");
-    if (this.tblName == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.tblName);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("writeId:");
-    sb.append(this.writeId);
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("commit:");
-    sb.append(this.commit);
-    first = false;
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!isSetDbName()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
-    }
-
-    if (!isSetTblName()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString());
-    }
-
-    if (!isSetWriteId()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'writeId' is unset! Struct:" + toString());
-    }
-
-    if (!isSetCommit()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'commit' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class FinalizeWriteIdRequestStandardSchemeFactory implements SchemeFactory {
-    public FinalizeWriteIdRequestStandardScheme getScheme() {
-      return new FinalizeWriteIdRequestStandardScheme();
-    }
-  }
-
-  private static class FinalizeWriteIdRequestStandardScheme extends StandardScheme<FinalizeWriteIdRequest> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, FinalizeWriteIdRequest struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // DB_NAME
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.dbName = iprot.readString();
-              struct.setDbNameIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // TBL_NAME
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.tblName = iprot.readString();
-              struct.setTblNameIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 3: // WRITE_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.writeId = iprot.readI64();
-              struct.setWriteIdIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 4: // COMMIT
-            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
-              struct.commit = iprot.readBool();
-              struct.setCommitIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, FinalizeWriteIdRequest struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.dbName != null) {
-        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
-        oprot.writeString(struct.dbName);
-        oprot.writeFieldEnd();
-      }
-      if (struct.tblName != null) {
-        oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
-        oprot.writeString(struct.tblName);
-        oprot.writeFieldEnd();
-      }
-      oprot.writeFieldBegin(WRITE_ID_FIELD_DESC);
-      oprot.writeI64(struct.writeId);
-      oprot.writeFieldEnd();
-      oprot.writeFieldBegin(COMMIT_FIELD_DESC);
-      oprot.writeBool(struct.commit);
-      oprot.writeFieldEnd();
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class FinalizeWriteIdRequestTupleSchemeFactory implements SchemeFactory {
-    public FinalizeWriteIdRequestTupleScheme getScheme() {
-      return new FinalizeWriteIdRequestTupleScheme();
-    }
-  }
-
-  private static class FinalizeWriteIdRequestTupleScheme extends TupleScheme<FinalizeWriteIdRequest> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, FinalizeWriteIdRequest struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      oprot.writeString(struct.dbName);
-      oprot.writeString(struct.tblName);
-      oprot.writeI64(struct.writeId);
-      oprot.writeBool(struct.commit);
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, FinalizeWriteIdRequest struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      struct.dbName = iprot.readString();
-      struct.setDbNameIsSet(true);
-      struct.tblName = iprot.readString();
-      struct.setTblNameIsSet(true);
-      struct.writeId = iprot.readI64();
-      struct.setWriteIdIsSet(true);
-      struct.commit = iprot.readBool();
-      struct.setCommitIsSet(true);
-    }
-  }
-
-}
-
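
Since the whole request class is deleted, its wire contract is easiest to see in one place. Below is a minimal sketch of how the removed struct was populated, reconstructed purely from the deleted source above; all four fields are required, which is exactly what validate() enforces. It compiles only against pre-patch code, and the concrete values are illustrative.

  import org.apache.hadoop.hive.metastore.api.FinalizeWriteIdRequest;

  public class FinalizeWriteIdSketch {
    public static void main(String[] args) throws Exception {
      // Constructor order follows the deleted class: all fields required.
      FinalizeWriteIdRequest req = new FinalizeWriteIdRequest(
          "default",   // dbName  (field 1, required)
          "t1",        // tblName (field 2, required)
          1L,          // writeId (field 3, required)
          true);       // commit  (field 4, required)
      req.validate();  // throws TProtocolException if a required field is unset
      System.out.println(req);
    }
  }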

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdResult.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdResult.java
deleted file mode 100644
index 8e8b504..0000000
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdResult.java
+++ /dev/null
@@ -1,283 +0,0 @@
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package org.apache.hadoop.hive.metastore.api;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class FinalizeWriteIdResult implements org.apache.thrift.TBase<FinalizeWriteIdResult, FinalizeWriteIdResult._Fields>, java.io.Serializable, Cloneable, Comparable<FinalizeWriteIdResult> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FinalizeWriteIdResult");
-
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new FinalizeWriteIdResultStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new FinalizeWriteIdResultTupleSchemeFactory());
-  }
-
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-;
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FinalizeWriteIdResult.class, metaDataMap);
-  }
-
-  public FinalizeWriteIdResult() {
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public FinalizeWriteIdResult(FinalizeWriteIdResult other) {
-  }
-
-  public FinalizeWriteIdResult deepCopy() {
-    return new FinalizeWriteIdResult(this);
-  }
-
-  @Override
-  public void clear() {
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof FinalizeWriteIdResult)
-      return this.equals((FinalizeWriteIdResult)that);
-    return false;
-  }
-
-  public boolean equals(FinalizeWriteIdResult that) {
-    if (that == null)
-      return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(FinalizeWriteIdResult other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("FinalizeWriteIdResult(");
-    boolean first = true;
-
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class FinalizeWriteIdResultStandardSchemeFactory implements SchemeFactory {
-    public FinalizeWriteIdResultStandardScheme getScheme() {
-      return new FinalizeWriteIdResultStandardScheme();
-    }
-  }
-
-  private static class FinalizeWriteIdResultStandardScheme extends StandardScheme<FinalizeWriteIdResult> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, FinalizeWriteIdResult struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, FinalizeWriteIdResult struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class FinalizeWriteIdResultTupleSchemeFactory implements SchemeFactory {
-    public FinalizeWriteIdResultTupleScheme getScheme() {
-      return new FinalizeWriteIdResultTupleScheme();
-    }
-  }
-
-  private static class FinalizeWriteIdResultTupleScheme extends TupleScheme<FinalizeWriteIdResult> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, FinalizeWriteIdResult struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, FinalizeWriteIdResult struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-    }
-  }
-
-}
-

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java
index 2551870..49a1be2 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java
@@ -346,14 +346,14 @@ public class GetAllFunctionsResponse implements org.apache.thrift.TBase<GetAllFu
           case 1: // FUNCTIONS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list650 = iprot.readListBegin();
-                struct.functions = new ArrayList<Function>(_list650.size);
-                Function _elem651;
-                for (int _i652 = 0; _i652 < _list650.size; ++_i652)
+                org.apache.thrift.protocol.TList _list642 = iprot.readListBegin();
+                struct.functions = new ArrayList<Function>(_list642.size);
+                Function _elem643;
+                for (int _i644 = 0; _i644 < _list642.size; ++_i644)
                 {
-                  _elem651 = new Function();
-                  _elem651.read(iprot);
-                  struct.functions.add(_elem651);
+                  _elem643 = new Function();
+                  _elem643.read(iprot);
+                  struct.functions.add(_elem643);
                 }
                 iprot.readListEnd();
               }
@@ -380,9 +380,9 @@ public class GetAllFunctionsResponse implements org.apache.thrift.TBase<GetAllFu
           oprot.writeFieldBegin(FUNCTIONS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.functions.size()));
-            for (Function _iter653 : struct.functions)
+            for (Function _iter645 : struct.functions)
             {
-              _iter653.write(oprot);
+              _iter645.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -414,9 +414,9 @@ public class GetAllFunctionsResponse implements org.apache.thrift.TBase<GetAllFu
       if (struct.isSetFunctions()) {
         {
           oprot.writeI32(struct.functions.size());
-          for (Function _iter654 : struct.functions)
+          for (Function _iter646 : struct.functions)
           {
-            _iter654.write(oprot);
+            _iter646.write(oprot);
           }
         }
       }
@@ -428,14 +428,14 @@ public class GetAllFunctionsResponse implements org.apache.thrift.TBase<GetAllFu
       BitSet incoming = iprot.readBitSet(1);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list655 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.functions = new ArrayList<Function>(_list655.size);
-          Function _elem656;
-          for (int _i657 = 0; _i657 < _list655.size; ++_i657)
+          org.apache.thrift.protocol.TList _list647 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.functions = new ArrayList<Function>(_list647.size);
+          Function _elem648;
+          for (int _i649 = 0; _i649 < _list647.size; ++_i649)
           {
-            _elem656 = new Function();
-            _elem656.read(iprot);
-            struct.functions.add(_elem656);
+            _elem648 = new Function();
+            _elem648.read(iprot);
+            struct.functions.add(_elem648);
           }
         }
         struct.setFunctionsIsSet(true);
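
Beyond the same global renumbering of generated temporaries, the hunks above also show the tuple-scheme handling of optional fields: isSetFunctions() drives a one-bit BitSet that is written before the payload and read back as `incoming`. A tiny sketch of that pattern follows, using only java.util.BitSet; the protocol calls themselves are elided, so this is a shape illustration rather than a working serializer.

  import java.util.BitSet;

  public class TupleSchemeSketch {
    public static void main(String[] args) {
      BitSet optionals = new BitSet(1);
      boolean functionsIsSet = true;   // stands in for struct.isSetFunctions()
      if (functionsIsSet) {
        optionals.set(0);              // bit 0 corresponds to 'functions'
      }
      // A real TTupleProtocol writer emits the bitset first and then, for
      // each set bit, the field payload (here: an I32 size plus the structs).
      System.out.println("optional-field bitset = " + optionals);
    }
  }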

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdRequest.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdRequest.java
deleted file mode 100644
index dab13fd..0000000
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdRequest.java
+++ /dev/null
@@ -1,490 +0,0 @@
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package org.apache.hadoop.hive.metastore.api;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class GetNextWriteIdRequest implements org.apache.thrift.TBase<GetNextWriteIdRequest, GetNextWriteIdRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GetNextWriteIdRequest> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetNextWriteIdRequest");
-
-  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
-  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new GetNextWriteIdRequestStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new GetNextWriteIdRequestTupleSchemeFactory());
-  }
-
-  private String dbName; // required
-  private String tblName; // required
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    DB_NAME((short)1, "dbName"),
-    TBL_NAME((short)2, "tblName");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // DB_NAME
-          return DB_NAME;
-        case 2: // TBL_NAME
-          return TBL_NAME;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetNextWriteIdRequest.class, metaDataMap);
-  }
-
-  public GetNextWriteIdRequest() {
-  }
-
-  public GetNextWriteIdRequest(
-    String dbName,
-    String tblName)
-  {
-    this();
-    this.dbName = dbName;
-    this.tblName = tblName;
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public GetNextWriteIdRequest(GetNextWriteIdRequest other) {
-    if (other.isSetDbName()) {
-      this.dbName = other.dbName;
-    }
-    if (other.isSetTblName()) {
-      this.tblName = other.tblName;
-    }
-  }
-
-  public GetNextWriteIdRequest deepCopy() {
-    return new GetNextWriteIdRequest(this);
-  }
-
-  @Override
-  public void clear() {
-    this.dbName = null;
-    this.tblName = null;
-  }
-
-  public String getDbName() {
-    return this.dbName;
-  }
-
-  public void setDbName(String dbName) {
-    this.dbName = dbName;
-  }
-
-  public void unsetDbName() {
-    this.dbName = null;
-  }
-
-  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
-  public boolean isSetDbName() {
-    return this.dbName != null;
-  }
-
-  public void setDbNameIsSet(boolean value) {
-    if (!value) {
-      this.dbName = null;
-    }
-  }
-
-  public String getTblName() {
-    return this.tblName;
-  }
-
-  public void setTblName(String tblName) {
-    this.tblName = tblName;
-  }
-
-  public void unsetTblName() {
-    this.tblName = null;
-  }
-
-  /** Returns true if field tblName is set (has been assigned a value) and false otherwise */
-  public boolean isSetTblName() {
-    return this.tblName != null;
-  }
-
-  public void setTblNameIsSet(boolean value) {
-    if (!value) {
-      this.tblName = null;
-    }
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case DB_NAME:
-      if (value == null) {
-        unsetDbName();
-      } else {
-        setDbName((String)value);
-      }
-      break;
-
-    case TBL_NAME:
-      if (value == null) {
-        unsetTblName();
-      } else {
-        setTblName((String)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case DB_NAME:
-      return getDbName();
-
-    case TBL_NAME:
-      return getTblName();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case DB_NAME:
-      return isSetDbName();
-    case TBL_NAME:
-      return isSetTblName();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof GetNextWriteIdRequest)
-      return this.equals((GetNextWriteIdRequest)that);
-    return false;
-  }
-
-  public boolean equals(GetNextWriteIdRequest that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_dbName = true && this.isSetDbName();
-    boolean that_present_dbName = true && that.isSetDbName();
-    if (this_present_dbName || that_present_dbName) {
-      if (!(this_present_dbName && that_present_dbName))
-        return false;
-      if (!this.dbName.equals(that.dbName))
-        return false;
-    }
-
-    boolean this_present_tblName = true && this.isSetTblName();
-    boolean that_present_tblName = true && that.isSetTblName();
-    if (this_present_tblName || that_present_tblName) {
-      if (!(this_present_tblName && that_present_tblName))
-        return false;
-      if (!this.tblName.equals(that.tblName))
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_dbName = true && (isSetDbName());
-    list.add(present_dbName);
-    if (present_dbName)
-      list.add(dbName);
-
-    boolean present_tblName = true && (isSetTblName());
-    list.add(present_tblName);
-    if (present_tblName)
-      list.add(tblName);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(GetNextWriteIdRequest other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetDbName()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetTblName()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("GetNextWriteIdRequest(");
-    boolean first = true;
-
-    sb.append("dbName:");
-    if (this.dbName == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.dbName);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("tblName:");
-    if (this.tblName == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.tblName);
-    }
-    first = false;
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!isSetDbName()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
-    }
-
-    if (!isSetTblName()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class GetNextWriteIdRequestStandardSchemeFactory implements SchemeFactory {
-    public GetNextWriteIdRequestStandardScheme getScheme() {
-      return new GetNextWriteIdRequestStandardScheme();
-    }
-  }
-
-  private static class GetNextWriteIdRequestStandardScheme extends StandardScheme<GetNextWriteIdRequest> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, GetNextWriteIdRequest struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // DB_NAME
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.dbName = iprot.readString();
-              struct.setDbNameIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // TBL_NAME
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.tblName = iprot.readString();
-              struct.setTblNameIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, GetNextWriteIdRequest struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.dbName != null) {
-        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
-        oprot.writeString(struct.dbName);
-        oprot.writeFieldEnd();
-      }
-      if (struct.tblName != null) {
-        oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
-        oprot.writeString(struct.tblName);
-        oprot.writeFieldEnd();
-      }
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class GetNextWriteIdRequestTupleSchemeFactory implements SchemeFactory {
-    public GetNextWriteIdRequestTupleScheme getScheme() {
-      return new GetNextWriteIdRequestTupleScheme();
-    }
-  }
-
-  private static class GetNextWriteIdRequestTupleScheme extends TupleScheme<GetNextWriteIdRequest> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, GetNextWriteIdRequest struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      oprot.writeString(struct.dbName);
-      oprot.writeString(struct.tblName);
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, GetNextWriteIdRequest struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      struct.dbName = iprot.readString();
-      struct.setDbNameIsSet(true);
-      struct.tblName = iprot.readString();
-      struct.setTblNameIsSet(true);
-    }
-  }
-
-}
-

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdResult.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdResult.java
deleted file mode 100644
index 97ad284..0000000
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdResult.java
+++ /dev/null
@@ -1,387 +0,0 @@
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package org.apache.hadoop.hive.metastore.api;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class GetNextWriteIdResult implements org.apache.thrift.TBase<GetNextWriteIdResult, GetNextWriteIdResult._Fields>, java.io.Serializable, Cloneable, Comparable<GetNextWriteIdResult> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetNextWriteIdResult");
-
-  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)1);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new GetNextWriteIdResultStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new GetNextWriteIdResultTupleSchemeFactory());
-  }
-
-  private long writeId; // required
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    WRITE_ID((short)1, "writeId");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // WRITE_ID
-          return WRITE_ID;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  private static final int __WRITEID_ISSET_ID = 0;
-  private byte __isset_bitfield = 0;
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetNextWriteIdResult.class, metaDataMap);
-  }
-
-  public GetNextWriteIdResult() {
-  }
-
-  public GetNextWriteIdResult(
-    long writeId)
-  {
-    this();
-    this.writeId = writeId;
-    setWriteIdIsSet(true);
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public GetNextWriteIdResult(GetNextWriteIdResult other) {
-    __isset_bitfield = other.__isset_bitfield;
-    this.writeId = other.writeId;
-  }
-
-  public GetNextWriteIdResult deepCopy() {
-    return new GetNextWriteIdResult(this);
-  }
-
-  @Override
-  public void clear() {
-    setWriteIdIsSet(false);
-    this.writeId = 0;
-  }
-
-  public long getWriteId() {
-    return this.writeId;
-  }
-
-  public void setWriteId(long writeId) {
-    this.writeId = writeId;
-    setWriteIdIsSet(true);
-  }
-
-  public void unsetWriteId() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID);
-  }
-
-  /** Returns true if field writeId is set (has been assigned a value) and false otherwise */
-  public boolean isSetWriteId() {
-    return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID);
-  }
-
-  public void setWriteIdIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value);
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case WRITE_ID:
-      if (value == null) {
-        unsetWriteId();
-      } else {
-        setWriteId((Long)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case WRITE_ID:
-      return getWriteId();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case WRITE_ID:
-      return isSetWriteId();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof GetNextWriteIdResult)
-      return this.equals((GetNextWriteIdResult)that);
-    return false;
-  }
-
-  public boolean equals(GetNextWriteIdResult that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_writeId = true;
-    boolean that_present_writeId = true;
-    if (this_present_writeId || that_present_writeId) {
-      if (!(this_present_writeId && that_present_writeId))
-        return false;
-      if (this.writeId != that.writeId)
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_writeId = true;
-    list.add(present_writeId);
-    if (present_writeId)
-      list.add(writeId);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(GetNextWriteIdResult other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetWriteId()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("GetNextWriteIdResult(");
-    boolean first = true;
-
-    sb.append("writeId:");
-    sb.append(this.writeId);
-    first = false;
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!isSetWriteId()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'writeId' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class GetNextWriteIdResultStandardSchemeFactory implements SchemeFactory {
-    public GetNextWriteIdResultStandardScheme getScheme() {
-      return new GetNextWriteIdResultStandardScheme();
-    }
-  }
-
-  private static class GetNextWriteIdResultStandardScheme extends StandardScheme<GetNextWriteIdResult> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, GetNextWriteIdResult struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // WRITE_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.writeId = iprot.readI64();
-              struct.setWriteIdIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, GetNextWriteIdResult struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      oprot.writeFieldBegin(WRITE_ID_FIELD_DESC);
-      oprot.writeI64(struct.writeId);
-      oprot.writeFieldEnd();
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class GetNextWriteIdResultTupleSchemeFactory implements SchemeFactory {
-    public GetNextWriteIdResultTupleScheme getScheme() {
-      return new GetNextWriteIdResultTupleScheme();
-    }
-  }
-
-  private static class GetNextWriteIdResultTupleScheme extends TupleScheme<GetNextWriteIdResult> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, GetNextWriteIdResult struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      oprot.writeI64(struct.writeId);
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, GetNextWriteIdResult struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      struct.writeId = iprot.readI64();
-      struct.setWriteIdIsSet(true);
-    }
-  }
-
-}
-

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java
index 00ee22b..225fda9 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java
@@ -525,13 +525,13 @@ public class GetTablesRequest implements org.apache.thrift.TBase<GetTablesReques
           case 2: // TBL_NAMES
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list666 = iprot.readListBegin();
-                struct.tblNames = new ArrayList<String>(_list666.size);
-                String _elem667;
-                for (int _i668 = 0; _i668 < _list666.size; ++_i668)
+                org.apache.thrift.protocol.TList _list658 = iprot.readListBegin();
+                struct.tblNames = new ArrayList<String>(_list658.size);
+                String _elem659;
+                for (int _i660 = 0; _i660 < _list658.size; ++_i660)
                 {
-                  _elem667 = iprot.readString();
-                  struct.tblNames.add(_elem667);
+                  _elem659 = iprot.readString();
+                  struct.tblNames.add(_elem659);
                 }
                 iprot.readListEnd();
               }
@@ -572,9 +572,9 @@ public class GetTablesRequest implements org.apache.thrift.TBase<GetTablesReques
           oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tblNames.size()));
-            for (String _iter669 : struct.tblNames)
+            for (String _iter661 : struct.tblNames)
             {
-              oprot.writeString(_iter669);
+              oprot.writeString(_iter661);
             }
             oprot.writeListEnd();
           }
@@ -617,9 +617,9 @@ public class GetTablesRequest implements org.apache.thrift.TBase<GetTablesReques
       if (struct.isSetTblNames()) {
         {
           oprot.writeI32(struct.tblNames.size());
-          for (String _iter670 : struct.tblNames)
+          for (String _iter662 : struct.tblNames)
           {
-            oprot.writeString(_iter670);
+            oprot.writeString(_iter662);
           }
         }
       }
@@ -636,13 +636,13 @@ public class GetTablesRequest implements org.apache.thrift.TBase<GetTablesReques
       BitSet incoming = iprot.readBitSet(2);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list671 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.tblNames = new ArrayList<String>(_list671.size);
-          String _elem672;
-          for (int _i673 = 0; _i673 < _list671.size; ++_i673)
+          org.apache.thrift.protocol.TList _list663 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.tblNames = new ArrayList<String>(_list663.size);
+          String _elem664;
+          for (int _i665 = 0; _i665 < _list663.size; ++_i665)
           {
-            _elem672 = iprot.readString();
-            struct.tblNames.add(_elem672);
+            _elem664 = iprot.readString();
+            struct.tblNames.add(_elem664);
           }
         }
         struct.setTblNamesIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java
index d91fd62..91cb198 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java
@@ -354,14 +354,14 @@ public class GetTablesResult implements org.apache.thrift.TBase<GetTablesResult,
           case 1: // TABLES
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list674 = iprot.readListBegin();
-                struct.tables = new ArrayList<Table>(_list674.size);
-                Table _elem675;
-                for (int _i676 = 0; _i676 < _list674.size; ++_i676)
+                org.apache.thrift.protocol.TList _list666 = iprot.readListBegin();
+                struct.tables = new ArrayList<Table>(_list666.size);
+                Table _elem667;
+                for (int _i668 = 0; _i668 < _list666.size; ++_i668)
                 {
-                  _elem675 = new Table();
-                  _elem675.read(iprot);
-                  struct.tables.add(_elem675);
+                  _elem667 = new Table();
+                  _elem667.read(iprot);
+                  struct.tables.add(_elem667);
                 }
                 iprot.readListEnd();
               }
@@ -387,9 +387,9 @@ public class GetTablesResult implements org.apache.thrift.TBase<GetTablesResult,
         oprot.writeFieldBegin(TABLES_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tables.size()));
-          for (Table _iter677 : struct.tables)
+          for (Table _iter669 : struct.tables)
           {
-            _iter677.write(oprot);
+            _iter669.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -414,9 +414,9 @@ public class GetTablesResult implements org.apache.thrift.TBase<GetTablesResult,
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.tables.size());
-        for (Table _iter678 : struct.tables)
+        for (Table _iter670 : struct.tables)
         {
-          _iter678.write(oprot);
+          _iter670.write(oprot);
         }
       }
     }
@@ -425,14 +425,14 @@ public class GetTablesResult implements org.apache.thrift.TBase<GetTablesResult,
     public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesResult struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
-        org.apache.thrift.protocol.TList _list679 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.tables = new ArrayList<Table>(_list679.size);
-        Table _elem680;
-        for (int _i681 = 0; _i681 < _list679.size; ++_i681)
+        org.apache.thrift.protocol.TList _list671 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.tables = new ArrayList<Table>(_list671.size);
+        Table _elem672;
+        for (int _i673 = 0; _i673 < _list671.size; ++_i673)
         {
-          _elem680 = new Table();
-          _elem680.read(iprot);
-          struct.tables.add(_elem680);
+          _elem672 = new Table();
+          _elem672.read(iprot);
+          struct.tables.add(_elem672);
         }
       }
       struct.setTablesIsSet(true);


http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index cb1bd59..1915150 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -350,14 +350,6 @@ public class ThriftHiveMetastore {
 
     public CacheFileMetadataResult cache_file_metadata(CacheFileMetadataRequest req) throws org.apache.thrift.TException;
 
-    public GetNextWriteIdResult get_next_write_id(GetNextWriteIdRequest req) throws org.apache.thrift.TException;
-
-    public FinalizeWriteIdResult finalize_write_id(FinalizeWriteIdRequest req) throws org.apache.thrift.TException;
-
-    public HeartbeatWriteIdResult heartbeat_write_id(HeartbeatWriteIdRequest req) throws org.apache.thrift.TException;
-
-    public GetValidWriteIdsResult get_valid_write_ids(GetValidWriteIdsRequest req) throws org.apache.thrift.TException;
-
   }
 
   public interface AsyncIface extends com.facebook.fb303.FacebookService .AsyncIface {
@@ -670,14 +662,6 @@ public class ThriftHiveMetastore {
 
     public void cache_file_metadata(CacheFileMetadataRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
-    public void get_next_write_id(GetNextWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
-
-    public void finalize_write_id(FinalizeWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
-
-    public void heartbeat_write_id(HeartbeatWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
-
-    public void get_valid_write_ids(GetValidWriteIdsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
-
   }
 
   public static class Client extends com.facebook.fb303.FacebookService.Client implements Iface {
@@ -5127,98 +5111,6 @@ public class ThriftHiveMetastore {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "cache_file_metadata failed: unknown result");
     }
 
-    public GetNextWriteIdResult get_next_write_id(GetNextWriteIdRequest req) throws org.apache.thrift.TException
-    {
-      send_get_next_write_id(req);
-      return recv_get_next_write_id();
-    }
-
-    public void send_get_next_write_id(GetNextWriteIdRequest req) throws org.apache.thrift.TException
-    {
-      get_next_write_id_args args = new get_next_write_id_args();
-      args.setReq(req);
-      sendBase("get_next_write_id", args);
-    }
-
-    public GetNextWriteIdResult recv_get_next_write_id() throws org.apache.thrift.TException
-    {
-      get_next_write_id_result result = new get_next_write_id_result();
-      receiveBase(result, "get_next_write_id");
-      if (result.isSetSuccess()) {
-        return result.success;
-      }
-      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_next_write_id failed: unknown result");
-    }
-
-    public FinalizeWriteIdResult finalize_write_id(FinalizeWriteIdRequest req) throws org.apache.thrift.TException
-    {
-      send_finalize_write_id(req);
-      return recv_finalize_write_id();
-    }
-
-    public void send_finalize_write_id(FinalizeWriteIdRequest req) throws org.apache.thrift.TException
-    {
-      finalize_write_id_args args = new finalize_write_id_args();
-      args.setReq(req);
-      sendBase("finalize_write_id", args);
-    }
-
-    public FinalizeWriteIdResult recv_finalize_write_id() throws org.apache.thrift.TException
-    {
-      finalize_write_id_result result = new finalize_write_id_result();
-      receiveBase(result, "finalize_write_id");
-      if (result.isSetSuccess()) {
-        return result.success;
-      }
-      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "finalize_write_id failed: unknown result");
-    }
-
-    public HeartbeatWriteIdResult heartbeat_write_id(HeartbeatWriteIdRequest req) throws org.apache.thrift.TException
-    {
-      send_heartbeat_write_id(req);
-      return recv_heartbeat_write_id();
-    }
-
-    public void send_heartbeat_write_id(HeartbeatWriteIdRequest req) throws org.apache.thrift.TException
-    {
-      heartbeat_write_id_args args = new heartbeat_write_id_args();
-      args.setReq(req);
-      sendBase("heartbeat_write_id", args);
-    }
-
-    public HeartbeatWriteIdResult recv_heartbeat_write_id() throws org.apache.thrift.TException
-    {
-      heartbeat_write_id_result result = new heartbeat_write_id_result();
-      receiveBase(result, "heartbeat_write_id");
-      if (result.isSetSuccess()) {
-        return result.success;
-      }
-      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "heartbeat_write_id failed: unknown result");
-    }
-
-    public GetValidWriteIdsResult get_valid_write_ids(GetValidWriteIdsRequest req) throws org.apache.thrift.TException
-    {
-      send_get_valid_write_ids(req);
-      return recv_get_valid_write_ids();
-    }
-
-    public void send_get_valid_write_ids(GetValidWriteIdsRequest req) throws org.apache.thrift.TException
-    {
-      get_valid_write_ids_args args = new get_valid_write_ids_args();
-      args.setReq(req);
-      sendBase("get_valid_write_ids", args);
-    }
-
-    public GetValidWriteIdsResult recv_get_valid_write_ids() throws org.apache.thrift.TException
-    {
-      get_valid_write_ids_result result = new get_valid_write_ids_result();
-      receiveBase(result, "get_valid_write_ids");
-      if (result.isSetSuccess()) {
-        return result.success;
-      }
-      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_valid_write_ids failed: unknown result");
-    }
-
   }
   public static class AsyncClient extends com.facebook.fb303.FacebookService.AsyncClient implements AsyncIface {
     public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
@@ -10660,134 +10552,6 @@ public class ThriftHiveMetastore {
       }
     }
 
-    public void get_next_write_id(GetNextWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
-      checkReady();
-      get_next_write_id_call method_call = new get_next_write_id_call(req, resultHandler, this, ___protocolFactory, ___transport);
-      this.___currentMethod = method_call;
-      ___manager.call(method_call);
-    }
-
-    public static class get_next_write_id_call extends org.apache.thrift.async.TAsyncMethodCall {
-      private GetNextWriteIdRequest req;
-      public get_next_write_id_call(GetNextWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
-        super(client, protocolFactory, transport, resultHandler, false);
-        this.req = req;
-      }
-
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
-        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_next_write_id", org.apache.thrift.protocol.TMessageType.CALL, 0));
-        get_next_write_id_args args = new get_next_write_id_args();
-        args.setReq(req);
-        args.write(prot);
-        prot.writeMessageEnd();
-      }
-
-      public GetNextWriteIdResult getResult() throws org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
-          throw new IllegalStateException("Method call not finished!");
-        }
-        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
-        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
-        return (new Client(prot)).recv_get_next_write_id();
-      }
-    }
-
-    public void finalize_write_id(FinalizeWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
-      checkReady();
-      finalize_write_id_call method_call = new finalize_write_id_call(req, resultHandler, this, ___protocolFactory, ___transport);
-      this.___currentMethod = method_call;
-      ___manager.call(method_call);
-    }
-
-    public static class finalize_write_id_call extends org.apache.thrift.async.TAsyncMethodCall {
-      private FinalizeWriteIdRequest req;
-      public finalize_write_id_call(FinalizeWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
-        super(client, protocolFactory, transport, resultHandler, false);
-        this.req = req;
-      }
-
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
-        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("finalize_write_id", org.apache.thrift.protocol.TMessageType.CALL, 0));
-        finalize_write_id_args args = new finalize_write_id_args();
-        args.setReq(req);
-        args.write(prot);
-        prot.writeMessageEnd();
-      }
-
-      public FinalizeWriteIdResult getResult() throws org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
-          throw new IllegalStateException("Method call not finished!");
-        }
-        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
-        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
-        return (new Client(prot)).recv_finalize_write_id();
-      }
-    }
-
-    public void heartbeat_write_id(HeartbeatWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
-      checkReady();
-      heartbeat_write_id_call method_call = new heartbeat_write_id_call(req, resultHandler, this, ___protocolFactory, ___transport);
-      this.___currentMethod = method_call;
-      ___manager.call(method_call);
-    }
-
-    public static class heartbeat_write_id_call extends org.apache.thrift.async.TAsyncMethodCall {
-      private HeartbeatWriteIdRequest req;
-      public heartbeat_write_id_call(HeartbeatWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
-        super(client, protocolFactory, transport, resultHandler, false);
-        this.req = req;
-      }
-
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
-        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("heartbeat_write_id", org.apache.thrift.protocol.TMessageType.CALL, 0));
-        heartbeat_write_id_args args = new heartbeat_write_id_args();
-        args.setReq(req);
-        args.write(prot);
-        prot.writeMessageEnd();
-      }
-
-      public HeartbeatWriteIdResult getResult() throws org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
-          throw new IllegalStateException("Method call not finished!");
-        }
-        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
-        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
-        return (new Client(prot)).recv_heartbeat_write_id();
-      }
-    }
-
-    public void get_valid_write_ids(GetValidWriteIdsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
-      checkReady();
-      get_valid_write_ids_call method_call = new get_valid_write_ids_call(req, resultHandler, this, ___protocolFactory, ___transport);
-      this.___currentMethod = method_call;
-      ___manager.call(method_call);
-    }
-
-    public static class get_valid_write_ids_call extends org.apache.thrift.async.TAsyncMethodCall {
-      private GetValidWriteIdsRequest req;
-      public get_valid_write_ids_call(GetValidWriteIdsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
-        super(client, protocolFactory, transport, resultHandler, false);
-        this.req = req;
-      }
-
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
-        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_valid_write_ids", org.apache.thrift.protocol.TMessageType.CALL, 0));
-        get_valid_write_ids_args args = new get_valid_write_ids_args();
-        args.setReq(req);
-        args.write(prot);
-        prot.writeMessageEnd();
-      }
-
-      public GetValidWriteIdsResult getResult() throws org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
-          throw new IllegalStateException("Method call not finished!");
-        }
-        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
-        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
-        return (new Client(prot)).recv_get_valid_write_ids();
-      }
-    }
-
   }
 
   public static class Processor<I extends Iface> extends com.facebook.fb303.FacebookService.Processor<I> implements org.apache.thrift.TProcessor {
@@ -10955,10 +10719,6 @@ public class ThriftHiveMetastore {
       processMap.put("put_file_metadata", new put_file_metadata());
       processMap.put("clear_file_metadata", new clear_file_metadata());
       processMap.put("cache_file_metadata", new cache_file_metadata());
-      processMap.put("get_next_write_id", new get_next_write_id());
-      processMap.put("finalize_write_id", new finalize_write_id());
-      processMap.put("heartbeat_write_id", new heartbeat_write_id());
-      processMap.put("get_valid_write_ids", new get_valid_write_ids());
       return processMap;
     }
 
@@ -14882,86 +14642,6 @@ public class ThriftHiveMetastore {
       }
     }
 
-    public static class get_next_write_id<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_next_write_id_args> {
-      public get_next_write_id() {
-        super("get_next_write_id");
-      }
-
-      public get_next_write_id_args getEmptyArgsInstance() {
-        return new get_next_write_id_args();
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public get_next_write_id_result getResult(I iface, get_next_write_id_args args) throws org.apache.thrift.TException {
-        get_next_write_id_result result = new get_next_write_id_result();
-        result.success = iface.get_next_write_id(args.req);
-        return result;
-      }
-    }
-
-    public static class finalize_write_id<I extends Iface> extends org.apache.thrift.ProcessFunction<I, finalize_write_id_args> {
-      public finalize_write_id() {
-        super("finalize_write_id");
-      }
-
-      public finalize_write_id_args getEmptyArgsInstance() {
-        return new finalize_write_id_args();
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public finalize_write_id_result getResult(I iface, finalize_write_id_args args) throws org.apache.thrift.TException {
-        finalize_write_id_result result = new finalize_write_id_result();
-        result.success = iface.finalize_write_id(args.req);
-        return result;
-      }
-    }
-
-    public static class heartbeat_write_id<I extends Iface> extends org.apache.thrift.ProcessFunction<I, heartbeat_write_id_args> {
-      public heartbeat_write_id() {
-        super("heartbeat_write_id");
-      }
-
-      public heartbeat_write_id_args getEmptyArgsInstance() {
-        return new heartbeat_write_id_args();
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public heartbeat_write_id_result getResult(I iface, heartbeat_write_id_args args) throws org.apache.thrift.TException {
-        heartbeat_write_id_result result = new heartbeat_write_id_result();
-        result.success = iface.heartbeat_write_id(args.req);
-        return result;
-      }
-    }
-
-    public static class get_valid_write_ids<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_valid_write_ids_args> {
-      public get_valid_write_ids() {
-        super("get_valid_write_ids");
-      }
-
-      public get_valid_write_ids_args getEmptyArgsInstance() {
-        return new get_valid_write_ids_args();
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public get_valid_write_ids_result getResult(I iface, get_valid_write_ids_args args) throws org.apache.thrift.TException {
-        get_valid_write_ids_result result = new get_valid_write_ids_result();
-        result.success = iface.get_valid_write_ids(args.req);
-        return result;
-      }
-    }
-
   }
 
   public static class AsyncProcessor<I extends AsyncIface> extends com.facebook.fb303.FacebookService.AsyncProcessor<I> {
@@ -15129,10 +14809,6 @@ public class ThriftHiveMetastore {
       processMap.put("put_file_metadata", new put_file_metadata());
       processMap.put("clear_file_metadata", new clear_file_metadata());
       processMap.put("cache_file_metadata", new cache_file_metadata());
-      processMap.put("get_next_write_id", new get_next_write_id());
-      processMap.put("finalize_write_id", new finalize_write_id());
-      processMap.put("heartbeat_write_id", new heartbeat_write_id());
-      processMap.put("get_valid_write_ids", new get_valid_write_ids());
       return processMap;
     }
 
@@ -24492,210 +24168,6 @@ public class ThriftHiveMetastore {
       }
     }
 
-    public static class get_next_write_id<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_next_write_id_args, GetNextWriteIdResult> {
-      public get_next_write_id() {
-        super("get_next_write_id");
-      }
-
-      public get_next_write_id_args getEmptyArgsInstance() {
-        return new get_next_write_id_args();
-      }
-
-      public AsyncMethodCallback<GetNextWriteIdResult> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<GetNextWriteIdResult>() { 
-          public void onComplete(GetNextWriteIdResult o) {
-            get_next_write_id_result result = new get_next_write_id_result();
-            result.success = o;
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            get_next_write_id_result result = new get_next_write_id_result();
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, get_next_write_id_args args, org.apache.thrift.async.AsyncMethodCallback<GetNextWriteIdResult> resultHandler) throws TException {
-        iface.get_next_write_id(args.req,resultHandler);
-      }
-    }
-
-    public static class finalize_write_id<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, finalize_write_id_args, FinalizeWriteIdResult> {
-      public finalize_write_id() {
-        super("finalize_write_id");
-      }
-
-      public finalize_write_id_args getEmptyArgsInstance() {
-        return new finalize_write_id_args();
-      }
-
-      public AsyncMethodCallback<FinalizeWriteIdResult> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<FinalizeWriteIdResult>() { 
-          public void onComplete(FinalizeWriteIdResult o) {
-            finalize_write_id_result result = new finalize_write_id_result();
-            result.success = o;
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            finalize_write_id_result result = new finalize_write_id_result();
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, finalize_write_id_args args, org.apache.thrift.async.AsyncMethodCallback<FinalizeWriteIdResult> resultHandler) throws TException {
-        iface.finalize_write_id(args.req,resultHandler);
-      }
-    }
-
-    public static class heartbeat_write_id<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, heartbeat_write_id_args, HeartbeatWriteIdResult> {
-      public heartbeat_write_id() {
-        super("heartbeat_write_id");
-      }
-
-      public heartbeat_write_id_args getEmptyArgsInstance() {
-        return new heartbeat_write_id_args();
-      }
-
-      public AsyncMethodCallback<HeartbeatWriteIdResult> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<HeartbeatWriteIdResult>() { 
-          public void onComplete(HeartbeatWriteIdResult o) {
-            heartbeat_write_id_result result = new heartbeat_write_id_result();
-            result.success = o;
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            heartbeat_write_id_result result = new heartbeat_write_id_result();
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, heartbeat_write_id_args args, org.apache.thrift.async.AsyncMethodCallback<HeartbeatWriteIdResult> resultHandler) throws TException {
-        iface.heartbeat_write_id(args.req,resultHandler);
-      }
-    }
-
-    public static class get_valid_write_ids<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_valid_write_ids_args, GetValidWriteIdsResult> {
-      public get_valid_write_ids() {
-        super("get_valid_write_ids");
-      }
-
-      public get_valid_write_ids_args getEmptyArgsInstance() {
-        return new get_valid_write_ids_args();
-      }
-
-      public AsyncMethodCallback<GetValidWriteIdsResult> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<GetValidWriteIdsResult>() { 
-          public void onComplete(GetValidWriteIdsResult o) {
-            get_valid_write_ids_result result = new get_valid_write_ids_result();
-            result.success = o;
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            get_valid_write_ids_result result = new get_valid_write_ids_result();
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, get_valid_write_ids_args args, org.apache.thrift.async.AsyncMethodCallback<GetValidWriteIdsResult> resultHandler) throws TException {
-        iface.get_valid_write_ids(args.req,resultHandler);
-      }
-    }
-
   }
 
   public static class getMetaConf_args implements org.apache.thrift.TBase<getMetaConf_args, getMetaConf_args._Fields>, java.io.Serializable, Cloneable, Comparable<getMetaConf_args>   {
@@ -30091,13 +29563,13 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list682 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list682.size);
-                  String _elem683;
-                  for (int _i684 = 0; _i684 < _list682.size; ++_i684)
+                  org.apache.thrift.protocol.TList _list674 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list674.size);
+                  String _elem675;
+                  for (int _i676 = 0; _i676 < _list674.size; ++_i676)
                   {
-                    _elem683 = iprot.readString();
-                    struct.success.add(_elem683);
+                    _elem675 = iprot.readString();
+                    struct.success.add(_elem675);
                   }
                   iprot.readListEnd();
                 }
@@ -30132,9 +29604,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter685 : struct.success)
+            for (String _iter677 : struct.success)
             {
-              oprot.writeString(_iter685);
+              oprot.writeString(_iter677);
             }
             oprot.writeListEnd();
           }
@@ -30173,9 +29645,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter686 : struct.success)
+            for (String _iter678 : struct.success)
             {
-              oprot.writeString(_iter686);
+              oprot.writeString(_iter678);
             }
           }
         }
@@ -30190,13 +29662,13 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list687 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list687.size);
-            String _elem688;
-            for (int _i689 = 0; _i689 < _list687.size; ++_i689)
+            org.apache.thrift.protocol.TList _list679 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list679.size);
+            String _elem680;
+            for (int _i681 = 0; _i681 < _list679.size; ++_i681)
             {
-              _elem688 = iprot.readString();
-              struct.success.add(_elem688);
+              _elem680 = iprot.readString();
+              struct.success.add(_elem680);
             }
           }
           struct.setSuccessIsSet(true);
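
Nearly all of the remaining churn in this file is mechanical: dropping get_valid_write_ids from the IDL makes the Thrift compiler renumber its auto-generated temporaries (_list, _elem, _i, _iter, _map, _key, _val), so every later method shifts down (e.g. _list682 becomes _list674) with no behavioral change. The loop itself is the generator's standard list round-trip; a hedged standalone sketch of it against the real Thrift runtime follows (class and variable names here are illustrative, not from the patch):

    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.protocol.TList;
    import org.apache.thrift.protocol.TType;
    import org.apache.thrift.transport.TMemoryBuffer;

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class ListRoundTrip {
      public static void main(String[] args) throws TException {
        TMemoryBuffer buf = new TMemoryBuffer(256);
        TBinaryProtocol prot = new TBinaryProtocol(buf);

        // write side: mirrors the generated oprot.writeListBegin(...) loop
        List<String> names = Arrays.asList("db1", "db2");
        prot.writeListBegin(new TList(TType.STRING, names.size()));
        for (String s : names) {
          prot.writeString(s);
        }
        prot.writeListEnd();

        // read side: mirrors the generated _list/_elem/_i loop
        TList hdr = prot.readListBegin();
        List<String> success = new ArrayList<>(hdr.size);
        for (int i = 0; i < hdr.size; ++i) {
          success.add(prot.readString());
        }
        prot.readListEnd();

        System.out.println(success);  // prints [db1, db2]
      }
    }
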
@@ -30850,13 +30322,13 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list690 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list690.size);
-                  String _elem691;
-                  for (int _i692 = 0; _i692 < _list690.size; ++_i692)
+                  org.apache.thrift.protocol.TList _list682 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list682.size);
+                  String _elem683;
+                  for (int _i684 = 0; _i684 < _list682.size; ++_i684)
                   {
-                    _elem691 = iprot.readString();
-                    struct.success.add(_elem691);
+                    _elem683 = iprot.readString();
+                    struct.success.add(_elem683);
                   }
                   iprot.readListEnd();
                 }
@@ -30891,9 +30363,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter693 : struct.success)
+            for (String _iter685 : struct.success)
             {
-              oprot.writeString(_iter693);
+              oprot.writeString(_iter685);
             }
             oprot.writeListEnd();
           }
@@ -30932,9 +30404,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter694 : struct.success)
+            for (String _iter686 : struct.success)
             {
-              oprot.writeString(_iter694);
+              oprot.writeString(_iter686);
             }
           }
         }
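
The TupleScheme hunks intentionally differ from the StandardScheme ones: the tuple encoding writes only the element count with writeI32 and never emits writeListBegin/writeListEnd, because the reader can rebuild the TList header from the element type fixed in the IDL. A small sketch of that asymmetry, assuming a TTupleProtocol over a memory buffer (the data values are made up):

    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TList;
    import org.apache.thrift.protocol.TTupleProtocol;
    import org.apache.thrift.protocol.TType;
    import org.apache.thrift.transport.TMemoryBuffer;

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class TupleListRoundTrip {
      public static void main(String[] args) throws TException {
        TTupleProtocol prot = new TTupleProtocol(new TMemoryBuffer(128));

        List<String> partNames = Arrays.asList("p=455", "p=456", "p=457");
        prot.writeI32(partNames.size());       // size only; no writeListBegin
        for (String s : partNames) {
          prot.writeString(s);
        }

        // reader rebuilds the header; the element type comes from the IDL
        TList hdr = new TList(TType.STRING, prot.readI32());
        List<String> out = new ArrayList<>(hdr.size);
        for (int i = 0; i < hdr.size; ++i) {
          out.add(prot.readString());
        }
        System.out.println(out);  // prints [p=455, p=456, p=457]
      }
    }

The saving is small per list, but across a message with many collections the omitted begin/end headers are exactly what makes the tuple scheme the compact choice for RPC.
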
@@ -30949,13 +30421,13 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list695 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list695.size);
-            String _elem696;
-            for (int _i697 = 0; _i697 < _list695.size; ++_i697)
+            org.apache.thrift.protocol.TList _list687 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list687.size);
+            String _elem688;
+            for (int _i689 = 0; _i689 < _list687.size; ++_i689)
             {
-              _elem696 = iprot.readString();
-              struct.success.add(_elem696);
+              _elem688 = iprot.readString();
+              struct.success.add(_elem688);
             }
           }
           struct.setSuccessIsSet(true);
@@ -35562,16 +35034,16 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                 {
-                  org.apache.thrift.protocol.TMap _map698 = iprot.readMapBegin();
-                  struct.success = new HashMap<String,Type>(2*_map698.size);
-                  String _key699;
-                  Type _val700;
-                  for (int _i701 = 0; _i701 < _map698.size; ++_i701)
+                  org.apache.thrift.protocol.TMap _map690 = iprot.readMapBegin();
+                  struct.success = new HashMap<String,Type>(2*_map690.size);
+                  String _key691;
+                  Type _val692;
+                  for (int _i693 = 0; _i693 < _map690.size; ++_i693)
                   {
-                    _key699 = iprot.readString();
-                    _val700 = new Type();
-                    _val700.read(iprot);
-                    struct.success.put(_key699, _val700);
+                    _key691 = iprot.readString();
+                    _val692 = new Type();
+                    _val692.read(iprot);
+                    struct.success.put(_key691, _val692);
                   }
                   iprot.readMapEnd();
                 }
@@ -35606,10 +35078,10 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (Map.Entry<String, Type> _iter702 : struct.success.entrySet())
+            for (Map.Entry<String, Type> _iter694 : struct.success.entrySet())
             {
-              oprot.writeString(_iter702.getKey());
-              _iter702.getValue().write(oprot);
+              oprot.writeString(_iter694.getKey());
+              _iter694.getValue().write(oprot);
             }
             oprot.writeMapEnd();
           }
@@ -35648,10 +35120,10 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (Map.Entry<String, Type> _iter703 : struct.success.entrySet())
+            for (Map.Entry<String, Type> _iter695 : struct.success.entrySet())
             {
-              oprot.writeString(_iter703.getKey());
-              _iter703.getValue().write(oprot);
+              oprot.writeString(_iter695.getKey());
+              _iter695.getValue().write(oprot);
             }
           }
         }
@@ -35666,16 +35138,16 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TMap _map704 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new HashMap<String,Type>(2*_map704.size);
-            String _key705;
-            Type _val706;
-            for (int _i707 = 0; _i707 < _map704.size; ++_i707)
+            org.apache.thrift.protocol.TMap _map696 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new HashMap<String,Type>(2*_map696.size);
+            String _key697;
+            Type _val698;
+            for (int _i699 = 0; _i699 < _map696.size; ++_i699)
             {
-              _key705 = iprot.readString();
-              _val706 = new Type();
-              _val706.read(iprot);
-              struct.success.put(_key705, _val706);
+              _key697 = iprot.readString();
+              _val698 = new Type();
+              _val698.read(iprot);
+              struct.success.put(_key697, _val698);
             }
           }
           struct.setSuccessIsSet(true);
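
The map<string,Type> hunks above follow the same renumbering, and the generated loop pre-sizes the HashMap to 2*size so the default load factor never forces a rehash mid-fill. Since the metastore Type struct is not constructible here, this sketch round-trips map<string,i32> instead; the shape is otherwise the same:

    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.protocol.TMap;
    import org.apache.thrift.protocol.TType;
    import org.apache.thrift.transport.TMemoryBuffer;

    import java.util.HashMap;
    import java.util.Map;

    public class MapRoundTrip {
      public static void main(String[] args) throws TException {
        TBinaryProtocol prot = new TBinaryProtocol(new TMemoryBuffer(256));

        Map<String, Integer> in = new HashMap<>();
        in.put("int", 4);
        in.put("string", -1);
        prot.writeMapBegin(new TMap(TType.STRING, TType.I32, in.size()));
        for (Map.Entry<String, Integer> e : in.entrySet()) {
          prot.writeString(e.getKey());
          prot.writeI32(e.getValue());
        }
        prot.writeMapEnd();

        TMap hdr = prot.readMapBegin();
        // 2 * size mirrors the generated pre-sizing, which keeps the default
        // HashMap load factor from triggering a resize during the fill
        Map<String, Integer> out = new HashMap<>(2 * hdr.size);
        for (int i = 0; i < hdr.size; ++i) {
          out.put(prot.readString(), prot.readI32());
        }
        prot.readMapEnd();
        System.out.println(out);
      }
    }
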
@@ -36710,14 +36182,14 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list708 = iprot.readListBegin();
-                  struct.success = new ArrayList<FieldSchema>(_list708.size);
-                  FieldSchema _elem709;
-                  for (int _i710 = 0; _i710 < _list708.size; ++_i710)
+                  org.apache.thrift.protocol.TList _list700 = iprot.readListBegin();
+                  struct.success = new ArrayList<FieldSchema>(_list700.size);
+                  FieldSchema _elem701;
+                  for (int _i702 = 0; _i702 < _list700.size; ++_i702)
                   {
-                    _elem709 = new FieldSchema();
-                    _elem709.read(iprot);
-                    struct.success.add(_elem709);
+                    _elem701 = new FieldSchema();
+                    _elem701.read(iprot);
+                    struct.success.add(_elem701);
                   }
                   iprot.readListEnd();
                 }
@@ -36770,9 +36242,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (FieldSchema _iter711 : struct.success)
+            for (FieldSchema _iter703 : struct.success)
             {
-              _iter711.write(oprot);
+              _iter703.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -36827,9 +36299,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (FieldSchema _iter712 : struct.success)
+            for (FieldSchema _iter704 : struct.success)
             {
-              _iter712.write(oprot);
+              _iter704.write(oprot);
             }
           }
         }
@@ -36850,14 +36322,14 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list713 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<FieldSchema>(_list713.size);
-            FieldSchema _elem714;
-            for (int _i715 = 0; _i715 < _list713.size; ++_i715)
+            org.apache.thrift.protocol.TList _list705 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<FieldSchema>(_list705.size);
+            FieldSchema _elem706;
+            for (int _i707 = 0; _i707 < _list705.size; ++_i707)
             {
-              _elem714 = new FieldSchema();
-              _elem714.read(iprot);
-              struct.success.add(_elem714);
+              _elem706 = new FieldSchema();
+              _elem706.read(iprot);
+              struct.success.add(_elem706);
             }
           }
           struct.setSuccessIsSet(true);
@@ -38011,14 +37483,14 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list716 = iprot.readListBegin();
-                  struct.success = new ArrayList<FieldSchema>(_list716.size);
-                  FieldSchema _elem717;
-                  for (int _i718 = 0; _i718 < _list716.size; ++_i718)
+                  org.apache.thrift.protocol.TList _list708 = iprot.readListBegin();
+                  struct.success = new ArrayList<FieldSchema>(_list708.size);
+                  FieldSchema _elem709;
+                  for (int _i710 = 0; _i710 < _list708.size; ++_i710)
                   {
-                    _elem717 = new FieldSchema();
-                    _elem717.read(iprot);
-                    struct.success.add(_elem717);
+                    _elem709 = new FieldSchema();
+                    _elem709.read(iprot);
+                    struct.success.add(_elem709);
                   }
                   iprot.readListEnd();
                 }
@@ -38071,9 +37543,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (FieldSchema _iter719 : struct.success)
+            for (FieldSchema _iter711 : struct.success)
             {
-              _iter719.write(oprot);
+              _iter711.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -38128,9 +37600,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (FieldSchema _iter720 : struct.success)
+            for (FieldSchema _iter712 : struct.success)
             {
-              _iter720.write(oprot);
+              _iter712.write(oprot);
             }
           }
         }
@@ -38151,14 +37623,14 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list721 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<FieldSchema>(_list721.size);
-            FieldSchema _elem722;
-            for (int _i723 = 0; _i723 < _list721.size; ++_i723)
+            org.apache.thrift.protocol.TList _list713 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<FieldSchema>(_list713.size);
+            FieldSchema _elem714;
+            for (int _i715 = 0; _i715 < _list713.size; ++_i715)
             {
-              _elem722 = new FieldSchema();
-              _elem722.read(iprot);
-              struct.success.add(_elem722);
+              _elem714 = new FieldSchema();
+              _elem714.read(iprot);
+              struct.success.add(_elem714);
             }
           }
           struct.setSuccessIsSet(true);
@@ -39203,14 +38675,14 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list724 = iprot.readListBegin();
-                  struct.success = new ArrayList<FieldSchema>(_list724.size);
-                  FieldSchema _elem725;
-                  for (int _i726 = 0; _i726 < _list724.size; ++_i726)
+                  org.apache.thrift.protocol.TList _list716 = iprot.readListBegin();
+                  struct.success = new ArrayList<FieldSchema>(_list716.size);
+                  FieldSchema _elem717;
+                  for (int _i718 = 0; _i718 < _list716.size; ++_i718)
                   {
-                    _elem725 = new FieldSchema();
-                    _elem725.read(iprot);
-                    struct.success.add(_elem725);
+                    _elem717 = new FieldSchema();
+                    _elem717.read(iprot);
+                    struct.success.add(_elem717);
                   }
                   iprot.readListEnd();
                 }
@@ -39263,9 +38735,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (FieldSchema _iter727 : struct.success)
+            for (FieldSchema _iter719 : struct.success)
             {
-              _iter727.write(oprot);
+              _iter719.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -39320,9 +38792,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (FieldSchema _iter728 : struct.success)
+            for (FieldSchema _iter720 : struct.success)
             {
-              _iter728.write(oprot);
+              _iter720.write(oprot);
             }
           }
         }
@@ -39343,14 +38815,14 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<FieldSchema>(_list729.size);
-            FieldSchema _elem730;
-            for (int _i731 = 0; _i731 < _list729.size; ++_i731)
+            org.apache.thrift.protocol.TList _list721 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<FieldSchema>(_list721.size);
+            FieldSchema _elem722;
+            for (int _i723 = 0; _i723 < _list721.size; ++_i723)
             {
-              _elem730 = new FieldSchema();
-              _elem730.read(iprot);
-              struct.success.add(_elem730);
+              _elem722 = new FieldSchema();
+              _elem722.read(iprot);
+              struct.success.add(_elem722);
             }
           }
           struct.setSuccessIsSet(true);
@@ -40504,14 +39976,14 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list732 = iprot.readListBegin();
-                  struct.success = new ArrayList<FieldSchema>(_list732.size);
-                  FieldSchema _elem733;
-                  for (int _i734 = 0; _i734 < _list732.size; ++_i734)
+                  org.apache.thrift.protocol.TList _list724 = iprot.readListBegin();
+                  struct.success = new ArrayList<FieldSchema>(_list724.size);
+                  FieldSchema _elem725;
+                  for (int _i726 = 0; _i726 < _list724.size; ++_i726)
                   {
-                    _elem733 = new FieldSchema();
-                    _elem733.read(iprot);
-                    struct.success.add(_elem733);
+                    _elem725 = new FieldSchema();
+                    _elem725.read(iprot);
+                    struct.success.add(_elem725);
                   }
                   iprot.readListEnd();
                 }
@@ -40564,9 +40036,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (FieldSchema _iter735 : struct.success)
+            for (FieldSchema _iter727 : struct.success)
             {
-              _iter735.write(oprot);
+              _iter727.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -40621,9 +40093,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (FieldSchema _iter736 : struct.success)
+            for (FieldSchema _iter728 : struct.success)
             {
-              _iter736.write(oprot);
+              _iter728.write(oprot);
             }
           }
         }
@@ -40644,14 +40116,14 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<FieldSchema>(_list737.size);
-            FieldSchema _elem738;
-            for (int _i739 = 0; _i739 < _list737.size; ++_i739)
+            org.apache.thrift.protocol.TList _list729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<FieldSchema>(_list729.size);
+            FieldSchema _elem730;
+            for (int _i731 = 0; _i731 < _list729.size; ++_i731)
             {
-              _elem738 = new FieldSchema();
-              _elem738.read(iprot);
-              struct.success.add(_elem738);
+              _elem730 = new FieldSchema();
+              _elem730.read(iprot);
+              struct.success.add(_elem730);
             }
           }
           struct.setSuccessIsSet(true);
@@ -43376,14 +42848,14 @@ public class ThriftHiveMetastore {
             case 2: // PRIMARY_KEYS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list740 = iprot.readListBegin();
-                  struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list740.size);
-                  SQLPrimaryKey _elem741;
-                  for (int _i742 = 0; _i742 < _list740.size; ++_i742)
+                  org.apache.thrift.protocol.TList _list732 = iprot.readListBegin();
+                  struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list732.size);
+                  SQLPrimaryKey _elem733;
+                  for (int _i734 = 0; _i734 < _list732.size; ++_i734)
                   {
-                    _elem741 = new SQLPrimaryKey();
-                    _elem741.read(iprot);
-                    struct.primaryKeys.add(_elem741);
+                    _elem733 = new SQLPrimaryKey();
+                    _elem733.read(iprot);
+                    struct.primaryKeys.add(_elem733);
                   }
                   iprot.readListEnd();
                 }
@@ -43395,14 +42867,14 @@ public class ThriftHiveMetastore {
             case 3: // FOREIGN_KEYS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list743 = iprot.readListBegin();
-                  struct.foreignKeys = new ArrayList<SQLForeignKey>(_list743.size);
-                  SQLForeignKey _elem744;
-                  for (int _i745 = 0; _i745 < _list743.size; ++_i745)
+                  org.apache.thrift.protocol.TList _list735 = iprot.readListBegin();
+                  struct.foreignKeys = new ArrayList<SQLForeignKey>(_list735.size);
+                  SQLForeignKey _elem736;
+                  for (int _i737 = 0; _i737 < _list735.size; ++_i737)
                   {
-                    _elem744 = new SQLForeignKey();
-                    _elem744.read(iprot);
-                    struct.foreignKeys.add(_elem744);
+                    _elem736 = new SQLForeignKey();
+                    _elem736.read(iprot);
+                    struct.foreignKeys.add(_elem736);
                   }
                   iprot.readListEnd();
                 }
@@ -43433,9 +42905,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size()));
-            for (SQLPrimaryKey _iter746 : struct.primaryKeys)
+            for (SQLPrimaryKey _iter738 : struct.primaryKeys)
             {
-              _iter746.write(oprot);
+              _iter738.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -43445,9 +42917,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size()));
-            for (SQLForeignKey _iter747 : struct.foreignKeys)
+            for (SQLForeignKey _iter739 : struct.foreignKeys)
             {
-              _iter747.write(oprot);
+              _iter739.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -43487,18 +42959,18 @@ public class ThriftHiveMetastore {
         if (struct.isSetPrimaryKeys()) {
           {
             oprot.writeI32(struct.primaryKeys.size());
-            for (SQLPrimaryKey _iter748 : struct.primaryKeys)
+            for (SQLPrimaryKey _iter740 : struct.primaryKeys)
             {
-              _iter748.write(oprot);
+              _iter740.write(oprot);
             }
           }
         }
         if (struct.isSetForeignKeys()) {
           {
             oprot.writeI32(struct.foreignKeys.size());
-            for (SQLForeignKey _iter749 : struct.foreignKeys)
+            for (SQLForeignKey _iter741 : struct.foreignKeys)
             {
-              _iter749.write(oprot);
+              _iter741.write(oprot);
             }
           }
         }
@@ -43515,28 +42987,28 @@ public class ThriftHiveMetastore {
         }
         if (incoming.get(1)) {
           {
-            org.apache.thrift.protocol.TList _list750 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list750.size);
-            SQLPrimaryKey _elem751;
-            for (int _i752 = 0; _i752 < _list750.size; ++_i752)
+            org.apache.thrift.protocol.TList _list742 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list742.size);
+            SQLPrimaryKey _elem743;
+            for (int _i744 = 0; _i744 < _list742.size; ++_i744)
             {
-              _elem751 = new SQLPrimaryKey();
-              _elem751.read(iprot);
-              struct.primaryKeys.add(_elem751);
+              _elem743 = new SQLPrimaryKey();
+              _elem743.read(iprot);
+              struct.primaryKeys.add(_elem743);
             }
           }
           struct.setPrimaryKeysIsSet(true);
         }
         if (incoming.get(2)) {
           {
-            org.apache.thrift.protocol.TList _list753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.foreignKeys = new ArrayList<SQLForeignKey>(_list753.size);
-            SQLForeignKey _elem754;
-            for (int _i755 = 0; _i755 < _list753.size; ++_i755)
+            org.apache.thrift.protocol.TList _list745 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.foreignKeys = new ArrayList<SQLForeignKey>(_list745.size);
+            SQLForeignKey _elem746;
+            for (int _i747 = 0; _i747 < _list745.size; ++_i747)
             {
-              _elem754 = new SQLForeignKey();
-              _elem754.read(iprot);
-              struct.foreignKeys.add(_elem754);
+              _elem746 = new SQLForeignKey();
+              _elem746.read(iprot);
+              struct.foreignKeys.add(_elem746);
             }
           }
           struct.setForeignKeysIsSet(true);
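
In the tuple scheme, optional and unset fields are guarded by a leading BitSet: the writer records which fields follow via writeBitSet(optionals, n), and the reader consults incoming.get(k) before deserializing slot k, exactly as the create_table_with_constraints hunks above do for primaryKeys and foreignKeys. A minimal sketch of that guard (the slot meanings and the i32 payload are invented for illustration):

    import java.util.BitSet;

    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TTupleProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class BitSetOptionals {
      public static void main(String[] args) throws TException {
        TTupleProtocol prot = new TTupleProtocol(new TMemoryBuffer(64));

        // write: of three possible optional fields, only slot 1 is set
        BitSet optionals = new BitSet();
        optionals.set(1);
        prot.writeBitSet(optionals, 3);
        prot.writeI32(42);                 // payload for slot 1

        // read: check each slot before consuming its payload
        BitSet incoming = prot.readBitSet(3);
        if (incoming.get(0)) { /* would read slot 0 here */ }
        if (incoming.get(1)) {
          System.out.println("slot 1 = " + prot.readI32());  // slot 1 = 42
        }
        if (incoming.get(2)) { /* would read slot 2 here */ }
      }
    }
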
@@ -49370,13 +48842,13 @@ public class ThriftHiveMetastore {
             case 3: // PART_NAMES
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list756 = iprot.readListBegin();
-                  struct.partNames = new ArrayList<String>(_list756.size);
-                  String _elem757;
-                  for (int _i758 = 0; _i758 < _list756.size; ++_i758)
+                  org.apache.thrift.protocol.TList _list748 = iprot.readListBegin();
+                  struct.partNames = new ArrayList<String>(_list748.size);
+                  String _elem749;
+                  for (int _i750 = 0; _i750 < _list748.size; ++_i750)
                   {
-                    _elem757 = iprot.readString();
-                    struct.partNames.add(_elem757);
+                    _elem749 = iprot.readString();
+                    struct.partNames.add(_elem749);
                   }
                   iprot.readListEnd();
                 }
@@ -49412,9 +48884,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(PART_NAMES_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size()));
-            for (String _iter759 : struct.partNames)
+            for (String _iter751 : struct.partNames)
             {
-              oprot.writeString(_iter759);
+              oprot.writeString(_iter751);
             }
             oprot.writeListEnd();
           }
@@ -49457,9 +48929,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetPartNames()) {
           {
             oprot.writeI32(struct.partNames.size());
-            for (String _iter760 : struct.partNames)
+            for (String _iter752 : struct.partNames)
             {
-              oprot.writeString(_iter760);
+              oprot.writeString(_iter752);
             }
           }
         }
@@ -49479,13 +48951,13 @@ public class ThriftHiveMetastore {
         }
         if (incoming.get(2)) {
           {
-            org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.partNames = new ArrayList<String>(_list761.size);
-            String _elem762;
-            for (int _i763 = 0; _i763 < _list761.size; ++_i763)
+            org.apache.thrift.protocol.TList _list753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.partNames = new ArrayList<String>(_list753.size);
+            String _elem754;
+            for (int _i755 = 0; _i755 < _list753.size; ++_i755)
             {
-              _elem762 = iprot.readString();
-              struct.partNames.add(_elem762);
+              _elem754 = iprot.readString();
+              struct.partNames.add(_elem754);
             }
           }
           struct.setPartNamesIsSet(true);
@@ -50710,13 +50182,13 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list764 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list764.size);
-                  String _elem765;
-                  for (int _i766 = 0; _i766 < _list764.size; ++_i766)
+                  org.apache.thrift.protocol.TList _list756 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list756.size);
+                  String _elem757;
+                  for (int _i758 = 0; _i758 < _list756.size; ++_i758)
                   {
-                    _elem765 = iprot.readString();
-                    struct.success.add(_elem765);
+                    _elem757 = iprot.readString();
+                    struct.success.add(_elem757);
                   }
                   iprot.readListEnd();
                 }
@@ -50751,9 +50223,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter767 : struct.success)
+            for (String _iter759 : struct.success)
             {
-              oprot.writeString(_iter767);
+              oprot.writeString(_iter759);
             }
             oprot.writeListEnd();
           }
@@ -50792,9 +50264,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter768 : struct.success)
+            for (String _iter760 : struct.success)
             {
-              oprot.writeString(_iter768);
+              oprot.writeString(_iter760);
             }
           }
         }
@@ -50809,13 +50281,13 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list769.size);
-            String _elem770;
-            for (int _i771 = 0; _i771 < _list769.size; ++_i771)
+            org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list761.size);
+            String _elem762;
+            for (int _i763 = 0; _i763 < _list761.size; ++_i763)
             {
-              _elem770 = iprot.readString();
-              struct.success.add(_elem770);
+              _elem762 = iprot.readString();
+              struct.success.add(_elem762);
             }
           }
           struct.setSuccessIsSet(true);
@@ -51789,13 +51261,13 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list772 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list772.size);
-                  String _elem773;
-                  for (int _i774 = 0; _i774 < _list772.size; ++_i774)
+                  org.apache.thrift.protocol.TList _list764 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list764.size);
+                  String _elem765;
+                  for (int _i766 = 0; _i766 < _list764.size; ++_i766)
                   {
-                    _elem773 = iprot.readString();
-                    struct.success.add(_elem773);
+                    _elem765 = iprot.readString();
+                    struct.success.add(_elem765);
                   }
                   iprot.readListEnd();
                 }
@@ -51830,9 +51302,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter775 : struct.success)
+            for (String _iter767 : struct.success)
             {
-              oprot.writeString(_iter775);
+              oprot.writeString(_iter767);
             }
             oprot.writeListEnd();
           }
@@ -51871,9 +51343,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter776 : struct.success)
+            for (String _iter768 : struct.success)
             {
-              oprot.writeString(_iter776);
+              oprot.writeString(_iter768);
             }
           }
         }
@@ -51888,13 +51360,13 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list777.size);
-            String _elem778;
-            for (int _i779 = 0; _i779 < _list777.size; ++_i779)
+            org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list769.size);
+            String _elem770;
+            for (int _i771 = 0; _i771 < _list769.size; ++_i771)
             {
-              _elem778 = iprot.readString();
-              struct.success.add(_elem778);
+              _elem770 = iprot.readString();
+              struct.success.add(_elem770);
             }
           }
           struct.setSuccessIsSet(true);
@@ -52399,13 +51871,13 @@ public class ThriftHiveMetastore {
             case 3: // TBL_TYPES
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list780 = iprot.readListBegin();
-                  struct.tbl_types = new ArrayList<String>(_list780.size);
-                  String _elem781;
-                  for (int _i782 = 0; _i782 < _list780.size; ++_i782)
+                  org.apache.thrift.protocol.TList _list772 = iprot.readListBegin();
+                  struct.tbl_types = new ArrayList<String>(_list772.size);
+                  String _elem773;
+                  for (int _i774 = 0; _i774 < _list772.size; ++_i774)
                   {
-                    _elem781 = iprot.readString();
-                    struct.tbl_types.add(_elem781);
+                    _elem773 = iprot.readString();
+                    struct.tbl_types.add(_elem773);
                   }
                   iprot.readListEnd();
                 }
@@ -52441,9 +51913,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size()));
-            for (String _iter783 : struct.tbl_types)
+            for (String _iter775 : struct.tbl_types)
             {
-              oprot.writeString(_iter783);
+              oprot.writeString(_iter775);
             }
             oprot.writeListEnd();
           }
@@ -52486,9 +51958,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetTbl_types()) {
           {
             oprot.writeI32(struct.tbl_types.size());
-            for (String _iter784 : struct.tbl_types)
+            for (String _iter776 : struct.tbl_types)
             {
-              oprot.writeString(_iter784);
+              oprot.writeString(_iter776);
             }
           }
         }
@@ -52508,13 +51980,13 @@ public class ThriftHiveMetastore {
         }
         if (incoming.get(2)) {
           {
-            org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.tbl_types = new ArrayList<String>(_list785.size);
-            String _elem786;
-            for (int _i787 = 0; _i787 < _list785.size; ++_i787)
+            org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.tbl_types = new ArrayList<String>(_list777.size);
+            String _elem778;
+            for (int _i779 = 0; _i779 < _list777.size; ++_i779)
             {
-              _elem786 = iprot.readString();
-              struct.tbl_types.add(_elem786);
+              _elem778 = iprot.readString();
+              struct.tbl_types.add(_elem778);
             }
           }
           struct.setTbl_typesIsSet(true);
@@ -52920,14 +52392,14 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list788 = iprot.readListBegin();
-                  struct.success = new ArrayList<TableMeta>(_list788.size);
-                  TableMeta _elem789;
-                  for (int _i790 = 0; _i790 < _list788.size; ++_i790)
+                  org.apache.thrift.protocol.TList _list780 = iprot.readListBegin();
+                  struct.success = new ArrayList<TableMeta>(_list780.size);
+                  TableMeta _elem781;
+                  for (int _i782 = 0; _i782 < _list780.size; ++_i782)
                   {
-                    _elem789 = new TableMeta();
-                    _elem789.read(iprot);
-                    struct.success.add(_elem789);
+                    _elem781 = new TableMeta();
+                    _elem781.read(iprot);
+                    struct.success.add(_elem781);
                   }
                   iprot.readListEnd();
                 }
@@ -52962,9 +52434,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (TableMeta _iter791 : struct.success)
+            for (TableMeta _iter783 : struct.success)
             {
-              _iter791.write(oprot);
+              _iter783.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -53003,9 +52475,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (TableMeta _iter792 : struct.success)
+            for (TableMeta _iter784 : struct.success)
             {
-              _iter792.write(oprot);
+              _iter784.write(oprot);
             }
           }
         }
@@ -53020,14 +52492,14 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<TableMeta>(_list793.size);
-            TableMeta _elem794;
-            for (int _i795 = 0; _i795 < _list793.size; ++_i795)
+            org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<TableMeta>(_list785.size);
+            TableMeta _elem786;
+            for (int _i787 = 0; _i787 < _list785.size; ++_i787)
             {
-              _elem794 = new TableMeta();
-              _elem794.read(iprot);
-              struct.success.add(_elem794);
+              _elem786 = new TableMeta();
+              _elem786.read(iprot);
+              struct.success.add(_elem786);
             }
           }
           struct.setSuccessIsSet(true);
@@ -53793,13 +53265,13 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list796 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list796.size);
-                  String _elem797;
-                  for (int _i798 = 0; _i798 < _list796.size; ++_i798)
+                  org.apache.thrift.protocol.TList _list788 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list788.size);
+                  String _elem789;
+                  for (int _i790 = 0; _i790 < _list788.size; ++_i790)
                   {
-                    _elem797 = iprot.readString();
-                    struct.success.add(_elem797);
+                    _elem789 = iprot.readString();
+                    struct.success.add(_elem789);
                   }
                   iprot.readListEnd();
                 }
@@ -53834,9 +53306,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter799 : struct.success)
+            for (String _iter791 : struct.success)
             {
-              oprot.writeString(_iter799);
+              oprot.writeString(_iter791);
             }
             oprot.writeListEnd();
           }
@@ -53875,9 +53347,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter800 : struct.success)
+            for (String _iter792 : struct.success)
             {
-              oprot.writeString(_iter800);
+              oprot.writeString(_iter792);
             }
           }
         }
@@ -53892,13 +53364,13 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list801.size);
-            String _elem802;
-            for (int _i803 = 0; _i803 < _list801.size; ++_i803)
+            org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list793.size);
+            String _elem794;
+            for (int _i795 = 0; _i795 < _list793.size; ++_i795)
             {
-              _elem802 = iprot.readString();
-              struct.success.add(_elem802);
+              _elem794 = iprot.readString();
+              struct.success.add(_elem794);
             }
           }
           struct.setSuccessIsSet(true);
@@ -55351,13 +54823,13 @@ public class ThriftHiveMetastore {
             case 2: // TBL_NAMES
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list804 = iprot.readListBegin();
-                  struct.tbl_names = new ArrayList<String>(_list804.size);
-                  String _elem805;
-                  for (int _i806 = 0; _i806 < _list804.size; ++_i806)
+                  org.apache.thrift.protocol.TList _list796 = iprot.readListBegin();
+                  struct.tbl_names = new ArrayList<String>(_list796.size);
+                  String _elem797;
+                  for (int _i798 = 0; _i798 < _list796.size; ++_i798)
                   {
-                    _elem805 = iprot.readString();
-                    struct.tbl_names.add(_elem805);
+                    _elem797 = iprot.readString();
+                    struct.tbl_names.add(_elem797);
                   }
                   iprot.readListEnd();
                 }
@@ -55388,9 +54860,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size()));
-            for (String _iter807 : struct.tbl_names)
+            for (String _iter799 : struct.tbl_names)
             {
-              oprot.writeString(_iter807);
+              oprot.writeString(_iter799);
             }
             oprot.writeListEnd();
           }
@@ -55427,9 +54899,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetTbl_names()) {
           {
             oprot.writeI32(struct.tbl_names.size());
-            for (String _iter808 : struct.tbl_names)
+            for (String _iter800 : struct.tbl_names)
             {
-              oprot.writeString(_iter808);
+              oprot.writeString(_iter800);
             }
           }
         }
@@ -55445,13 +54917,13 @@ public class ThriftHiveMetastore {
         }
         if (incoming.get(1)) {
           {
-            org.apache.thrift.protocol.TList _list809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.tbl_names = new ArrayList<String>(_list809.size);
-            String _elem810;
-            for (int _i811 = 0; _i811 < _list809.size; ++_i811)
+            org.apache.thrift.protocol.TList _list801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.tbl_names = new ArrayList<String>(_list801.size);
+            String _elem802;
+            for (int _i803 = 0; _i803 < _list801.size; ++_i803)
             {
-              _elem810 = iprot.readString();
-              struct.tbl_names.add(_elem810);
+              _elem802 = iprot.readString();
+              struct.tbl_names.add(_elem802);
             }
           }
           struct.setTbl_namesIsSet(true);
@@ -55776,14 +55248,14 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list812 = iprot.readListBegin();
-                  struct.success = new ArrayList<Table>(_list812.size);
-                  Table _elem813;
-                  for (int _i814 = 0; _i814 < _list812.size; ++_i814)
+                  org.apache.thrift.protocol.TList _list804 = iprot.readListBegin();
+                  struct.success = new ArrayList<Table>(_list804.size);
+                  Table _elem805;
+                  for (int _i806 = 0; _i806 < _list804.size; ++_i806)
                   {
-                    _elem813 = new Table();
-                    _elem813.read(iprot);
-                    struct.success.add(_elem813);
+                    _elem805 = new Table();
+                    _elem805.read(iprot);
+                    struct.success.add(_elem805);
                   }
                   iprot.readListEnd();
                 }
@@ -55809,9 +55281,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (Table _iter815 : struct.success)
+            for (Table _iter807 : struct.success)
             {
-              _iter815.write(oprot);
+              _iter807.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -55842,9 +55314,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (Table _iter816 : struct.success)
+            for (Table _iter808 : struct.success)
             {
-              _iter816.write(oprot);
+              _iter808.write(oprot);
             }
           }
         }
@@ -55856,14 +55328,14 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<Table>(_list817.size);
-            Table _elem818;
-            for (int _i819 = 0; _i819 < _list817.size; ++_i819)
+            org.apache.thrift.protocol.TList _list809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<Table>(_list809.size);
+            Table _elem810;
+            for (int _i811 = 0; _i811 < _list809.size; ++_i811)
             {
-              _elem818 = new Table();
-              _elem818.read(iprot);
-              struct.success.add(_elem818);
+              _elem810 = new Table();
+              _elem810.read(iprot);
+              struct.success.add(_elem810);
             }
           }
           struct.setSuccessIsSet(true);
@@ -58976,13 +58448,13 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list820 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list820.size);
-                  String _elem821;
-                  for (int _i822 = 0; _i822 < _list820.size; ++_i822)
+                  org.apache.thrift.protocol.TList _list812 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list812.size);
+                  String _elem813;
+                  for (int _i814 = 0; _i814 < _list812.size; ++_i814)
                   {
-                    _elem821 = iprot.readString();
-                    struct.success.add(_elem821);
+                    _elem813 = iprot.readString();
+                    struct.success.add(_elem813);
                   }
                   iprot.readListEnd();
                 }
@@ -59035,9 +58507,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter823 : struct.success)
+            for (String _iter815 : struct.success)
             {
-              oprot.writeString(_iter823);
+              oprot.writeString(_iter815);
             }
             oprot.writeListEnd();
           }
@@ -59092,9 +58564,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter824 : struct.success)
+            for (String _iter816 : struct.success)
             {
-              oprot.writeString(_iter824);
+              oprot.writeString(_iter816);
             }
           }
         }
@@ -59115,13 +58587,13 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list825.size);
-            String _elem826;
-            for (int _i827 = 0; _i827 < _list825.size; ++_i827)
+            org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list817.size);
+            String _elem818;
+            for (int _i819 = 0; _i819 < _list817.size; ++_i819)
             {
-              _elem826 = iprot.readString();
-              struct.success.add(_elem826);
+              _elem818 = iprot.readString();
+              struct.success.add(_elem818);
             }
           }
           struct.setSuccessIsSet(true);
@@ -64980,14 +64452,14 @@ public class ThriftHiveMetastore {
             case 1: // NEW_PARTS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list828 = iprot.readListBegin();
-                  struct.new_parts = new ArrayList<Partition>(_list828.size);
-                  Partition _elem829;
-                  for (int _i830 = 0; _i830 < _list828.size; ++_i830)
+                  org.apache.thrift.protocol.TList _list820 = iprot.readListBegin();
+                  struct.new_parts = new ArrayList<Partition>(_list820.size);
+                  Partition _elem821;
+                  for (int _i822 = 0; _i822 < _list820.size; ++_i822)
                   {
-                    _elem829 = new Partition();
-                    _elem829.read(iprot);
-                    struct.new_parts.add(_elem829);
+                    _elem821 = new Partition();
+                    _elem821.read(iprot);
+                    struct.new_parts.add(_elem821);
                   }
                   iprot.readListEnd();
                 }
@@ -65013,9 +64485,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size()));
-            for (Partition _iter831 : struct.new_parts)
+            for (Partition _iter823 : struct.new_parts)
             {
-              _iter831.write(oprot);
+              _iter823.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -65046,9 +64518,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetNew_parts()) {
           {
             oprot.writeI32(struct.new_parts.size());
-            for (Partition _iter832 : struct.new_parts)
+            for (Partition _iter824 : struct.new_parts)
             {
-              _iter832.write(oprot);
+              _iter824.write(oprot);
             }
           }
         }
@@ -65060,14 +64532,14 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.new_parts = new ArrayList<Partition>(_list833.size);
-            Partition _elem834;
-            for (int _i835 = 0; _i835 < _list833.size; ++_i835)
+            org.apache.thrift.protocol.TList _list825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.new_parts = new ArrayList<Partition>(_list825.size);
+            Partition _elem826;
+            for (int _i827 = 0; _i827 < _list825.size; ++_i827)
             {
-              _elem834 = new Partition();
-              _elem834.read(iprot);
-              struct.new_parts.add(_elem834);
+              _elem826 = new Partition();
+              _elem826.read(iprot);
+              struct.new_parts.add(_elem826);
             }
           }
           struct.setNew_partsIsSet(true);
@@ -66068,14 +65540,14 @@ public class ThriftHiveMetastore {
             case 1: // NEW_PARTS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list836 = iprot.readListBegin();
-                  struct.new_parts = new ArrayList<PartitionSpec>(_list836.size);
-                  PartitionSpec _elem837;
-                  for (int _i838 = 0; _i838 < _list836.size; ++_i838)
+                  org.apache.thrift.protocol.TList _list828 = iprot.readListBegin();
+                  struct.new_parts = new ArrayList<PartitionSpec>(_list828.size);
+                  PartitionSpec _elem829;
+                  for (int _i830 = 0; _i830 < _list828.size; ++_i830)
                   {
-                    _elem837 = new PartitionSpec();
-                    _elem837.read(iprot);
-                    struct.new_parts.add(_elem837);
+                    _elem829 = new PartitionSpec();
+                    _elem829.read(iprot

<TRUNCATED>

[07/18] hive git commit: HIVE-14879 : integrate MM tables into ACID: replace MM metastore calls and structures with ACID ones (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index c351ffd..35d876a 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -146,7 +146,6 @@ import org.apache.hadoop.hive.metastore.model.MTable;
 import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
 import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics;
 import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
-import org.apache.hadoop.hive.metastore.model.MTableWrite;
 import org.apache.hadoop.hive.metastore.model.MType;
 import org.apache.hadoop.hive.metastore.model.MVersionTable;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
@@ -612,52 +611,15 @@ public class ObjectStore implements RawStore, Configurable {
     return result;
   }
 
+  /**
+   * Commits the transaction; an actual commit is performed only when this
+   * call balances the first openTransaction() call.
+   *
+   * @return false if the transaction was already marked for rollback, true otherwise
+   */
   @Override
   @SuppressWarnings("nls")
   public boolean commitTransaction() {
-    if (!startCommitTransaction()) return false;
-
-    openTrasactionCalls--;
-    debugLog("Commit transaction: count = " + openTrasactionCalls + ", isactive "+ currentTransaction.isActive());
-    if ((openTrasactionCalls == 0) && currentTransaction.isActive()) {
-      transactionStatus = TXN_STATUS.COMMITED;
-      currentTransaction.commit();
-    }
-
-    return true;
-  }
-
-  @Override
-  @CanNotRetry
-  public Boolean commitTransactionExpectDeadlock() {
-    if (!startCommitTransaction()) return false;
-
-    if (--openTrasactionCalls != 0) {
-      String msg = "commitTransactionExpectDeadlock cannot be called for a nested transaction";
-      LOG.error(msg);
-      throw new AssertionError(msg);
-    }
-
-    transactionStatus = TXN_STATUS.COMMITED;
-    try {
-      currentTransaction.commit();
-    } catch (Exception ex) {
-      Throwable candidate = ex;
-      while (candidate != null && !(candidate instanceof SQLException)) {
-        candidate = candidate.getCause();
-      }
-      if (candidate == null) throw ex;
-      if (DatabaseProduct.isDeadlock(dbType, (SQLException)candidate)) {
-        LOG.info("Deadlock exception during commit: " + candidate.getMessage());
-        return null;
-      }
-      throw ex;
-    }
-
-    return true;
-  }
-
-  private boolean startCommitTransaction() {
     if (TXN_STATUS.ROLLBACK == transactionStatus) {
       debugLog("Commit transaction: rollback");
       return false;
@@ -676,6 +638,13 @@ public class ObjectStore implements RawStore, Configurable {
       LOG.error("Unbalanced calls to open/commit Transaction", e);
       throw e;
     }
+    openTrasactionCalls--;
+    debugLog("Commit transaction: count = " + openTrasactionCalls + ", isactive "+ currentTransaction.isActive());
+
+    if ((openTrasactionCalls == 0) && currentTransaction.isActive()) {
+      transactionStatus = TXN_STATUS.COMMITED;
+      currentTransaction.commit();
+    }
     return true;
   }
 
@@ -1129,12 +1098,6 @@ public class ObjectStore implements RawStore, Configurable {
           pm.deletePersistentAll(partGrants);
         }
 
-        // TODO# temporary; will be removed with ACID. Otherwise, need to do direct delete w/o get.
-        List<MTableWrite> mtw = getTableWrites(dbName, tableName, -1, -1);
-        if (mtw != null && mtw.size() > 0) {
-          pm.deletePersistentAll(mtw);
-        }
-
         List<MPartitionColumnPrivilege> partColGrants = listTableAllPartitionColumnGrants(dbName,
             tableName);
         if (partColGrants != null && partColGrants.size() > 0) {
@@ -1154,11 +1117,6 @@ public class ObjectStore implements RawStore, Configurable {
           pm.deletePersistentAll(tabConstraints);
         }
 
-        List<MTableWrite> tableWrites = listAllTableWrites(dbName, tableName);
-        if (tableWrites != null && tableWrites.size() > 0) {
-          pm.deletePersistentAll(tableWrites);
-        }
-
         preDropStorageDescriptor(tbl.getSd());
         // then remove the table
         pm.deletePersistentAll(tbl);
@@ -1217,25 +1175,6 @@ public class ObjectStore implements RawStore, Configurable {
     return mConstraints;
   }
 
-
-  private List<MTableWrite> listAllTableWrites(String dbName, String tableName) {
-    List<MTableWrite> result = null;
-    Query query = null;
-    boolean success = false;
-    openTransaction();
-    try {
-      String queryStr = "table.tableName == t1 && table.database.name == t2";
-      query = pm.newQuery(MTableWrite.class, queryStr);
-      query.declareParameters("java.lang.String t1, java.lang.String t2");
-      result = new ArrayList<>((List<MTableWrite>) query.executeWithArray(tableName, dbName));
-      pm.retrieveAll(result);
-      success = true;
-    } finally {
-      closeTransaction(success, query);
-    }
-    return result;
-  }
-
   @Override
   public Table getTable(String dbName, String tableName) throws MetaException {
     boolean commited = false;
@@ -1527,8 +1466,6 @@ public class ObjectStore implements RawStore, Configurable {
         convertToFieldSchemas(mtbl.getPartitionKeys()), convertMap(mtbl.getParameters()),
         mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), tableType);
     t.setRewriteEnabled(mtbl.isRewriteEnabled());
-    t.setMmNextWriteId(mtbl.getMmNextWriteId());
-    t.setMmWatermarkWriteId(mtbl.getMmWatermarkWriteId());
     return t;
   }
 
@@ -1567,8 +1504,7 @@ public class ObjectStore implements RawStore, Configurable {
         .getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(),
         convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(),
         tbl.getViewOriginalText(), tbl.getViewExpandedText(), tbl.isRewriteEnabled(),
-        tableType, tbl.isSetMmNextWriteId() ?  tbl.getMmNextWriteId() : 0,
-            tbl.isSetMmWatermarkWriteId() ?  tbl.getMmWatermarkWriteId() : -1);
+        tableType);
   }
 
   private List<MFieldSchema> convertToMFieldSchemas(List<FieldSchema> keys) {
@@ -3310,8 +3246,6 @@ public class ObjectStore implements RawStore, Configurable {
       oldt.setLastAccessTime(newt.getLastAccessTime());
       oldt.setViewOriginalText(newt.getViewOriginalText());
       oldt.setViewExpandedText(newt.getViewExpandedText());
-      oldt.setMmNextWriteId(newt.getMmNextWriteId());
-      oldt.setMmWatermarkWriteId(newt.getMmWatermarkWriteId());
       oldt.setRewriteEnabled(newt.isRewriteEnabled());
 
       // commit the changes
@@ -8595,193 +8529,4 @@ public class ObjectStore implements RawStore, Configurable {
       }
     }
   }
-
-  @Override
-  public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) {
-    boolean success = false;
-    openTransaction();
-    try {
-      MTable mtbl = getMTable(tbl.getDbName(), tbl.getTableName());
-      MTableWrite tw = new MTableWrite(mtbl, writeId, String.valueOf(state), heartbeat, heartbeat);
-      pm.makePersistent(tw);
-      success = true;
-    } finally {
-      if (success) {
-        commitTransaction();
-      } else {
-        rollbackTransaction();
-      }
-    }
-  }
-
-  @Override
-  public void updateTableWrite(MTableWrite tw) {
-    boolean success = false;
-    openTransaction();
-    try {
-      pm.makePersistent(tw);
-      success = true;
-    } finally {
-      if (success) {
-        commitTransaction();
-      } else {
-        rollbackTransaction();
-      }
-    }
-  }
-
-  @Override
-  public MTableWrite getTableWrite(
-      String dbName, String tblName, long writeId) throws MetaException {
-    boolean success = false;
-    Query query = null;
-    openTransaction();
-    try {
-      query = pm.newQuery(MTableWrite.class,
-              "table.tableName == t1 && table.database.name == t2 && writeId == t3");
-      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.Long t3");
-      @SuppressWarnings("unchecked")
-      List<MTableWrite> writes = (List<MTableWrite>) query.execute(tblName, dbName, writeId);
-      pm.retrieveAll(writes);
-      success = true;
-      if (writes == null || writes.isEmpty()) return null;
-      if (writes.size() > 1) {
-        throw new MetaException(
-            "More than one TableWrite for " + dbName + "." + tblName + " and " + writeId);
-      }
-      return writes.get(0);
-    } finally {
-      closeTransaction(success, query);
-    }
-  }
-
-  @Override
-  public List<Long> getTableWriteIds(String dbName, String tblName,
-      long watermarkId, long nextWriteId, char state) throws MetaException {
-    boolean success = false;
-    Query query = null;
-    openTransaction();
-    try {
-      boolean hasState = (state != '\0');
-      query = pm.newQuery("select writeId from org.apache.hadoop.hive.metastore.model.MTableWrite"
-          + " where table.tableName == t1 && table.database.name == t2 && writeId > t3"
-          + " && writeId < t4" + (hasState ? " && state == t5" : ""));
-      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.Long t3, "
-          + "java.lang.Long t4" + (hasState ? ", java.lang.String t5" : ""));
-      query.setResult("writeId");
-      query.setOrdering("writeId asc");
-      @SuppressWarnings("unchecked")
-      List<Long> writes = (List<Long>) (hasState
-          ? query.executeWithArray(tblName, dbName, watermarkId, nextWriteId, String.valueOf(state))
-          : query.executeWithArray(tblName, dbName, watermarkId, nextWriteId));
-      success = true;
-      return (writes == null) ? new ArrayList<Long>() : new ArrayList<>(writes);
-    } finally {
-      closeTransaction(success, query);
-    }
-  }
-
-  @Override
-  public List<MTableWrite> getTableWrites(
-      String dbName, String tblName, long from, long to) throws MetaException {
-    boolean success = false;
-    dbName = HiveStringUtils.normalizeIdentifier(dbName);
-    tblName = HiveStringUtils.normalizeIdentifier(tblName);
-    Query query = null;
-    openTransaction();
-    try {
-      String queryStr = "table.tableName == t1 && table.database.name == t2 && writeId > t3",
-          argStr = "java.lang.String t1, java.lang.String t2, java.lang.Long t3";
-      if (to >= 0) {
-        queryStr += " && writeId < t4";
-        argStr += ", java.lang.Long t4";
-      }
-      query = pm.newQuery(MTableWrite.class, queryStr);
-      query.declareParameters(argStr);
-      query.setOrdering("writeId asc");
-      @SuppressWarnings("unchecked")
-      List<MTableWrite> writes = (List<MTableWrite>)(to >= 0
-         ? query.executeWithArray(tblName, dbName, from, to)
-         : query.executeWithArray(tblName, dbName, from));
-      pm.retrieveAll(writes);
-      success = true;
-      return (writes == null || writes.isEmpty()) ? null : new ArrayList<>(writes);
-    } finally {
-      closeTransaction(success, query);
-    }
-  }
-
-
-  @Override
-  public void deleteTableWrites(
-      String dbName, String tblName, long from, long to) throws MetaException {
-    boolean success = false;
-    Query query = null;
-    openTransaction();
-    try {
-      query = pm.newQuery(MTableWrite.class,
-          "table.tableName == t1 && table.database.name == t2 && writeId > t3 && writeId < t4");
-      query.declareParameters(
-          "java.lang.String t1, java.lang.String t2, java.lang.Long t3, java.lang.Long t4");
-      query.deletePersistentAll(tblName, dbName, from, to);
-      success = true;
-    } finally {
-      closeTransaction(success, query);
-    }
-  }
-
-  @Override
-  public List<FullTableName > getAllMmTablesForCleanup() throws MetaException {
-    boolean success = false;
-    Query query = null;
-    openTransaction();
-    try {
-      // If the table had no MM writes, there's nothing to clean up
-      query = pm.newQuery(MTable.class, "mmNextWriteId > 0");
-      @SuppressWarnings("unchecked")
-      List<MTable> tables = (List<MTable>) query.execute();
-      pm.retrieveAll(tables);
-      ArrayList<FullTableName> result = new ArrayList<>(tables.size());
-      for (MTable table : tables) {
-        if (MetaStoreUtils.isInsertOnlyTable(table.getParameters())) {
-          result.add(new FullTableName(table.getDatabase().getName(), table.getTableName()));
-        }
-      }
-      success = true;
-      return result;
-    } finally {
-      closeTransaction(success, query);
-    }
-  }
-
-  @Override
-  public Collection<String> getAllPartitionLocations(String dbName, String tblName) {
-    boolean success = false;
-    Query query = null;
-    openTransaction();
-    try {
-      String q = "select sd.location from org.apache.hadoop.hive.metastore.model.MPartition"
-          + " where table.tableName == t1 && table.database.name == t2";
-      query = pm.newQuery();
-      query.declareParameters("java.lang.String t1, java.lang.String t2");
-      @SuppressWarnings("unchecked")
-      List<String> tables = (List<String>) query.execute();
-      pm.retrieveAll(tables);
-      success = true;
-      return new ArrayList<>(tables);
-    } finally {
-      closeTransaction(success, query);
-    }
-  }
-
-  private void closeTransaction(boolean success, Query query) {
-    if (success) {
-      commitTransaction();
-    } else {
-      rollbackTransaction();
-    }
-    if (query != null) {
-      query.closeAll();
-    }
-  }
 }
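
For context, the commitTransaction() change above folds commitTransactionExpectDeadlock() and startCommitTransaction() back into a single reference-counted commit. A minimal sketch of the counting pattern it retains (hypothetical class, not part of the patch):

    // Only the commit that balances the outermost openTransaction() call
    // performs a real commit; nested commits just decrement the counter.
    public class NestedTxnSketch {
      private int openCalls = 0;
      private boolean rolledBack = false;

      public void openTransaction() {
        openCalls++;
      }

      public boolean commitTransaction() {
        if (rolledBack) {
          return false; // transaction already marked for rollback
        }
        if (openCalls <= 0) {
          throw new RuntimeException("Unbalanced calls to open/commit Transaction");
        }
        if (--openCalls == 0) {
          // the real commit (currentTransaction.commit() in ObjectStore) runs here
        }
        return true;
      }
    }

HBaseStore below keeps the same counter under the name txnNestLevel.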

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
index ded978c..9253723 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -23,13 +23,11 @@ import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
 import java.lang.annotation.Target;
 import java.nio.ByteBuffer;
-import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
-import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
@@ -62,7 +60,6 @@ import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.model.MTableWrite;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.thrift.TException;
 
@@ -97,15 +94,6 @@ public interface RawStore extends Configurable {
   public abstract boolean commitTransaction();
 
   /**
-   * Commits transaction and detects if the failure to do so is a deadlock or not.
-   * Must be called on the top level with regard to openTransaction calls; attempting to
-   * call this after several nested openTransaction calls will throw.
-   * @return true or false - same as commitTransaction; null in case of deadlock.
-   */
-  @CanNotRetry
-  public abstract Boolean commitTransactionExpectDeadlock();
-
-  /**
    * Rolls back the current transaction if it is active
    */
   @CanNotRetry
@@ -719,35 +707,4 @@ public interface RawStore extends Configurable {
   void addPrimaryKeys(List<SQLPrimaryKey> pks) throws InvalidObjectException, MetaException;
 
   void addForeignKeys(List<SQLForeignKey> fks) throws InvalidObjectException, MetaException;
-
-  void updateTableWrite(MTableWrite tw);
-
-  MTableWrite getTableWrite(String dbName, String tblName, long writeId) throws MetaException;
-
-  void createTableWrite(Table tbl, long writeId, char state, long heartbeat);
-
-  List<Long> getTableWriteIds(String dbName, String tblName, long watermarkId, long nextWriteId, char state) throws MetaException;
-
-
-  public static final class FullTableName {
-    public final String dbName, tblName;
-
-    public FullTableName(String dbName, String tblName) {
-      this.dbName = dbName;
-      this.tblName = tblName;
-    }
-
-    @Override
-    public String toString() {
-      return dbName + "." + tblName;
-    }
-  }
-
-  List<FullTableName> getAllMmTablesForCleanup() throws MetaException;
-
-  public List<MTableWrite> getTableWrites(String dbName, String tblName, long from, long to) throws MetaException;
-
-  Collection<String> getAllPartitionLocations(String dbName, String tblName);
-
-  void deleteTableWrites(String dbName, String tblName, long from, long to) throws MetaException;
 }
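
The closeTransaction() helper deleted from ObjectStore above illustrates the caller-side contract that RawStore implementations are still expected to follow: commit on success, roll back otherwise. A minimal usage sketch (store is any RawStore):

    // Same shape as the removed closeTransaction helper.
    boolean success = false;
    store.openTransaction();
    try {
      // ... reads/writes against the store ...
      success = store.commitTransaction();
    } finally {
      if (!success) {
        store.rollbackTransaction();
      }
    }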

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index c91dd4c..0cde1f0 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -81,7 +81,6 @@ import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.model.MTableWrite;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
@@ -367,11 +366,6 @@ public class CachedStore implements RawStore, Configurable {
   }
 
   @Override
-  public Boolean commitTransactionExpectDeadlock() {
-    return null;
-  }
-
-  @Override
   public void rollbackTransaction() {
     rawStore.rollbackTransaction();
   }
@@ -1565,46 +1559,6 @@ public class CachedStore implements RawStore, Configurable {
   }
 
   @Override
-  public void updateTableWrite(MTableWrite tw) {
-
-  }
-
-  @Override
-  public MTableWrite getTableWrite(String dbName, String tblName, long writeId) throws MetaException {
-    return null;
-  }
-
-  @Override
-  public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) {
-
-  }
-
-  @Override
-  public List<Long> getTableWriteIds(String dbName, String tblName, long watermarkId, long nextWriteId, char state) throws MetaException {
-    return null;
-  }
-
-  @Override
-  public List<FullTableName> getAllMmTablesForCleanup() throws MetaException {
-    return null;
-  }
-
-  @Override
-  public List<MTableWrite> getTableWrites(String dbName, String tblName, long from, long to) throws MetaException {
-    return null;
-  }
-
-  @Override
-  public Collection<String> getAllPartitionLocations(String dbName, String tblName) {
-    return null;
-  }
-
-  @Override
-  public void deleteTableWrites(String dbName, String tblName, long from, long to) throws MetaException {
-
-  }
-
-  @Override
   public Map<String, ColumnStatisticsObj> getAggrColStatsForTablePartitions(
       String dbName, String tableName)
       throws MetaException, NoSuchObjectException {

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
index 206196d..ed559f9 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
@@ -22,7 +22,6 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.cache.CacheLoader;
 
 import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.hive.common.ObjectPair;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -36,7 +35,6 @@ import org.apache.hadoop.hive.metastore.PartitionExpressionProxy;
 import org.apache.hadoop.hive.metastore.RawStore;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.RawStore.CanNotRetry;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
@@ -75,7 +73,6 @@ import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.PlanResult;
 import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan;
-import org.apache.hadoop.hive.metastore.model.MTableWrite;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
@@ -134,27 +131,13 @@ public class HBaseStore implements RawStore {
   @Override
   public boolean commitTransaction() {
     if (--txnNestLevel == 0) {
-      commitInternal();
+      LOG.debug("Committing HBase transaction");
+      getHBase().commit();
     }
     return true;
   }
 
   @Override
-  @CanNotRetry
-  public Boolean commitTransactionExpectDeadlock() {
-    if (--txnNestLevel != 0) {
-      throw new AssertionError("Cannot be called on a nested transaction");
-    }
-    commitInternal();
-    return true;
-  }
-
-  private void commitInternal() {
-    LOG.debug("Committing HBase transaction");
-    getHBase().commit();
-  }
-
-  @Override
   public void rollbackTransaction() {
     txnNestLevel = 0;
     LOG.debug("Rolling back HBase transaction");
@@ -2875,57 +2858,4 @@ public class HBaseStore implements RawStore {
     // TODO: see if it makes sense to implement this here
     return null;
   }
-
-  @Override
-  public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) {
-    // TODO: Auto-generated method stub
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void updateTableWrite(MTableWrite tw) {
-    // TODO: Auto-generated method stub
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public MTableWrite getTableWrite(String dbName, String tblName, long writeId) {
-    // TODO: Auto-generated method stub
-    throw new UnsupportedOperationException();
-  }
-
-
-  @Override
-  public List<Long> getTableWriteIds(
-      String dbName, String tblName, long watermarkId, long nextWriteId, char state) {
-    // TODO: Auto-generated method stub
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public List<FullTableName> getAllMmTablesForCleanup() throws MetaException {
-    // TODO: Auto-generated method stub
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public List<MTableWrite> getTableWrites(String dbName, String tblName,
-      long from, long to) throws MetaException {
-    // TODO: Auto-generated method stub
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public Collection<String> getAllPartitionLocations(String dbName,
-      String tblName) {
-    // TODO: Auto-generated method stub
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void deleteTableWrites(String dbName, String tblName, long from,
-      long to) throws MetaException {
-    // TODO: Auto-generated method stub
-    throw new UnsupportedOperationException();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java
----------------------------------------------------------------------
diff --git a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java
index 18e9cb3..3759348 100644
--- a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java
+++ b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java
@@ -36,8 +36,6 @@ public class MTable {
   private String viewExpandedText;
   private boolean rewriteEnabled;
   private String tableType;
-  private long mmNextWriteId;
-  private long mmWatermarkWriteId;
 
   public MTable() {}
 
@@ -58,8 +56,7 @@ public class MTable {
   public MTable(String tableName, MDatabase database, MStorageDescriptor sd, String owner,
       int createTime, int lastAccessTime, int retention, List<MFieldSchema> partitionKeys,
       Map<String, String> parameters,
-      String viewOriginalText, String viewExpandedText, boolean rewriteEnabled, String tableType, long mmNextWriteId,
-      long mmWatermarkWriteId) {
+      String viewOriginalText, String viewExpandedText, boolean rewriteEnabled, String tableType) {
     this.tableName = tableName;
     this.database = database;
     this.sd = sd;
@@ -73,8 +70,6 @@ public class MTable {
     this.viewExpandedText = viewExpandedText;
     this.rewriteEnabled = rewriteEnabled;
     this.tableType = tableType;
-    this.mmWatermarkWriteId = mmWatermarkWriteId;
-    this.mmNextWriteId = mmNextWriteId;
   }
 
   /**
@@ -258,20 +253,4 @@ public class MTable {
   public String getTableType() {
     return tableType;
   }
-
-  public long getMmNextWriteId() {
-    return mmNextWriteId;
-  }
-
-  public long getMmWatermarkWriteId() {
-    return mmWatermarkWriteId;
-  }
-
-  public void setMmNextWriteId(long mmNextWriteId) {
-    this.mmNextWriteId = mmNextWriteId;
-  }
-
-  public void setMmWatermarkWriteId(long mmWatermarkWriteId) {
-    this.mmWatermarkWriteId = mmWatermarkWriteId;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableWrite.java
----------------------------------------------------------------------
diff --git a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableWrite.java b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableWrite.java
deleted file mode 100644
index b7f398a..0000000
--- a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableWrite.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore.model;
-
-public class MTableWrite {
-  private MTable table;
-  private long writeId;
-  private String state;
-  private long lastHeartbeat;
-  private long created;
-
-  public MTableWrite() {}
-
-  public MTableWrite(MTable table, long writeId, String state, long lastHeartbeat, long created) {
-    this.table = table;
-    this.writeId = writeId;
-    this.state = state;
-    this.lastHeartbeat = lastHeartbeat;
-    this.created = created;
-  }
-
-  public MTable getTable() {
-    return table;
-  }
-
-  public long getWriteId() {
-    return writeId;
-  }
-
-  public String getState() {
-    return state;
-  }
-
-  public long getLastHeartbeat() {
-    return lastHeartbeat;
-  }
-
-  public long getCreated() {
-    return created;
-  }
-
-  public void setTable(MTable table) {
-    this.table = table;
-  }
-
-  public void setWriteId(long writeId) {
-    this.writeId = writeId;
-  }
-
-  public void setState(String state) {
-    this.state = state;
-  }
-
-  public void setLastHeartbeat(long lastHeartbeat) {
-    this.lastHeartbeat = lastHeartbeat;
-  }
-
-  public void setCreated(long created) {
-    this.created = created;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/model/package.jdo
----------------------------------------------------------------------
diff --git a/metastore/src/model/package.jdo b/metastore/src/model/package.jdo
index 67e2c20..5a91b24 100644
--- a/metastore/src/model/package.jdo
+++ b/metastore/src/model/package.jdo
@@ -185,12 +185,6 @@
       <field name="tableType">
         <column name="TBL_TYPE" length="128" jdbc-type="VARCHAR"/>
       </field>
-      <field name="mmNextWriteId">
-        <column name="MM_NEXT_WRITE_ID" jdbc-type="BIGINT" default-value="0" />
-      </field>
-      <field name="mmWatermarkWriteId">
-        <column name="MM_WATERMARK_WRITE_ID" jdbc-type="BIGINT" default-value="-1" />
-      </field>
     </class>
 
     <class name="MConstraint" identity-type="application" table="KEY_CONSTRAINTS" detachable="true" objectid-class="MConstraint$PK">
@@ -1070,33 +1064,6 @@
       </field>
     </class>
 
-    <!-- using datastore identity here, cause composite application PKs are a PITA -->
-    <class name="MTableWrite" table="TBL_WRITES" identity-type="datastore" detachable="true">
-      <datastore-identity>
-        <column name="TW_ID"/>
-      </datastore-identity>
-      <index name="UniqueWrite" unique="true">
-        <column name="TBL_ID"/>
-        <column name="WRITE_ID"/>
-      </index>
-      <field name="writeId">
-        <column name="WRITE_ID" jdbc-type="BIGINT" allows-null="false"/>
-      </field>
-      <field name="table">
-        <column name="TBL_ID"/>
-      </field>
-      <field name="state">
-        <column name="STATE" length="1" jdbc-type="CHAR" allows-null="false"/>
-      </field>
-      <field name="created">
-        <column name="CREATED" jdbc-type="BIGINT" allows-null="false"/>
-      </field>
-      <field name="lastHeartbeat">
-        <column name="LAST_HEARTBEAT" jdbc-type="BIGINT" allows-null="false"/>
-      </field>
-    </class>
-
-
   </package>
 </jdo>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 7760bc7..2d19f6b 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -20,14 +20,12 @@ package org.apache.hadoop.hive.metastore;
 
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
@@ -60,7 +58,6 @@ import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.model.MTableWrite;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.thrift.TException;
 
@@ -879,52 +876,4 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
     // TODO Auto-generated method stub
     return null;
   }
-
-  @Override
-  public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) {
-  }
-
-  @Override
-  public void updateTableWrite(MTableWrite tw) {
-
-  }
-
-  @Override
-  public MTableWrite getTableWrite(String dbName, String tblName, long writeId) {
-    return null;
-  }
-
-  @Override
-  @CanNotRetry
-  public Boolean commitTransactionExpectDeadlock() {
-    return null;
-  }
-
-  @Override
-  public List<Long> getTableWriteIds(
-      String dbName, String tblName, long watermarkId, long nextWriteId, char state) {
-    return null;
-  }
-
-  @Override
-  public List<FullTableName> getAllMmTablesForCleanup() throws MetaException {
-    return null;
-  }
-
-  @Override
-  public List<MTableWrite> getTableWrites(String dbName, String tblName,
-      long from, long to) throws MetaException {
-    return null;
-  }
-
-  @Override
-  public Collection<String> getAllPartitionLocations(String dbName,
-      String tblName) {
-    return null;
-  }
-
-  @Override
-  public void deleteTableWrites(String dbName, String tblName, long from,
-      long to) throws MetaException {
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index df05af1..bcdbedb 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.metastore;
 
 import java.nio.ByteBuffer;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
@@ -28,7 +27,6 @@ import junit.framework.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
@@ -61,7 +59,6 @@ import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.model.MTableWrite;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.thrift.TException;
 
@@ -111,12 +108,6 @@ public class DummyRawStoreForJdoConnection implements RawStore {
   }
 
   @Override
-  @CanNotRetry
-  public Boolean commitTransactionExpectDeadlock() {
-    return false;
-  }
-
-  @Override
   public void rollbackTransaction() {
   }
 
@@ -898,47 +889,6 @@ public class DummyRawStoreForJdoConnection implements RawStore {
     // TODO Auto-generated method stub
     return null;
   }
-
-  @Override
-  public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) {
-  }
-
-  @Override
-  public void updateTableWrite(MTableWrite tw) {
-  }
-
-  @Override
-  public MTableWrite getTableWrite(String dbName, String tblName, long writeId) {
-    return null;
-  }
-
-  @Override
-  public List<Long> getTableWriteIds(
-      String dbName, String tblName, long watermarkId, long nextWriteId, char state) {
-    return null;
-  }
-
-  @Override
-  public List<FullTableName> getAllMmTablesForCleanup() throws MetaException {
-    return null;
-  }
-
-  @Override
-  public List<MTableWrite> getTableWrites(String dbName, String tblName,
-      long from, long to) throws MetaException {
-    return null;
-  }
-
-  @Override
-  public Collection<String> getAllPartitionLocations(String dbName,
-      String tblName) {
-    return null;
-  }
-
-  @Override
-  public void deleteTableWrites(String dbName, String tblName, long from,
-      long to) throws MetaException {
-  }
 }
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
index 42bc8b2..e0302fb 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -17,22 +17,18 @@
  */
 package org.apache.hadoop.hive.metastore;
 
-import static org.junit.Assert.*;
-
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
 import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
 import org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics;
 import org.apache.hadoop.hive.common.metrics.metrics2.MetricsReporting;
 import org.apache.hadoop.hive.common.metrics.MetricsTestUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -54,14 +50,10 @@ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.metastore.messaging.EventMessage;
-import org.apache.hadoop.hive.metastore.model.MTableWrite;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
-import org.apache.hive.common.util.MockFileSystem;
-import org.apache.hive.common.util.MockFileSystem.MockFile;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -245,151 +237,6 @@ public class TestObjectStore {
     objectStore.dropDatabase(DB1);
   }
 
-
-  /**
-   * Test table operations
-   */
-  @Test
-  public void testMmCleaner() throws Exception {
-    HiveConf conf = new HiveConf();
-    conf.set(ConfVars.HIVE_METASTORE_MM_HEARTBEAT_TIMEOUT.varname, "3ms");
-    conf.set(ConfVars.HIVE_METASTORE_MM_ABSOLUTE_TIMEOUT.varname, "20ms");
-    conf.set(ConfVars.HIVE_METASTORE_MM_ABORTED_GRACE_PERIOD.varname, "5ms");
-    conf.set("fs.mock.impl", MockFileSystem.class.getName());
-
-    MockFileSystem mfs = (MockFileSystem)(new Path("mock:///").getFileSystem(conf));
-    mfs.clear();
-    mfs.allowDelete = true;
-    // Don't add the files just yet...
-    MockFile[] files = new MockFile[9];
-    for (int i = 0; i < files.length; ++i) {
-      files[i] = new MockFile("mock:/foo/mm_" + i + "/1", 0, new byte[0]);
-    }
-
-    LongSupplier time = new LongSupplier();
-
-    MmCleanerThread mct = new MmCleanerThread(0);
-    mct.setHiveConf(conf);
-    mct.overrideTime(time);
-
-    Database db1 = new Database(DB1, "description", "locationurl", null);
-    objectStore.createDatabase(db1);
-    StorageDescriptor sd = createFakeSd("mock:/foo");
-    HashMap<String,String> params = new HashMap<String,String>();
-    params.put("EXTERNAL", "false");
-    params.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
-    params.put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, "insert_only");
-    Table tbl = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd,
-        null, params, null, null, "MANAGED_TABLE");
-    objectStore.createTable(tbl);
-
-    // Add write #0 so the watermark wouldn't advance; skip write #1, add #2 at 0, skip #3
-    createCompleteTableWrite(mfs, files, 0, time, tbl, HiveMetaStore.MM_WRITE_OPEN);
-    mfs.addFile(files[1]);
-    createCompleteTableWrite(mfs, files, 2, time, tbl, HiveMetaStore.MM_WRITE_OPEN);
-    mfs.addFile(files[3]);
-    tbl.setMmNextWriteId(4);
-    objectStore.alterTable(DB1, TABLE1, tbl);
-
-    mct.runOneIteration(objectStore);
-    List<Long> writes = getAbortedWrites();
-    assertEquals(0, writes.size()); // Missing write is not aborted before timeout.
-    time.value = 4; // Advance time.
-    mct.runOneIteration(objectStore);
-    writes = getAbortedWrites();
-    assertEquals(1, writes.size()); // Missing write is aborted after timeout.
-    assertEquals(1L, writes.get(0).longValue());
-    checkDeletedSet(files, 1);
-    // However, write #3 was not aborted as we cannot determine when it will time out.
-    createCompleteTableWrite(mfs, files, 4, time, tbl, HiveMetaStore.MM_WRITE_OPEN);
-    time.value = 8;
-    // It will now be aborted, since we have a following write.
-    mct.runOneIteration(objectStore);
-    writes = getAbortedWrites();
-    assertEquals(2, writes.size());
-    assertTrue(writes.contains(Long.valueOf(3)));
-    checkDeletedSet(files, 1, 3);
-
-    // Commit #0 and #2 and confirm that the watermark advances.
-    // It will only advance over #1, since #3 was aborted at 8 and grace period has not passed.
-    time.value = 10;
-    MTableWrite tw = objectStore.getTableWrite(DB1, TABLE1, 0);
-    tw.setState(String.valueOf(HiveMetaStore.MM_WRITE_COMMITTED));
-    objectStore.updateTableWrite(tw);
-    tw = objectStore.getTableWrite(DB1, TABLE1, 2);
-    tw.setState(String.valueOf(HiveMetaStore.MM_WRITE_COMMITTED));
-    objectStore.updateTableWrite(tw);
-    mct.runOneIteration(objectStore);
-    writes = getAbortedWrites();
-    assertEquals(1, writes.size());
-    assertEquals(3L, writes.get(0).longValue());
-    tbl = objectStore.getTable(DB1, TABLE1);
-    assertEquals(2L, tbl.getMmWatermarkWriteId());
-
-    // Now advance the time and see that watermark also advances over #3.
-    time.value = 16;
-    mct.runOneIteration(objectStore);
-    writes = getAbortedWrites();
-    assertEquals(0, writes.size());
-    tbl = objectStore.getTable(DB1, TABLE1);
-    assertEquals(3L, tbl.getMmWatermarkWriteId());
-
-    // Check that the open write gets aborted after some time; then the watermark advances.
-    time.value = 25;
-    mct.runOneIteration(objectStore);
-    writes = getAbortedWrites();
-    assertEquals(1, writes.size());
-    assertEquals(4L, writes.get(0).longValue());
-    time.value = 31;
-    mct.runOneIteration(objectStore);
-    tbl = objectStore.getTable(DB1, TABLE1);
-    assertEquals(4L, tbl.getMmWatermarkWriteId());
-    checkDeletedSet(files, 1, 3, 4); // The other two should still be deleted.
-
-    // Finally check that we cannot advance watermark if cleanup fails for some file.
-    createCompleteTableWrite(mfs, files, 5, time, tbl, HiveMetaStore.MM_WRITE_ABORTED);
-    createCompleteTableWrite(mfs, files, 6, time, tbl, HiveMetaStore.MM_WRITE_ABORTED);
-    createCompleteTableWrite(mfs, files, 7, time, tbl, HiveMetaStore.MM_WRITE_COMMITTED);
-    createCompleteTableWrite(mfs, files, 8, time, tbl, HiveMetaStore.MM_WRITE_ABORTED);
-    time.value = 37; // Skip the grace period.
-    files[6].cannotDelete = true;
-    mct.runOneIteration(objectStore);
-    checkDeletedSet(files, 1, 3, 4, 5, 8); // The other two should still be deleted.
-    tbl = objectStore.getTable(DB1, TABLE1);
-    assertEquals(5L, tbl.getMmWatermarkWriteId()); // Watermark only goes up to 5.
-    files[6].cannotDelete = false;
-    mct.runOneIteration(objectStore);
-    checkDeletedSet(files, 1, 3, 4, 5, 6, 8);
-    tbl = objectStore.getTable(DB1, TABLE1);
-    assertEquals(8L, tbl.getMmWatermarkWriteId()); // Now it advances all the way.
-
-    objectStore.dropTable(DB1, TABLE1);
-    objectStore.dropDatabase(DB1);
-  }
-
-  private void createCompleteTableWrite(MockFileSystem mfs, MockFile[] files,
-      int id, LongSupplier time, Table tbl, char state) throws MetaException, InvalidObjectException {
-    objectStore.createTableWrite(tbl, id, state, time.value);
-    mfs.addFile(files[id]);
-    tbl.setMmNextWriteId(id + 1);
-    objectStore.alterTable(DB1, TABLE1, tbl);
-  }
-
-  private void checkDeletedSet(MockFile[] files, int... deleted) {
-    for (int id : deleted) {
-      assertTrue("File " + id + " not deleted", files[id].isDeleted);
-    }
-    int count = 0;
-    for (MockFile file : files) {
-      if (file.isDeleted) ++count;
-    }
-    assertEquals(deleted.length, count); // Make sure nothing else is deleted.
-  }
-
-  private List<Long> getAbortedWrites() throws MetaException {
-    return objectStore.getTableWriteIds(DB1, TABLE1, -1, 10, HiveMetaStore.MM_WRITE_ABORTED);
-  }
-
   private StorageDescriptor createFakeSd(String location) {
     return new StorageDescriptor(null, location, null, null, false, 0,
         new SerDeInfo("SerDeName", "serializationLib", null), null, null, null);

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 7b74bd5..28fe420 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -39,12 +39,10 @@ import java.util.concurrent.locks.ReentrantLock;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.ValidReadTxnList;
 import org.apache.hadoop.hive.common.ValidTxnList;
-import org.apache.hadoop.hive.common.ValidWriteIds;
 import org.apache.hadoop.hive.common.metrics.common.Metrics;
 import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
 import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
@@ -52,11 +50,8 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.conf.HiveVariableSource;
 import org.apache.hadoop.hive.conf.VariableSubstitution;
-import org.apache.hadoop.hive.metastore.LockComponentBuilder;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.api.DataOperationType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.LockComponent;
 import org.apache.hadoop.hive.metastore.api.Schema;
 import org.apache.hadoop.hive.ql.exec.ConditionalTask;
 import org.apache.hadoop.hive.ql.exec.ExplainTask;
@@ -78,7 +73,6 @@ import org.apache.hadoop.hive.ql.hooks.PostExecute;
 import org.apache.hadoop.hive.ql.hooks.PreExecute;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
 import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
 import org.apache.hadoop.hive.ql.lockmgr.LockException;
@@ -1540,13 +1534,6 @@ public class Driver implements CommandProcessor {
           return rollback(createProcessorResponse(ret));
         }
       }
-
-      try {
-        acquireWriteIds(plan, conf);
-      } catch (HiveException e) {
-        return handleHiveException(e, 1);
-      }
-
       ret = execute(true);
       if (ret != 0) {
         //if needRequireLock is false, the release here will do nothing because there is no lock
@@ -1608,48 +1595,6 @@ public class Driver implements CommandProcessor {
     }
   }
 
-
-  private static void acquireWriteIds(QueryPlan plan, HiveConf conf) throws HiveException {
-    // Output IDs are put directly into FileSinkDesc; here, we only need to take care of inputs.
-    Configuration fetchConf = null;
-    if (plan.getFetchTask() != null) {
-      fetchConf = plan.getFetchTask().getFetchConf();
-    }
-    for (ReadEntity input : plan.getInputs()) {
-      Utilities.LOG14535.debug("Looking at " + input);
-      Table t = extractTable(input);
-      if (t == null) continue;
-      Utilities.LOG14535.info("Checking " + t.getTableName() + " for being a MM table: " + t.getParameters());
-      if (!MetaStoreUtils.isInsertOnlyTable(t.getParameters())) {
-        ValidWriteIds.clearConf(conf, t.getDbName(), t.getTableName());
-        if (fetchConf != null) {
-          ValidWriteIds.clearConf(fetchConf, t.getDbName(), t.getTableName());
-        }
-        continue;
-      }
-      ValidWriteIds ids = Hive.get().getValidWriteIdsForTable(t.getDbName(), t.getTableName());
-      ids.addToConf(conf, t.getDbName(), t.getTableName());
-      if (fetchConf != null) {
-        ids.addToConf(fetchConf, t.getDbName(), t.getTableName());
-      }
-    }
-  }
-
-  private static Table extractTable(ReadEntity input) {
-    Table t = null;
-    switch (input.getType()) {
-      case TABLE:
-        t = input.getTable();
-        break;
-      case DUMMYPARTITION:
-      case PARTITION:
-        t = input.getPartition().getTable();
-        break;
-      default: return null;
-    }
-    return (t != null && !t.isTemporary()) ? t : null;
-  }
-
   private CommandProcessorResponse rollback(CommandProcessorResponse cpr) {
     //console.printError(cpr.toString());
     try {
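
With acquireWriteIds() removed, input visibility no longer rides on per-table ValidWriteIds entries in the conf; the DDLTask hunk further down shows the replacement, which rehydrates a ValidTxnList from a single conf key. A minimal sketch of that pattern (helper name hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.common.ValidReadTxnList;
    import org.apache.hadoop.hive.common.ValidTxnList;

    // Rehydrate the serialized transaction list that the session places in
    // the Configuration; an absent key falls back to a default list.
    static ValidTxnList txnListFromConf(Configuration conf) {
      String value = conf.get(ValidTxnList.VALID_TXNS_KEY);
      return (value == null) ? new ValidReadTxnList() : new ValidReadTxnList(value);
    }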

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
index 1315b99..7ef4f49 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
@@ -89,7 +89,7 @@ public abstract class AbstractFileMergeOperator<T extends FileMergeDesc>
         .isListBucketingAlterTableConcatenate();
     listBucketingDepth = conf.getListBucketingDepth();
     Path specPath = conf.getOutputPath();
-    isMmTable = conf.getMmWriteId() != null;
+    isMmTable = conf.getTxnId() != null;
     if (isMmTable) {
       updatePaths(specPath, null);
     } else {
@@ -246,7 +246,7 @@ public abstract class AbstractFileMergeOperator<T extends FileMergeDesc>
           // There's always just one file that we have merged.
           // The union/DP/etc. should already be account for in the path.
           Utilities.writeMmCommitManifest(Lists.newArrayList(outPath),
-              tmpPath.getParent(), fs, taskId, conf.getMmWriteId(), null);
+              tmpPath.getParent(), fs, taskId, conf.getTxnId(), conf.getStmtId(), null);
           LOG.info("Merged into " + finalPath + "(" + fss.getLen() + " bytes).");
         }
       }
@@ -280,7 +280,8 @@ public abstract class AbstractFileMergeOperator<T extends FileMergeDesc>
     try {
       Path outputDir = conf.getOutputPath();
       FileSystem fs = outputDir.getFileSystem(hconf);
-      Long mmWriteId = conf.getMmWriteId();
+      Long mmWriteId = conf.getTxnId();
+      int stmtId = conf.getStmtId();
       if (mmWriteId == null) {
         Path backupPath = backupOutputPath(fs, outputDir);
         Utilities.mvFileToFinalPath(
@@ -297,7 +298,7 @@ public abstract class AbstractFileMergeOperator<T extends FileMergeDesc>
         // We don't expect missing buckets from mere (actually there should be no buckets),
         // so just pass null as bucketing context. Union suffix should also be accounted for.
         Utilities.handleMmTableFinalPath(outputDir.getParent(), null, hconf, success,
-            dpLevels, lbLevels, null, mmWriteId, reporter, false);
+            dpLevels, lbLevels, null, mmWriteId, stmtId, reporter, false);
       }
 
     } catch (IOException e) {
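
The merge operator now carries a (txnId, stmtId) pair instead of a standalone mmWriteId; under the ACID conventions these typically combine into a delta directory name. A hedged sketch, assuming the delta_<min>_<max>_<stmtId> naming with zero-padded ids (format not taken from this patch):

    // Hypothetical helper: compose an ACID delta directory name from a
    // transaction id and statement id.
    static String deltaSubdir(long txnId, int stmtId) {
      return String.format("delta_%07d_%07d_%04d", txnId, txnId, stmtId);
    }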

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
index 82f6074..1f223f5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
@@ -18,12 +18,12 @@
 
 package org.apache.hadoop.hive.ql.exec;
 
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hadoop.hive.common.JavaUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileStatus;
@@ -31,10 +31,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.common.ValidWriteIds;
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.DriverContext;
-import org.apache.hadoop.hive.ql.parse.LoadSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.plan.CopyWork;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.util.StringUtils;
@@ -112,7 +109,7 @@ public class CopyTask extends Task<CopyWork> implements Serializable {
     if (!fs.exists(path)) return null;
     if (!isSourceMm) return matchFilesOneDir(fs, path, null);
     // TODO: this doesn't handle list bucketing properly. Does the original exim do that?
-    FileStatus[] mmDirs = fs.listStatus(path, new ValidWriteIds.AnyIdDirFilter());
+    FileStatus[] mmDirs = fs.listStatus(path, new JavaUtils.AnyIdDirFilter());
     if (mmDirs == null || mmDirs.length == 0) return null;
     List<FileStatus> allFiles = new ArrayList<FileStatus>();
     for (FileStatus mmDir : mmDirs) {
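
The copy path now scans for ACID delta directories via JavaUtils instead of ValidWriteIds. A hedged sketch of that scan, assuming AnyIdDirFilter accepts any directory carrying a transaction-id-style name and extractTxnId returns null for everything else:

    FileSystem fs = path.getFileSystem(conf);
    FileStatus[] mmDirs = fs.listStatus(path, new JavaUtils.AnyIdDirFilter());
    if (mmDirs != null) {
      for (FileStatus mmDir : mmDirs) {
        Long txnId = JavaUtils.extractTxnId(mmDir.getPath()); // null => not a delta dir
        // collect the files under mmDir, as matchFilesOneDir does for plain dirs
      }
    }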

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 81e4744..a2186cc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -57,8 +57,10 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.common.ValidWriteIds;
+import org.apache.hadoop.hive.common.ValidReadTxnList;
+import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -98,7 +100,6 @@ import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.TxnInfo;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.Context;
@@ -4035,7 +4036,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
           + " to false for this query if you want to force the conversion.");
     }
     Hive db = getHive();
-    ValidWriteIds ids = db.getValidWriteIdsForTable(tbl.getDbName(), tbl.getTableName());
+    String value = conf.get(ValidTxnList.VALID_TXNS_KEY);
+    ValidTxnList validTxnList = value == null ? new ValidReadTxnList() : new ValidReadTxnList(value);
     if (tbl.getPartitionKeys().size() > 0) {
       PartitionIterable parts = new PartitionIterable(db, tbl, null,
           HiveConf.getIntVar(conf, ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
@@ -4043,15 +4045,15 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       while (partIter.hasNext()) {
         Partition part = partIter.next();
         checkMmLb(part);
-        handleRemoveMm(part.getDataLocation(), ids, allMmDirs);
+        handleRemoveMm(part.getDataLocation(), validTxnList, allMmDirs);
       }
     } else {
       checkMmLb(tbl);
-      handleRemoveMm(tbl.getDataLocation(), ids, allMmDirs);
+      handleRemoveMm(tbl.getDataLocation(), validTxnList, allMmDirs);
     }
     List<Path> targetPaths = new ArrayList<>(allMmDirs.size());
     List<String> targetPrefix = new ArrayList<>(allMmDirs.size());
-    int prefixLen = ValidWriteIds.MM_PREFIX.length();
+    int prefixLen = JavaUtils.DELTA_PREFIX.length();
     for (int i = 0; i < allMmDirs.size(); ++i) {
       Path src = allMmDirs.get(i);
       Path tgt = src.getParent();
@@ -4082,7 +4084,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
   }
 
   private void handleRemoveMm(
-      Path path, ValidWriteIds ids, List<Path> result) throws HiveException {
+      Path path, ValidTxnList validTxnList, List<Path> result) throws HiveException {
     // Note: doesn't take LB into account; that is not presently supported here (throws above).
     try {
       FileSystem fs = path.getFileSystem(conf);
@@ -4092,10 +4094,10 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
           ensureDelete(fs, childPath, "a non-directory file");
           continue;
         }
-        Long writeId = ValidWriteIds.extractWriteId(childPath);
+        Long writeId = JavaUtils.extractTxnId(childPath);
         if (writeId == null) {
           ensureDelete(fs, childPath, "an unknown directory");
-        } else if (!ids.isValid(writeId)) {
+        } else if (!validTxnList.isTxnValid(writeId)) {
           // Assume no concurrent active writes - we rely on locks here. We could check and fail.
           ensureDelete(fs, childPath, "an uncommitted directory");
         } else {
@@ -4122,9 +4124,19 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     // We will move all the files in the table/partition directories into the first MM
     // directory, then commit the first write ID.
     List<Path> srcs = new ArrayList<>(), tgts = new ArrayList<>();
+    long mmWriteId = 0;
+    try {
+      HiveTxnManager txnManager = SessionState.get().getTxnMgr();
+      mmWriteId = txnManager.openTxn(new Context(conf), conf.getUser());
+      txnManager.commitTxn();
+    } catch (Exception e) {
+      String errorMessage = "FAILED: Error in acquiring locks: " + e.getMessage();
+      console.printError(errorMessage, "\n"
+          + org.apache.hadoop.util.StringUtils.stringifyException(e));
+    }
+    int stmtId = 0;
+    String mmDir = AcidUtils.deltaSubdir(mmWriteId, mmWriteId, stmtId);
     Hive db = getHive();
-    long mmWriteId = db.getNextTableWriteId(tbl.getDbName(), tbl.getTableName());
-    String mmDir = ValidWriteIds.getMmFilePrefix(mmWriteId);
     if (tbl.getPartitionKeys().size() > 0) {
       PartitionIterable parts = new PartitionIterable(db, tbl, null,
           HiveConf.getIntVar(conf, ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
@@ -4147,15 +4159,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     // Don't set inputs and outputs - the locks have already been taken so it's pointless.
     MoveWork mw = new MoveWork(null, null, null, null, false);
     mw.setMultiFilesDesc(new LoadMultiFilesDesc(srcs, tgts, true, null, null));
-    ImportCommitWork icw = new ImportCommitWork(tbl.getDbName(), tbl.getTableName(), mmWriteId);
-    // TODO# this is hacky and will be gone with ACID. The problem is getting the write ID above
-    //       modifies the table, but the table object above is preserved and modified without
-    //       getting this change, so saving it will overwrite write ID. Ideally, when we save
-    //       only specific fields, and not overwrite write ID every time we alter table.
-    //       There's probably some way in DN to achieve that, but for now let's just update the
-    //       original object here. This is safe due to DDL lock and the fact that converting
-    //       the table to MM here from non-MM should mean no concurrent write ID updates.
-    tbl.setMmNextWriteId(mmWriteId + 1);
+    ImportCommitWork icw = new ImportCommitWork(tbl.getDbName(), tbl.getTableName(), mmWriteId, stmtId);
     Task<?> mv = TaskFactory.get(mw, conf), ic = TaskFactory.get(icw, conf);
     mv.addDependentTask(ic);
     return Lists.<Task<?>>newArrayList(mv);
@@ -4568,20 +4572,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       Long mmWriteId = crtTbl.getInitialMmWriteId();
       if (crtTbl.isCTAS() || mmWriteId != null) {
         Table createdTable = db.getTable(tbl.getDbName(), tbl.getTableName());
-        if (mmWriteId != null) {
-          // TODO# this would be retrieved via ACID before the query runs; for now we rely on it
-          //       being zero at start; we can't create a write ID before we create the table here.
-          long initialWriteId = db.getNextTableWriteId(tbl.getDbName(), tbl.getTableName());
-          if (initialWriteId != mmWriteId) {
-            throw new HiveException("Initial write ID mismatch - expected " + mmWriteId
-                + " but got " + initialWriteId);
-          }
-          // CTAS create the table on a directory that already exists; import creates the table
-          // first  (in parallel with copies?), then commits after all the loads.
-          if (crtTbl.isCTAS()) {
-            db.commitMmTableWrite(tbl, initialWriteId);
-          }
-        }
         if (crtTbl.isCTAS()) {
           DataContainer dc = new DataContainer(createdTable.getTTable());
           SessionState.get().getLineageState().setLineage(
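
The conversion-from-MM path now validates directories against the transaction list published in the job configuration, rather than against per-table write ids from the metastore. A minimal sketch of the check, assuming the transaction manager has populated VALID_TXNS_KEY for this query:

    String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY);
    ValidTxnList validTxnList =
        txnString == null ? new ValidReadTxnList() : new ValidReadTxnList(txnString);
    Long txnId = JavaUtils.extractTxnId(childPath);
    if (txnId == null || !validTxnList.isTxnValid(txnId)) {
      // unknown or uncommitted directory: delete it, as handleRemoveMm does above
    } else {
      // committed delta: queue its files for the move back to the table root
    }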

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
index a3e4c9f..40330fa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
@@ -36,10 +36,9 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.ValidReadTxnList;
 import org.apache.hadoop.hive.common.ValidTxnList;
-import org.apache.hadoop.hive.common.ValidWriteIds;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.HiveContextAwareRecordReader;
@@ -129,7 +128,6 @@ public class FetchOperator implements Serializable {
 
   private transient StructObjectInspector outputOI;
   private transient Object[] row;
-  private transient Map<String, ValidWriteIds> writeIdMap;
 
   public FetchOperator(FetchWork work, JobConf job) throws HiveException {
     this(work, job, null, null);
@@ -276,7 +274,7 @@ public class FetchOperator implements Serializable {
       }
       FileSystem fs = currPath.getFileSystem(job);
       if (fs.exists(currPath)) {
-        if (extractWriteIdsForCurrentTable() != null) {
+        if (extractValidTxnList() != null) {
           return true;
         }
         for (FileStatus fStat : listStatusUnderPath(fs, currPath)) {
@@ -407,12 +405,12 @@ public class FetchOperator implements Serializable {
     if (inputFormat instanceof HiveInputFormat) {
       return StringUtils.escapeString(currPath.toString()); // No need to process here.
     }
-    ValidWriteIds ids = extractWriteIdsForCurrentTable();
-    if (ids != null) {
-      Utilities.LOG14535.info("Observing " + currDesc.getTableName() + ": " + ids);
+    ValidTxnList validTxnList = extractValidTxnList();
+    if (validTxnList != null) {
+      Utilities.LOG14535.info("Observing " + currDesc.getTableName() + ": " + validTxnList);
     }
 
-    Path[] dirs = HiveInputFormat.processPathsForMmRead(Lists.newArrayList(currPath), job, ids);
+    Path[] dirs = HiveInputFormat.processPathsForMmRead(Lists.newArrayList(currPath), job, validTxnList);
     if (dirs == null || dirs.length == 0) {
       return null; // No valid inputs. This condition is logged inside the call.
     }
@@ -423,11 +421,16 @@ public class FetchOperator implements Serializable {
     return str.toString();
   }
 
-  private ValidWriteIds extractWriteIdsForCurrentTable() {
-    if (writeIdMap == null) {
-      writeIdMap = new HashMap<String, ValidWriteIds>();
+  private ValidTxnList extractValidTxnList() {
+    ValidTxnList validTxnList;
+    if (org.apache.commons.lang.StringUtils.isBlank(currDesc.getTableName())) {
+      validTxnList = null; // i.e. not fetching from a table directly but from a temp location
+    } else {
+      String txnString = job.get(ValidTxnList.VALID_TXNS_KEY);
+      validTxnList = txnString == null ? new ValidReadTxnList() :
+          new ValidReadTxnList(txnString);
     }
-    return HiveInputFormat.extractWriteIds(writeIdMap, job, currDesc.getTableName());
+    return validTxnList;
   }
 
   private FetchInputFormatSplit[] splitSampling(SplitSample splitSample,
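
The read side applies the same list when expanding paths. A sketch of the flow, with currPath and job taken from the surrounding operator state:

    ValidTxnList validTxnList = extractValidTxnList(); // null when reading a temp location
    Path[] dirs = HiveInputFormat.processPathsForMmRead(
        Lists.newArrayList(currPath), job, validTxnList);
    if (dirs == null || dirs.length == 0) {
      return null; // no committed deltas are visible to this reader
    }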

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
index bd822df..f6d27fb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
@@ -193,9 +193,4 @@ public class FetchTask extends Task<FetchWork> implements Serializable {
       fetch.clearFetchContext();
     }
   }
-
-  public Configuration getFetchConf() {
-    return fetch.getJobConf();
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 8febcc0..481b907 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -33,16 +33,11 @@ import java.util.Properties;
 import java.util.Set;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.common.ValidWriteIds;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConfUtil;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -176,6 +171,8 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
     int acidLastBucket = -1;
     int acidFileOffset = -1;
     private boolean isMmTable;
+    private Long txnId;
+    private int stmtId;
 
     public FSPaths(Path specPath, boolean isMmTable) {
       this.isMmTable = isMmTable;
@@ -185,6 +182,8 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
       } else {
         tmpPath = specPath;
         taskOutputTempPath = null; // Should not be used.
+        txnId = conf.getTransactionId();
+        stmtId = conf.getStatementId();
       }
       Utilities.LOG14535.info("new FSPaths for " + numFiles + " files, dynParts = " + bDynParts
           + ": tmpPath " + tmpPath + ", task path " + taskOutputTempPath
@@ -327,7 +326,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
           }
           outPaths[filesIdx] = getTaskOutPath(taskId);
         } else {
-          String subdirPath = ValidWriteIds.getMmFilePrefix(conf.getMmWriteId());
+          String subdirPath = AcidUtils.deltaSubdir(txnId, txnId, stmtId);
           if (unionPath != null) {
             // Create the union directory inside the MM directory.
             subdirPath += Path.SEPARATOR + unionPath;
@@ -731,10 +730,9 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
       Utilities.copyTableJobPropertiesToConf(conf.getTableInfo(), jc);
       // only create bucket files only if no dynamic partitions,
       // buckets of dynamic partitions will be created for each newly created partition
-      if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID ||
-          conf.getWriteType() == AcidUtils.Operation.INSERT_ONLY) {
+      if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID || conf.isMmTable()) {
         Path outPath = fsp.outPaths[filesIdx];
-        if ((conf.getWriteType() == AcidUtils.Operation.INSERT_ONLY || conf.isMmTable())
+        if (conf.isMmTable()
             && !FileUtils.mkdir(fs, outPath.getParent(), hconf)) {
           LOG.warn("Unable to create directory with inheritPerms: " + outPath);
         }
@@ -880,8 +878,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
       // for a given operator branch prediction should work quite nicely on it.
       // RecordUpdateer expects to get the actual row, not a serialized version of it.  Thus we
       // pass the row rather than recordValue.
-      if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID ||
-          conf.getWriteType() == AcidUtils.Operation.INSERT_ONLY) {
+      if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID || conf.isMmTable()) {
         rowOutWriters[writerOffset].write(recordValue);
       } else if (conf.getWriteType() == AcidUtils.Operation.INSERT) {
         fpaths.updaters[writerOffset].insert(conf.getTransactionId(), row);
@@ -925,8 +922,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
   protected boolean areAllTrue(boolean[] statsFromRW) {
     // If we are doing an acid operation they will always all be true as RecordUpdaters always
     // collect stats
-    if (conf.getWriteType() != AcidUtils.Operation.NOT_ACID &&
-        conf.getWriteType() != AcidUtils.Operation.INSERT_ONLY) {
+    if (conf.getWriteType() != AcidUtils.Operation.NOT_ACID && !conf.isMmTable()) {
       return true;
     }
     for(boolean b : statsFromRW) {
@@ -1070,8 +1066,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
           // stats from the record writer and store in the previous fsp that is cached
           if (conf.isGatherStats() && isCollectRWStats) {
             SerDeStats stats = null;
-            if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID ||
-                conf.getWriteType() == AcidUtils.Operation.INSERT_ONLY) {
+            if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID || conf.isMmTable()) {
               RecordWriter outWriter = prevFsp.outWriters[0];
               if (outWriter != null) {
                 stats = ((StatsProvidingRecordWriter) outWriter).getStats();
@@ -1173,8 +1168,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
         // record writer already gathers the statistics, it can simply return the
         // accumulated statistics which will be aggregated in case of spray writers
         if (conf.isGatherStats() && isCollectRWStats) {
-          if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID ||
-              conf.getWriteType() == AcidUtils.Operation.INSERT_ONLY) {
+          if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID || conf.isMmTable()) {
             for (int idx = 0; idx < fsp.outWriters.length; idx++) {
               RecordWriter outWriter = fsp.outWriters[idx];
               if (outWriter != null) {
@@ -1204,7 +1198,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
       }
       if (conf.getMmWriteId() != null) {
         Utilities.writeMmCommitManifest(
-            commitPaths, specPath, fs, taskId, conf.getMmWriteId(), unionPath);
+            commitPaths, specPath, fs, taskId, conf.getMmWriteId(), conf.getStatementId(), unionPath);
       }
       // Only publish stats if this operator's flag was set to gather stats
       if (conf.isGatherStats()) {
@@ -1260,7 +1254,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
           MissingBucketsContext mbc = new MissingBucketsContext(
               conf.getTableInfo(), numBuckets, conf.getCompressed());
           Utilities.handleMmTableFinalPath(specPath, unionSuffix, hconf, success,
-              dpLevels, lbLevels, mbc, conf.getMmWriteId(), reporter, conf.isMmCtas());
+              dpLevels, lbLevels, mbc, conf.getMmWriteId(), conf.getStatementId(), reporter, conf.isMmCtas());
         }
       }
     } catch (IOException e) {
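
Taken together, the FileSinkOperator changes mean each writer task targets a delta subdirectory and records its outputs in a manifest keyed by (txnId, stmtId). A hedged sketch of that sequence, with fs, taskId, specPath and commitPaths assumed from the surrounding close path:

    String subdirPath = AcidUtils.deltaSubdir(txnId, txnId, stmtId); // was getMmFilePrefix
    if (unionPath != null) {
      subdirPath += Path.SEPARATOR + unionPath; // union branches nest inside the delta dir
    }
    Utilities.writeMmCommitManifest(
        commitPaths, specPath, fs, taskId, txnId, stmtId, unionPath);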

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitTask.java
index ba009b9..27db9a4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitTask.java
@@ -19,8 +19,6 @@
 package org.apache.hadoop.hive.ql.exec;
 
 import org.apache.hadoop.hive.ql.DriverContext;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.util.StringUtils;
@@ -35,16 +33,13 @@ public class ImportCommitTask extends Task<ImportCommitWork> {
 
   @Override
   public int execute(DriverContext driverContext) {
-    Utilities.LOG14535.info("Executing ImportCommit for " + work.getMmWriteId());
+    Utilities.LOG14535.info("Executing ImportCommit for " + work.getTxnId());
 
     try {
       if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
         Utilities.LOG14535.info("Exiting due to explain");
         return 0;
       }
-      Hive db = getHive();
-      Table tbl = db.getTable(work.getDbName(), work.getTblName());
-      db.commitMmTableWrite(tbl, work.getMmWriteId());
       return 0;
     } catch (Exception e) {
       console.printError("Failed with exception " + e.getMessage(), "\n"

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitWork.java
index f62d237..5b59635 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitWork.java
@@ -26,16 +26,22 @@ import org.apache.hadoop.hive.ql.plan.Explain.Level;
 public class ImportCommitWork implements Serializable {
   private static final long serialVersionUID = 1L;
   private String dbName, tblName;
-  private long mmWriteId;
+  private long txnId;
+  private int stmtId;
 
-  public ImportCommitWork(String dbName, String tblName, long mmWriteId) {
-    this.mmWriteId = mmWriteId;
+  public ImportCommitWork(String dbName, String tblName, long txnId, int stmtId) {
+    this.txnId = txnId;
+    this.stmtId = stmtId;
     this.dbName = dbName;
     this.tblName = tblName;
   }
 
-  public long getMmWriteId() {
-    return mmWriteId;
+  public long getTxnId() {
+    return txnId;
+  }
+
+  public int getStmtId() {
+    return stmtId;
   }
 
   public String getDbName() {
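
With the metastore write-id commit removed, ImportCommitWork is reduced to a carrier for the (txnId, stmtId) pair. Typical construction, mirroring the DDLTask hunk above:

    ImportCommitWork icw = new ImportCommitWork(dbName, tblName, txnId, stmtId);
    Task<?> ic = TaskFactory.get(icw, conf);
    mv.addDependentTask(ic); // the commit task runs only after the move completes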

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index acf7404..c68fc0e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.model.MMasterKey;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
@@ -340,10 +339,10 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
 
         checkFileFormats(db, tbd, table);
 
-        boolean isAcid = work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID &&
-            work.getLoadTableWork().getWriteType() != AcidUtils.Operation.INSERT_ONLY;
-        if (tbd.isMmTable() && isAcid) {
-           throw new HiveException("ACID and MM are not supported");
+        boolean isFullAcidOp = work.getLoadTableWork().getWriteType() == AcidUtils.Operation.UPDATE ||
+            work.getLoadTableWork().getWriteType() == AcidUtils.Operation.DELETE;
+        if (tbd.isMmTable() && isFullAcidOp) {
+           throw new HiveException("UPDATE and DELETE operations are not supported for MM table");
         }
 
         // Create a data container
@@ -356,8 +355,8 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
                 "Only single-partition LoadTableDesc can skip commiting write ID");
           }
           db.loadTable(tbd.getSourcePath(), tbd.getTable().getTableName(), tbd.getReplace(),
-              work.isSrcLocal(), isSkewedStoredAsDirs(tbd), isAcid, hasFollowingStatsTask(),
-              tbd.getMmWriteId());
+              work.isSrcLocal(), isSkewedStoredAsDirs(tbd), isFullAcidOp, hasFollowingStatsTask(),
+              tbd.getTxnId(), tbd.getStmtId());
           if (work.getOutputs() != null) {
             DDLTask.addIfAbsentByName(new WriteEntity(table,
               getWriteType(tbd, work.getLoadTableWork().getWriteType())), work.getOutputs());
@@ -414,13 +413,12 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
     db.validatePartitionNameCharacters(partVals);
     Utilities.LOG14535.info("loadPartition called from " + tbd.getSourcePath()
         + " into " + tbd.getTable().getTableName());
-    boolean isCommitMmWrite = tbd.isCommitMmWrite();
     db.loadPartition(tbd.getSourcePath(), tbd.getTable().getTableName(),
         tbd.getPartitionSpec(), tbd.getReplace(),
         tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(),
-        (work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID &&
-         work.getLoadTableWork().getWriteType() != AcidUtils.Operation.INSERT_ONLY),
-        hasFollowingStatsTask(), tbd.getMmWriteId(), isCommitMmWrite);
+        work.getLoadTableWork().getWriteType() == AcidUtils.Operation.UPDATE ||
+            work.getLoadTableWork().getWriteType() == AcidUtils.Operation.DELETE,
+        hasFollowingStatsTask(), tbd.getTxnId(), tbd.getStmtId());
     Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);
 
     // See the comment inside updatePartitionBucketSortColumns.
@@ -464,11 +462,10 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
         tbd.getReplace(),
         dpCtx.getNumDPCols(),
         (tbd.getLbCtx() == null) ? 0 : tbd.getLbCtx().calculateListBucketingLevel(),
-        work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID &&
-            work.getLoadTableWork().getWriteType() != AcidUtils.Operation.INSERT_ONLY,
-        SessionState.get().getTxnMgr().getCurrentTxnId(), hasFollowingStatsTask(),
-        work.getLoadTableWork().getWriteType(),
-        tbd.getMmWriteId());
+        work.getLoadTableWork().getWriteType() == AcidUtils.Operation.UPDATE ||
+            work.getLoadTableWork().getWriteType() == AcidUtils.Operation.DELETE,
+        SessionState.get().getTxnMgr().getCurrentTxnId(), tbd.getStmtId(), hasFollowingStatsTask(),
+        work.getLoadTableWork().getWriteType());
 
     // publish DP columns to its subscribers
     if (dps != null && dps.size() > 0) {
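
The reworked MoveTask predicate is worth spelling out: insert-only (MM) tables now flow through the NOT_ACID/INSERT code paths, and only UPDATE and DELETE count as full-ACID operations that MM tables must reject. A compact restatement, with tbd and work as in MoveTask:

    AcidUtils.Operation writeType = work.getLoadTableWork().getWriteType();
    boolean isFullAcidOp = writeType == AcidUtils.Operation.UPDATE
        || writeType == AcidUtils.Operation.DELETE;
    if (tbd.isMmTable() && isFullAcidOp) {
      throw new HiveException("UPDATE and DELETE operations are not supported for MM table");
    }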


http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
index 762d946..5bb52b4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
@@ -42,7 +42,8 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
   // Need to remember whether this is an acid compliant operation, and if so whether it is an
   // insert, update, or delete.
   private AcidUtils.Operation writeType;
-  private Long mmWriteId;
+  private Long txnId;
+  private int stmtId;
 
   // TODO: the below seems like they should just be combined into partitionDesc
   private org.apache.hadoop.hive.ql.plan.TableDesc table;
@@ -65,11 +66,11 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
       final org.apache.hadoop.hive.ql.plan.TableDesc table,
       final Map<String, String> partitionSpec,
       final boolean replace,
-      final AcidUtils.Operation writeType, Long mmWriteId) {
+      final AcidUtils.Operation writeType, Long txnId) {
     super(sourcePath);
     Utilities.LOG14535.info("creating part LTD from " + sourcePath + " to "
         + ((table.getProperties() == null) ? "null" : table.getTableName()));
-    init(table, partitionSpec, replace, writeType, mmWriteId);
+    init(table, partitionSpec, replace, writeType, txnId);
   }
 
   /**
@@ -83,15 +84,15 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
                        final TableDesc table,
                        final Map<String, String> partitionSpec,
                        final boolean replace,
-                       final Long mmWriteId) {
-    this(sourcePath, table, partitionSpec, replace, AcidUtils.Operation.NOT_ACID, mmWriteId);
+                       final Long txnId) {
+    this(sourcePath, table, partitionSpec, replace, AcidUtils.Operation.NOT_ACID, txnId);
   }
 
   public LoadTableDesc(final Path sourcePath,
       final org.apache.hadoop.hive.ql.plan.TableDesc table,
       final Map<String, String> partitionSpec,
-      final AcidUtils.Operation writeType, Long mmWriteId) {
-    this(sourcePath, table, partitionSpec, true, writeType, mmWriteId);
+      final AcidUtils.Operation writeType, Long txnId) {
+    this(sourcePath, table, partitionSpec, true, writeType, txnId);
   }
 
   /**
@@ -102,22 +103,22 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
    */
   public LoadTableDesc(final Path sourcePath,
                        final org.apache.hadoop.hive.ql.plan.TableDesc table,
-                       final Map<String, String> partitionSpec, Long mmWriteId) {
-    this(sourcePath, table, partitionSpec, true, AcidUtils.Operation.NOT_ACID, mmWriteId);
+                       final Map<String, String> partitionSpec, Long txnId) {
+    this(sourcePath, table, partitionSpec, true, AcidUtils.Operation.NOT_ACID, txnId);
   }
 
   public LoadTableDesc(final Path sourcePath,
       final org.apache.hadoop.hive.ql.plan.TableDesc table,
       final DynamicPartitionCtx dpCtx,
       final AcidUtils.Operation writeType,
-      boolean isReplace, Long mmWriteId) {
+      boolean isReplace, Long txnId) {
     super(sourcePath);
     Utilities.LOG14535.info("creating LTD from " + sourcePath + " to " + table.getTableName()/*, new Exception()*/);
     this.dpCtx = dpCtx;
     if (dpCtx != null && dpCtx.getPartSpec() != null && partitionSpec == null) {
-      init(table, dpCtx.getPartSpec(), isReplace, writeType, mmWriteId);
+      init(table, dpCtx.getPartSpec(), isReplace, writeType, txnId);
     } else {
-      init(table, new LinkedHashMap<String, String>(), isReplace, writeType, mmWriteId);
+      init(table, new LinkedHashMap<String, String>(), isReplace, writeType, txnId);
     }
   }
 
@@ -125,12 +126,12 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
       final org.apache.hadoop.hive.ql.plan.TableDesc table,
       final Map<String, String> partitionSpec,
       final boolean replace,
-      AcidUtils.Operation writeType, Long mmWriteId) {
+      AcidUtils.Operation writeType, Long txnId) {
     this.table = table;
     this.partitionSpec = partitionSpec;
     this.replace = replace;
     this.writeType = writeType;
-    this.mmWriteId = mmWriteId;
+    this.txnId = txnId;
   }
 
   @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
@@ -158,11 +159,11 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
 
   @Explain(displayName = "micromanaged table")
   public Boolean isMmTableExplain() {
-    return mmWriteId != null? true : null;
+    return txnId != null? true : null;
   }
 
   public boolean isMmTable() {
-    return mmWriteId != null;
+    return txnId != null;
   }
 
   public void setReplace(boolean replace) {
@@ -203,8 +204,20 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
     return writeType;
   }
 
-  public Long getMmWriteId() {
-    return mmWriteId;
+  public Long getTxnId() {
+    return txnId;
+  }
+
+  public void setTxnId(Long txnId) {
+    this.txnId = txnId;
+  }
+
+  public int getStmtId() {
+    return stmtId;
+  }
+
+  public void setStmtId(int stmtId) {
+    this.stmtId = stmtId;
   }
 
   public void setIntermediateInMmWrite(boolean b) {
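
A short usage note for the reworked LoadTableDesc: the transaction id doubles as the MM marker, and the statement id is set separately after construction. A sketch, with sourcePath, tableDesc and partitionSpec assumed from the caller:

    LoadTableDesc ltd = new LoadTableDesc(
        sourcePath, tableDesc, partitionSpec, AcidUtils.Operation.NOT_ACID, txnId);
    ltd.setStmtId(stmtId);
    boolean isMm = ltd.isMmTable(); // true iff txnId != null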

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
index 4a13e1f..55b9da9 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
@@ -141,7 +141,7 @@ public class TestExecDriver extends TestCase {
         db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
         db.createTable(src, cols, null, TextInputFormat.class,
             HiveIgnoreKeyTextOutputFormat.class);
-        db.loadTable(hadoopDataFile[i], src, false, true, false, false, false, null);
+        db.loadTable(hadoopDataFile[i], src, false, true, false, false, false, null, 0);
         i++;
       }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/test/queries/clientpositive/mm_all.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mm_all.q b/ql/src/test/queries/clientpositive/mm_all.q
index e2c8e97..8971292 100644
--- a/ql/src/test/queries/clientpositive/mm_all.q
+++ b/ql/src/test/queries/clientpositive/mm_all.q
@@ -33,7 +33,6 @@ drop table part_mm;
 drop table simple_mm;
 create table simple_mm(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
 insert into table simple_mm select key from intermediate;
-insert overwrite table simple_mm select key from intermediate;
 select * from simple_mm order by key;
 insert into table simple_mm select key from intermediate;
 select * from simple_mm order by key;
@@ -193,47 +192,6 @@ set hive.merge.mapredfiles=false;
 -- TODO: need to include merge+union+DP, but it's broken for now
 
 
-drop table ctas0_mm;
-create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate;
-select * from ctas0_mm;
-drop table ctas0_mm;
-
-drop table ctas1_mm;
-create table ctas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as
-  select * from intermediate union all select * from intermediate;
-select * from ctas1_mm;
-drop table ctas1_mm;
-
-
-
-drop table iow0_mm;
-create table iow0_mm(key int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
-insert overwrite table iow0_mm select key from intermediate;
-insert into table iow0_mm select key + 1 from intermediate;
-select * from iow0_mm order by key;
-insert overwrite table iow0_mm select key + 2 from intermediate;
-select * from iow0_mm order by key;
-drop table iow0_mm;
-
-
-drop table iow1_mm; 
-create table iow1_mm(key int) partitioned by (key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only");
-insert overwrite table iow1_mm partition (key2)
-select key as k1, key from intermediate union all select key as k1, key from intermediate;
-insert into table iow1_mm partition (key2)
-select key + 1 as k1, key from intermediate union all select key as k1, key from intermediate;
-select * from iow1_mm order by key, key2;
-insert overwrite table iow1_mm partition (key2)
-select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate;
-select * from iow1_mm order by key, key2;
-insert overwrite table iow1_mm partition (key2)
-select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate;
-select * from iow1_mm order by key, key2;
-drop table iow1_mm;
-
-
-
-
 drop table load0_mm;
 create table load0_mm (key string, value string) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only");
 load data local inpath '../../data/files/kv1.txt' into table load0_mm;
@@ -279,174 +237,11 @@ drop table load2_mm;
 drop table intermediate2;
 
 
-drop table intermediate_nonpart;
-drop table intermmediate_part;
-drop table intermmediate_nonpart;
-create table intermediate_nonpart(key int, p int);
-insert into intermediate_nonpart select * from intermediate;
-create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
-insert into intermmediate_nonpart select * from intermediate;
-create table intermmediate(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
-insert into table intermmediate partition(p) select key, p from intermediate;
-
-set hive.exim.test.mode=true;
-
-export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart';
-export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart';
-export table intermediate to 'ql/test/data/exports/intermediate_part';
-export table intermmediate to 'ql/test/data/exports/intermmediate_part';
-
-drop table intermediate_nonpart;
-drop table intermmediate_part;
-drop table intermmediate_nonpart;
-
--- non-MM export to MM table, with and without partitions
-
-drop table import0_mm;
-create table import0_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
-import table import0_mm from 'ql/test/data/exports/intermediate_nonpart';
-select * from import0_mm order by key, p;
-drop table import0_mm;
-
-
-
-drop table import1_mm;
-create table import1_mm(key int) partitioned by (p int)
-  stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only");
-import table import1_mm from 'ql/test/data/exports/intermediate_part';
-select * from import1_mm order by key, p;
-drop table import1_mm;
-
-
--- MM export into new MM table, non-part and part
-
---drop table import2_mm;
---import table import2_mm from 'ql/test/data/exports/intermmediate_nonpart';
---desc import2_mm;
---select * from import2_mm order by key, p;
---drop table import2_mm;
---
---drop table import3_mm;
---import table import3_mm from 'ql/test/data/exports/intermmediate_part';
---desc import3_mm;
---select * from import3_mm order by key, p;
---drop table import3_mm;
-
--- MM export into existing MM table, non-part and partial part
-
-drop table import4_mm;
-create table import4_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
-import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart';
-select * from import4_mm order by key, p;
-drop table import4_mm;
-
-drop table import5_mm;
-create table import5_mm(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
-import table import5_mm partition(p=455) from 'ql/test/data/exports/intermmediate_part';
-select * from import5_mm order by key, p;
-drop table import5_mm;
-
--- MM export into existing non-MM table, non-part and part
-
-drop table import6_mm;
-create table import6_mm(key int, p int);
-import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart';
-select * from import6_mm order by key, p;
-drop table import6_mm;
-
-drop table import7_mm;
-create table import7_mm(key int) partitioned by (p int);
-import table import7_mm from 'ql/test/data/exports/intermmediate_part';
-select * from import7_mm order by key, p;
-drop table import7_mm;
-
-set hive.exim.test.mode=false;
-
-
-
 drop table multi0_1_mm;
 drop table multi0_2_mm;
 create table multi0_1_mm (key int, key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only");
 create table multi0_2_mm (key int, key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only");
 
-from intermediate
-insert overwrite table multi0_1_mm select key, p
-insert overwrite table multi0_2_mm select p, key;
-
-select * from multi0_1_mm order by key, key2;
-select * from multi0_2_mm order by key, key2;
-
-set hive.merge.mapredfiles=true;
-set hive.merge.sparkfiles=true;
-set hive.merge.tezfiles=true;
-
-from intermediate
-insert into table multi0_1_mm select p, key
-insert overwrite table multi0_2_mm select key, p;
-select * from multi0_1_mm order by key, key2;
-select * from multi0_2_mm order by key, key2;
-
-set hive.merge.mapredfiles=false;
-set hive.merge.sparkfiles=false;
-set hive.merge.tezfiles=false;
-
-drop table multi0_1_mm;
-drop table multi0_2_mm;
-
-
-drop table multi1_mm;
-create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
-from intermediate
-insert into table multi1_mm partition(p=1) select p, key
-insert into table multi1_mm partition(p=2) select key, p;
-select * from multi1_mm order by key, key2, p;
-from intermediate
-insert into table multi1_mm partition(p=2) select p, key
-insert overwrite table multi1_mm partition(p=1) select key, p;
-select * from multi1_mm order by key, key2, p;
-
-from intermediate
-insert into table multi1_mm partition(p) select p, key, p
-insert into table multi1_mm partition(p=1) select key, p;
-select key, key2, p from multi1_mm order by key, key2, p;
-
-from intermediate
-insert into table multi1_mm partition(p) select p, key, 1
-insert into table multi1_mm partition(p=1) select key, p;
-select key, key2, p from multi1_mm order by key, key2, p;
-drop table multi1_mm;
-
-
-
-
-set datanucleus.cache.collections=false;
-set hive.stats.autogather=true;
-
-drop table stats_mm;
-create table stats_mm(key int)  tblproperties("transactional"="true", "transactional_properties"="insert_only");
-insert overwrite table stats_mm  select key from intermediate;
-desc formatted stats_mm;
-
-insert into table stats_mm  select key from intermediate;
-desc formatted stats_mm;
-drop table stats_mm;
-
-drop table stats2_mm;
-create table stats2_mm tblproperties("transactional"="true", "transactional_properties"="insert_only") as select array(key, value) from src;
-desc formatted stats2_mm;
-drop table stats2_mm;
-
-
-set hive.optimize.skewjoin=true;
-set hive.skewjoin.key=2;
-set hive.optimize.metadataonly=false;
-
-CREATE TABLE skewjoin_mm(key INT, value STRING) STORED AS TEXTFILE tblproperties ("transactional"="true", "transactional_properties"="insert_only");
-FROM src src1 JOIN src src2 ON (src1.key = src2.key) INSERT OVERWRITE TABLE skewjoin_mm SELECT src1.key, src2.value;
-select count(distinct key) from skewjoin_mm;
-drop table skewjoin_mm;
-
-set hive.optimize.skewjoin=false;
 
 set hive.optimize.index.filter=true;
 set hive.auto.convert.join=false;

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/test/queries/clientpositive/mm_conversions.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mm_conversions.q b/ql/src/test/queries/clientpositive/mm_conversions.q
index 2dc7a74..62faeac 100644
--- a/ql/src/test/queries/clientpositive/mm_conversions.q
+++ b/ql/src/test/queries/clientpositive/mm_conversions.q
@@ -4,7 +4,8 @@ set hive.fetch.task.conversion=none;
 set tez.grouping.min-size=1;
 set tez.grouping.max-size=2;
 set hive.exec.dynamic.partition.mode=nonstrict;
-
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
 -- Force multiple writers when reading
 drop table intermediate;
@@ -13,29 +14,31 @@ insert into table intermediate partition(p='455') select distinct key from src w
 insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 1;
 insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 1;
 
-drop table simple_from_mm;
-create table simple_from_mm(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
-insert into table simple_from_mm select key from intermediate;
-insert into table simple_from_mm select key from intermediate;
-select * from simple_from_mm s1 order by key;
-alter table simple_from_mm unset tblproperties('transactional_properties', 'transactional');
-select * from simple_from_mm s2 order by key;
-insert into table simple_from_mm select key from intermediate;
-select * from simple_from_mm s3 order by key;
-alter table simple_from_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only");
-select * from simple_from_mm s4 order by key;
-insert into table simple_from_mm select key from intermediate;
-select * from simple_from_mm s5 order by key;
-alter table simple_from_mm set tblproperties("transactional"="false", 'transactional_properties'='false');
-select * from simple_from_mm s6 order by key;
-insert into table simple_from_mm select key from intermediate;
-select * from simple_from_mm s7 order by key;
-drop table simple_from_mm;
+drop table simple_from_mm1;
+create table simple_from_mm1(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+insert into table simple_from_mm1 select key from intermediate;
+insert into table simple_from_mm1 select key from intermediate;
+select * from simple_from_mm1 s1 order by key;
+alter table simple_from_mm1 unset tblproperties('transactional_properties', 'transactional');
+select * from simple_from_mm1 s2 order by key;
+insert into table simple_from_mm1 select key from intermediate;
+select * from simple_from_mm1 s3 order by key;
+drop table simple_from_mm1;
+
+drop table simple_from_mm2;
+create table simple_from_mm2(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+insert into table simple_from_mm2 select key from intermediate;
+insert into table simple_from_mm2 select key from intermediate;
+select * from simple_from_mm2 s1 order by key;
+alter table simple_from_mm2 set tblproperties("transactional"="false", 'transactional_properties'='false');
+select * from simple_from_mm2 s2 order by key;
+insert into table simple_from_mm2 select key from intermediate;
+select * from simple_from_mm2 s3 order by key;
+drop table simple_from_mm2;
 
 drop table simple_to_mm;
 create table simple_to_mm(key int) stored as orc;
 insert into table simple_to_mm select key from intermediate;
-insert into table simple_to_mm select key from intermediate;
 select * from simple_to_mm s1 order by key;
 alter table simple_to_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only");
 select * from simple_to_mm s2 order by key;
@@ -44,27 +47,29 @@ insert into table simple_to_mm select key from intermediate;
 select * from simple_to_mm s3 order by key;
 drop table simple_to_mm;
 
-drop table part_from_mm;
-create table part_from_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
-insert into table part_from_mm partition(key_mm='455') select key from intermediate;
-insert into table part_from_mm partition(key_mm='455') select key from intermediate;
-insert into table part_from_mm partition(key_mm='456') select key from intermediate;
-select * from part_from_mm s1 order by key, key_mm;
-alter table part_from_mm unset tblproperties('transactional_properties', 'transactional');
-select * from part_from_mm s2 order by key, key_mm;
-insert into table part_from_mm partition(key_mm='456') select key from intermediate;
-insert into table part_from_mm partition(key_mm='457') select key from intermediate;
-select * from part_from_mm s3 order by key, key_mm;
-alter table part_from_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only");
-select * from part_from_mm s4 order by key, key_mm;
-insert into table part_from_mm partition(key_mm='456') select key from intermediate;
-insert into table part_from_mm partition(key_mm='455') select key from intermediate;
-select * from part_from_mm s5 order by key, key_mm;
-alter table part_from_mm set tblproperties("transactional"="false", 'transactional_properties'='false');
-select * from part_from_mm s6 order by key, key_mm;
-insert into table part_from_mm partition(key_mm='457') select key from intermediate;
-select * from part_from_mm s7 order by key, key_mm;
-drop table part_from_mm;
+drop table part_from_mm1;
+create table part_from_mm1(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+insert into table part_from_mm1 partition(key_mm='455') select key from intermediate;
+insert into table part_from_mm1 partition(key_mm='455') select key from intermediate;
+insert into table part_from_mm1 partition(key_mm='456') select key from intermediate;
+select * from part_from_mm1 s1 order by key, key_mm;
+alter table part_from_mm1 unset tblproperties('transactional_properties', 'transactional');
+select * from part_from_mm1 s2 order by key, key_mm;
+insert into table part_from_mm1 partition(key_mm='456') select key from intermediate;
+insert into table part_from_mm1 partition(key_mm='457') select key from intermediate;
+select * from part_from_mm1 s3 order by key, key_mm;
+drop table part_from_mm1;
+
+drop table part_from_mm2;
+create table part_from_mm2(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+insert into table part_from_mm2 partition(key_mm='456') select key from intermediate;--fails here
+insert into table part_from_mm2 partition(key_mm='455') select key from intermediate;
+select * from part_from_mm2 s1 order by key, key_mm;
+alter table part_from_mm2 set tblproperties("transactional"="false", 'transactional_properties'='false');
+select * from part_from_mm2 s2 order by key, key_mm;
+insert into table part_from_mm2 partition(key_mm='457') select key from intermediate;
+select * from part_from_mm2 s3 order by key, key_mm;
+drop table part_from_mm2;
 
 drop table part_to_mm;
 create table part_to_mm(key int) partitioned by (key_mm int) stored as orc;

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/test/queries/clientpositive/mm_exim.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mm_exim.q b/ql/src/test/queries/clientpositive/mm_exim.q
new file mode 100644
index 0000000..2cdb001
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/mm_exim.q
@@ -0,0 +1,98 @@
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+set hive.fetch.task.conversion=none;
+set tez.grouping.min-size=1;
+set tez.grouping.max-size=2;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+
+drop table intermediate;
+create table intermediate(key int) partitioned by (p int) stored as orc;
+insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2;
+insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2;
+insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2;
+
+drop table intermediate_nonpart;
+drop table intermmediate_part;
+drop table intermmediate_nonpart;
+create table intermediate_nonpart(key int, p int);
+insert into intermediate_nonpart select * from intermediate;
+create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
+insert into intermmediate_nonpart select * from intermediate;
+create table intermmediate(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
+insert into table intermmediate partition(p) select key, p from intermediate;
+
+set hive.exim.test.mode=true;
+
+export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart';
+export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart';
+export table intermediate to 'ql/test/data/exports/intermediate_part';
+export table intermmediate to 'ql/test/data/exports/intermmediate_part';
+
+drop table intermediate_nonpart;
+drop table intermmediate_part;
+drop table intermmediate_nonpart;
+
+-- non-MM export to MM table, with and without partitions
+
+drop table import0_mm;
+create table import0_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
+import table import0_mm from 'ql/test/data/exports/intermediate_nonpart';
+select * from import0_mm order by key, p;
+drop table import0_mm;
+
+
+
+drop table import1_mm;
+create table import1_mm(key int) partitioned by (p int)
+  stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only");
+import table import1_mm from 'ql/test/data/exports/intermediate_part';
+select * from import1_mm order by key, p;
+drop table import1_mm;
+
+
+-- MM export into new MM table, non-part and part
+
+--drop table import2_mm;
+--import table import2_mm from 'ql/test/data/exports/intermmediate_nonpart';
+--desc import2_mm;
+--select * from import2_mm order by key, p;
+--drop table import2_mm;
+--
+--drop table import3_mm;
+--import table import3_mm from 'ql/test/data/exports/intermmediate_part';
+--desc import3_mm;
+--select * from import3_mm order by key, p;
+--drop table import3_mm;
+
+-- MM export into existing MM table, non-part and partial part
+
+drop table import4_mm;
+create table import4_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
+import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart';
+select * from import4_mm order by key, p;
+drop table import4_mm;
+
+drop table import5_mm;
+create table import5_mm(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
+import table import5_mm partition(p=455) from 'ql/test/data/exports/intermmediate_part';
+select * from import5_mm order by key, p;
+drop table import5_mm;
+
+-- MM export into existing non-MM table, non-part and part
+
+drop table import6_mm;
+create table import6_mm(key int, p int);
+import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart';
+select * from import6_mm order by key, p;
+drop table import6_mm;
+
+drop table import7_mm;
+create table import7_mm(key int) partitioned by (p int);
+import table import7_mm from 'ql/test/data/exports/intermmediate_part';
+select * from import7_mm order by key, p;
+drop table import7_mm;
+
+set hive.exim.test.mode=false;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/test/queries/clientpositive/mm_insertonly_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mm_insertonly_acid.q b/ql/src/test/queries/clientpositive/mm_insertonly_acid.q
deleted file mode 100644
index 7da99c5..0000000
--- a/ql/src/test/queries/clientpositive/mm_insertonly_acid.q
+++ /dev/null
@@ -1,16 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.explain.user=false;
-set hive.fetch.task.conversion=none;
-set hive.exec.dynamic.partition.mode=nonstrict;
-set hive.support.concurrency=true;
-set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-
-
-drop table qtr_acid;
-create table qtr_acid (key int) partitioned by (p int) tblproperties ("transactional"="true", "transactional_properties"="insert_only");
-insert into table qtr_acid partition(p='123') select distinct key from src where key > 0 order by key asc limit 10;
-insert into table qtr_acid partition(p='456') select distinct key from src where key > 0 order by key desc limit 10;
-explain
-select * from qtr_acid order by key;
-select * from qtr_acid order by key;
-drop table qtr_acid;
\ No newline at end of file
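
For tables created with the tblproperties used throughout these tests, the
insert-only ACID setting can be inspected directly; a minimal sketch, where the
table name t is hypothetical and stands for any table created as above:

  -- illustrative sketch; t stands for any insert-only transactional table
  show tblproperties t;
  show tblproperties t("transactional_properties");
  describe formatted t;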


[17/18] hive git commit: HIVE-14879 : integrate MM tables into ACID: replace MM metastore calls and structures with ACID ones (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index 4e3b2af..9042cdb 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size839;
-            ::apache::thrift::protocol::TType _etype842;
-            xfer += iprot->readListBegin(_etype842, _size839);
-            this->success.resize(_size839);
-            uint32_t _i843;
-            for (_i843 = 0; _i843 < _size839; ++_i843)
+            uint32_t _size817;
+            ::apache::thrift::protocol::TType _etype820;
+            xfer += iprot->readListBegin(_etype820, _size817);
+            this->success.resize(_size817);
+            uint32_t _i821;
+            for (_i821 = 0; _i821 < _size817; ++_i821)
             {
-              xfer += iprot->readString(this->success[_i843]);
+              xfer += iprot->readString(this->success[_i821]);
             }
             xfer += iprot->readListEnd();
           }
@@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter844;
-      for (_iter844 = this->success.begin(); _iter844 != this->success.end(); ++_iter844)
+      std::vector<std::string> ::const_iterator _iter822;
+      for (_iter822 = this->success.begin(); _iter822 != this->success.end(); ++_iter822)
       {
-        xfer += oprot->writeString((*_iter844));
+        xfer += oprot->writeString((*_iter822));
       }
       xfer += oprot->writeListEnd();
     }
@@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size845;
-            ::apache::thrift::protocol::TType _etype848;
-            xfer += iprot->readListBegin(_etype848, _size845);
-            (*(this->success)).resize(_size845);
-            uint32_t _i849;
-            for (_i849 = 0; _i849 < _size845; ++_i849)
+            uint32_t _size823;
+            ::apache::thrift::protocol::TType _etype826;
+            xfer += iprot->readListBegin(_etype826, _size823);
+            (*(this->success)).resize(_size823);
+            uint32_t _i827;
+            for (_i827 = 0; _i827 < _size823; ++_i827)
             {
-              xfer += iprot->readString((*(this->success))[_i849]);
+              xfer += iprot->readString((*(this->success))[_i827]);
             }
             xfer += iprot->readListEnd();
           }
@@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size850;
-            ::apache::thrift::protocol::TType _etype853;
-            xfer += iprot->readListBegin(_etype853, _size850);
-            this->success.resize(_size850);
-            uint32_t _i854;
-            for (_i854 = 0; _i854 < _size850; ++_i854)
+            uint32_t _size828;
+            ::apache::thrift::protocol::TType _etype831;
+            xfer += iprot->readListBegin(_etype831, _size828);
+            this->success.resize(_size828);
+            uint32_t _i832;
+            for (_i832 = 0; _i832 < _size828; ++_i832)
             {
-              xfer += iprot->readString(this->success[_i854]);
+              xfer += iprot->readString(this->success[_i832]);
             }
             xfer += iprot->readListEnd();
           }
@@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter855;
-      for (_iter855 = this->success.begin(); _iter855 != this->success.end(); ++_iter855)
+      std::vector<std::string> ::const_iterator _iter833;
+      for (_iter833 = this->success.begin(); _iter833 != this->success.end(); ++_iter833)
       {
-        xfer += oprot->writeString((*_iter855));
+        xfer += oprot->writeString((*_iter833));
       }
       xfer += oprot->writeListEnd();
     }
@@ -1552,14 +1552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size856;
-            ::apache::thrift::protocol::TType _etype859;
-            xfer += iprot->readListBegin(_etype859, _size856);
-            (*(this->success)).resize(_size856);
-            uint32_t _i860;
-            for (_i860 = 0; _i860 < _size856; ++_i860)
+            uint32_t _size834;
+            ::apache::thrift::protocol::TType _etype837;
+            xfer += iprot->readListBegin(_etype837, _size834);
+            (*(this->success)).resize(_size834);
+            uint32_t _i838;
+            for (_i838 = 0; _i838 < _size834; ++_i838)
             {
-              xfer += iprot->readString((*(this->success))[_i860]);
+              xfer += iprot->readString((*(this->success))[_i838]);
             }
             xfer += iprot->readListEnd();
           }
@@ -2621,17 +2621,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->success.clear();
-            uint32_t _size861;
-            ::apache::thrift::protocol::TType _ktype862;
-            ::apache::thrift::protocol::TType _vtype863;
-            xfer += iprot->readMapBegin(_ktype862, _vtype863, _size861);
-            uint32_t _i865;
-            for (_i865 = 0; _i865 < _size861; ++_i865)
+            uint32_t _size839;
+            ::apache::thrift::protocol::TType _ktype840;
+            ::apache::thrift::protocol::TType _vtype841;
+            xfer += iprot->readMapBegin(_ktype840, _vtype841, _size839);
+            uint32_t _i843;
+            for (_i843 = 0; _i843 < _size839; ++_i843)
             {
-              std::string _key866;
-              xfer += iprot->readString(_key866);
-              Type& _val867 = this->success[_key866];
-              xfer += _val867.read(iprot);
+              std::string _key844;
+              xfer += iprot->readString(_key844);
+              Type& _val845 = this->success[_key844];
+              xfer += _val845.read(iprot);
             }
             xfer += iprot->readMapEnd();
           }
@@ -2670,11 +2670,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
     {
       xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::map<std::string, Type> ::const_iterator _iter868;
-      for (_iter868 = this->success.begin(); _iter868 != this->success.end(); ++_iter868)
+      std::map<std::string, Type> ::const_iterator _iter846;
+      for (_iter846 = this->success.begin(); _iter846 != this->success.end(); ++_iter846)
       {
-        xfer += oprot->writeString(_iter868->first);
-        xfer += _iter868->second.write(oprot);
+        xfer += oprot->writeString(_iter846->first);
+        xfer += _iter846->second.write(oprot);
       }
       xfer += oprot->writeMapEnd();
     }
@@ -2719,17 +2719,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             (*(this->success)).clear();
-            uint32_t _size869;
-            ::apache::thrift::protocol::TType _ktype870;
-            ::apache::thrift::protocol::TType _vtype871;
-            xfer += iprot->readMapBegin(_ktype870, _vtype871, _size869);
-            uint32_t _i873;
-            for (_i873 = 0; _i873 < _size869; ++_i873)
+            uint32_t _size847;
+            ::apache::thrift::protocol::TType _ktype848;
+            ::apache::thrift::protocol::TType _vtype849;
+            xfer += iprot->readMapBegin(_ktype848, _vtype849, _size847);
+            uint32_t _i851;
+            for (_i851 = 0; _i851 < _size847; ++_i851)
             {
-              std::string _key874;
-              xfer += iprot->readString(_key874);
-              Type& _val875 = (*(this->success))[_key874];
-              xfer += _val875.read(iprot);
+              std::string _key852;
+              xfer += iprot->readString(_key852);
+              Type& _val853 = (*(this->success))[_key852];
+              xfer += _val853.read(iprot);
             }
             xfer += iprot->readMapEnd();
           }
@@ -2883,14 +2883,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size876;
-            ::apache::thrift::protocol::TType _etype879;
-            xfer += iprot->readListBegin(_etype879, _size876);
-            this->success.resize(_size876);
-            uint32_t _i880;
-            for (_i880 = 0; _i880 < _size876; ++_i880)
+            uint32_t _size854;
+            ::apache::thrift::protocol::TType _etype857;
+            xfer += iprot->readListBegin(_etype857, _size854);
+            this->success.resize(_size854);
+            uint32_t _i858;
+            for (_i858 = 0; _i858 < _size854; ++_i858)
             {
-              xfer += this->success[_i880].read(iprot);
+              xfer += this->success[_i858].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -2945,10 +2945,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter881;
-      for (_iter881 = this->success.begin(); _iter881 != this->success.end(); ++_iter881)
+      std::vector<FieldSchema> ::const_iterator _iter859;
+      for (_iter859 = this->success.begin(); _iter859 != this->success.end(); ++_iter859)
       {
-        xfer += (*_iter881).write(oprot);
+        xfer += (*_iter859).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -3001,14 +3001,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size882;
-            ::apache::thrift::protocol::TType _etype885;
-            xfer += iprot->readListBegin(_etype885, _size882);
-            (*(this->success)).resize(_size882);
-            uint32_t _i886;
-            for (_i886 = 0; _i886 < _size882; ++_i886)
+            uint32_t _size860;
+            ::apache::thrift::protocol::TType _etype863;
+            xfer += iprot->readListBegin(_etype863, _size860);
+            (*(this->success)).resize(_size860);
+            uint32_t _i864;
+            for (_i864 = 0; _i864 < _size860; ++_i864)
             {
-              xfer += (*(this->success))[_i886].read(iprot);
+              xfer += (*(this->success))[_i864].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3194,14 +3194,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size887;
-            ::apache::thrift::protocol::TType _etype890;
-            xfer += iprot->readListBegin(_etype890, _size887);
-            this->success.resize(_size887);
-            uint32_t _i891;
-            for (_i891 = 0; _i891 < _size887; ++_i891)
+            uint32_t _size865;
+            ::apache::thrift::protocol::TType _etype868;
+            xfer += iprot->readListBegin(_etype868, _size865);
+            this->success.resize(_size865);
+            uint32_t _i869;
+            for (_i869 = 0; _i869 < _size865; ++_i869)
             {
-              xfer += this->success[_i891].read(iprot);
+              xfer += this->success[_i869].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3256,10 +3256,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter892;
-      for (_iter892 = this->success.begin(); _iter892 != this->success.end(); ++_iter892)
+      std::vector<FieldSchema> ::const_iterator _iter870;
+      for (_iter870 = this->success.begin(); _iter870 != this->success.end(); ++_iter870)
       {
-        xfer += (*_iter892).write(oprot);
+        xfer += (*_iter870).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -3312,14 +3312,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size893;
-            ::apache::thrift::protocol::TType _etype896;
-            xfer += iprot->readListBegin(_etype896, _size893);
-            (*(this->success)).resize(_size893);
-            uint32_t _i897;
-            for (_i897 = 0; _i897 < _size893; ++_i897)
+            uint32_t _size871;
+            ::apache::thrift::protocol::TType _etype874;
+            xfer += iprot->readListBegin(_etype874, _size871);
+            (*(this->success)).resize(_size871);
+            uint32_t _i875;
+            for (_i875 = 0; _i875 < _size871; ++_i875)
             {
-              xfer += (*(this->success))[_i897].read(iprot);
+              xfer += (*(this->success))[_i875].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3489,14 +3489,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size898;
-            ::apache::thrift::protocol::TType _etype901;
-            xfer += iprot->readListBegin(_etype901, _size898);
-            this->success.resize(_size898);
-            uint32_t _i902;
-            for (_i902 = 0; _i902 < _size898; ++_i902)
+            uint32_t _size876;
+            ::apache::thrift::protocol::TType _etype879;
+            xfer += iprot->readListBegin(_etype879, _size876);
+            this->success.resize(_size876);
+            uint32_t _i880;
+            for (_i880 = 0; _i880 < _size876; ++_i880)
             {
-              xfer += this->success[_i902].read(iprot);
+              xfer += this->success[_i880].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3551,10 +3551,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter903;
-      for (_iter903 = this->success.begin(); _iter903 != this->success.end(); ++_iter903)
+      std::vector<FieldSchema> ::const_iterator _iter881;
+      for (_iter881 = this->success.begin(); _iter881 != this->success.end(); ++_iter881)
       {
-        xfer += (*_iter903).write(oprot);
+        xfer += (*_iter881).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -3607,14 +3607,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size904;
-            ::apache::thrift::protocol::TType _etype907;
-            xfer += iprot->readListBegin(_etype907, _size904);
-            (*(this->success)).resize(_size904);
-            uint32_t _i908;
-            for (_i908 = 0; _i908 < _size904; ++_i908)
+            uint32_t _size882;
+            ::apache::thrift::protocol::TType _etype885;
+            xfer += iprot->readListBegin(_etype885, _size882);
+            (*(this->success)).resize(_size882);
+            uint32_t _i886;
+            for (_i886 = 0; _i886 < _size882; ++_i886)
             {
-              xfer += (*(this->success))[_i908].read(iprot);
+              xfer += (*(this->success))[_i886].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3800,14 +3800,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size909;
-            ::apache::thrift::protocol::TType _etype912;
-            xfer += iprot->readListBegin(_etype912, _size909);
-            this->success.resize(_size909);
-            uint32_t _i913;
-            for (_i913 = 0; _i913 < _size909; ++_i913)
+            uint32_t _size887;
+            ::apache::thrift::protocol::TType _etype890;
+            xfer += iprot->readListBegin(_etype890, _size887);
+            this->success.resize(_size887);
+            uint32_t _i891;
+            for (_i891 = 0; _i891 < _size887; ++_i891)
             {
-              xfer += this->success[_i913].read(iprot);
+              xfer += this->success[_i891].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3862,10 +3862,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter914;
-      for (_iter914 = this->success.begin(); _iter914 != this->success.end(); ++_iter914)
+      std::vector<FieldSchema> ::const_iterator _iter892;
+      for (_iter892 = this->success.begin(); _iter892 != this->success.end(); ++_iter892)
       {
-        xfer += (*_iter914).write(oprot);
+        xfer += (*_iter892).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -3918,14 +3918,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size915;
-            ::apache::thrift::protocol::TType _etype918;
-            xfer += iprot->readListBegin(_etype918, _size915);
-            (*(this->success)).resize(_size915);
-            uint32_t _i919;
-            for (_i919 = 0; _i919 < _size915; ++_i919)
+            uint32_t _size893;
+            ::apache::thrift::protocol::TType _etype896;
+            xfer += iprot->readListBegin(_etype896, _size893);
+            (*(this->success)).resize(_size893);
+            uint32_t _i897;
+            for (_i897 = 0; _i897 < _size893; ++_i897)
             {
-              xfer += (*(this->success))[_i919].read(iprot);
+              xfer += (*(this->success))[_i897].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4518,14 +4518,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->primaryKeys.clear();
-            uint32_t _size920;
-            ::apache::thrift::protocol::TType _etype923;
-            xfer += iprot->readListBegin(_etype923, _size920);
-            this->primaryKeys.resize(_size920);
-            uint32_t _i924;
-            for (_i924 = 0; _i924 < _size920; ++_i924)
+            uint32_t _size898;
+            ::apache::thrift::protocol::TType _etype901;
+            xfer += iprot->readListBegin(_etype901, _size898);
+            this->primaryKeys.resize(_size898);
+            uint32_t _i902;
+            for (_i902 = 0; _i902 < _size898; ++_i902)
             {
-              xfer += this->primaryKeys[_i924].read(iprot);
+              xfer += this->primaryKeys[_i902].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4538,14 +4538,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->foreignKeys.clear();
-            uint32_t _size925;
-            ::apache::thrift::protocol::TType _etype928;
-            xfer += iprot->readListBegin(_etype928, _size925);
-            this->foreignKeys.resize(_size925);
-            uint32_t _i929;
-            for (_i929 = 0; _i929 < _size925; ++_i929)
+            uint32_t _size903;
+            ::apache::thrift::protocol::TType _etype906;
+            xfer += iprot->readListBegin(_etype906, _size903);
+            this->foreignKeys.resize(_size903);
+            uint32_t _i907;
+            for (_i907 = 0; _i907 < _size903; ++_i907)
             {
-              xfer += this->foreignKeys[_i929].read(iprot);
+              xfer += this->foreignKeys[_i907].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4578,10 +4578,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
   xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeys.size()));
-    std::vector<SQLPrimaryKey> ::const_iterator _iter930;
-    for (_iter930 = this->primaryKeys.begin(); _iter930 != this->primaryKeys.end(); ++_iter930)
+    std::vector<SQLPrimaryKey> ::const_iterator _iter908;
+    for (_iter908 = this->primaryKeys.begin(); _iter908 != this->primaryKeys.end(); ++_iter908)
     {
-      xfer += (*_iter930).write(oprot);
+      xfer += (*_iter908).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -4590,10 +4590,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
   xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->foreignKeys.size()));
-    std::vector<SQLForeignKey> ::const_iterator _iter931;
-    for (_iter931 = this->foreignKeys.begin(); _iter931 != this->foreignKeys.end(); ++_iter931)
+    std::vector<SQLForeignKey> ::const_iterator _iter909;
+    for (_iter909 = this->foreignKeys.begin(); _iter909 != this->foreignKeys.end(); ++_iter909)
     {
-      xfer += (*_iter931).write(oprot);
+      xfer += (*_iter909).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -4621,10 +4621,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
   xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->primaryKeys)).size()));
-    std::vector<SQLPrimaryKey> ::const_iterator _iter932;
-    for (_iter932 = (*(this->primaryKeys)).begin(); _iter932 != (*(this->primaryKeys)).end(); ++_iter932)
+    std::vector<SQLPrimaryKey> ::const_iterator _iter910;
+    for (_iter910 = (*(this->primaryKeys)).begin(); _iter910 != (*(this->primaryKeys)).end(); ++_iter910)
     {
-      xfer += (*_iter932).write(oprot);
+      xfer += (*_iter910).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -4633,10 +4633,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
   xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->foreignKeys)).size()));
-    std::vector<SQLForeignKey> ::const_iterator _iter933;
-    for (_iter933 = (*(this->foreignKeys)).begin(); _iter933 != (*(this->foreignKeys)).end(); ++_iter933)
+    std::vector<SQLForeignKey> ::const_iterator _iter911;
+    for (_iter911 = (*(this->foreignKeys)).begin(); _iter911 != (*(this->foreignKeys)).end(); ++_iter911)
     {
-      xfer += (*_iter933).write(oprot);
+      xfer += (*_iter911).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -5976,14 +5976,14 @@ uint32_t ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->partNames.clear();
-            uint32_t _size934;
-            ::apache::thrift::protocol::TType _etype937;
-            xfer += iprot->readListBegin(_etype937, _size934);
-            this->partNames.resize(_size934);
-            uint32_t _i938;
-            for (_i938 = 0; _i938 < _size934; ++_i938)
+            uint32_t _size912;
+            ::apache::thrift::protocol::TType _etype915;
+            xfer += iprot->readListBegin(_etype915, _size912);
+            this->partNames.resize(_size912);
+            uint32_t _i916;
+            for (_i916 = 0; _i916 < _size912; ++_i916)
             {
-              xfer += iprot->readString(this->partNames[_i938]);
+              xfer += iprot->readString(this->partNames[_i916]);
             }
             xfer += iprot->readListEnd();
           }
@@ -6020,10 +6020,10 @@ uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partNames.size()));
-    std::vector<std::string> ::const_iterator _iter939;
-    for (_iter939 = this->partNames.begin(); _iter939 != this->partNames.end(); ++_iter939)
+    std::vector<std::string> ::const_iterator _iter917;
+    for (_iter917 = this->partNames.begin(); _iter917 != this->partNames.end(); ++_iter917)
     {
-      xfer += oprot->writeString((*_iter939));
+      xfer += oprot->writeString((*_iter917));
     }
     xfer += oprot->writeListEnd();
   }
@@ -6055,10 +6055,10 @@ uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partNames)).size()));
-    std::vector<std::string> ::const_iterator _iter940;
-    for (_iter940 = (*(this->partNames)).begin(); _iter940 != (*(this->partNames)).end(); ++_iter940)
+    std::vector<std::string> ::const_iterator _iter918;
+    for (_iter918 = (*(this->partNames)).begin(); _iter918 != (*(this->partNames)).end(); ++_iter918)
     {
-      xfer += oprot->writeString((*_iter940));
+      xfer += oprot->writeString((*_iter918));
     }
     xfer += oprot->writeListEnd();
   }
@@ -6302,14 +6302,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size941;
-            ::apache::thrift::protocol::TType _etype944;
-            xfer += iprot->readListBegin(_etype944, _size941);
-            this->success.resize(_size941);
-            uint32_t _i945;
-            for (_i945 = 0; _i945 < _size941; ++_i945)
+            uint32_t _size919;
+            ::apache::thrift::protocol::TType _etype922;
+            xfer += iprot->readListBegin(_etype922, _size919);
+            this->success.resize(_size919);
+            uint32_t _i923;
+            for (_i923 = 0; _i923 < _size919; ++_i923)
             {
-              xfer += iprot->readString(this->success[_i945]);
+              xfer += iprot->readString(this->success[_i923]);
             }
             xfer += iprot->readListEnd();
           }
@@ -6348,10 +6348,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter946;
-      for (_iter946 = this->success.begin(); _iter946 != this->success.end(); ++_iter946)
+      std::vector<std::string> ::const_iterator _iter924;
+      for (_iter924 = this->success.begin(); _iter924 != this->success.end(); ++_iter924)
       {
-        xfer += oprot->writeString((*_iter946));
+        xfer += oprot->writeString((*_iter924));
       }
       xfer += oprot->writeListEnd();
     }
@@ -6396,14 +6396,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size947;
-            ::apache::thrift::protocol::TType _etype950;
-            xfer += iprot->readListBegin(_etype950, _size947);
-            (*(this->success)).resize(_size947);
-            uint32_t _i951;
-            for (_i951 = 0; _i951 < _size947; ++_i951)
+            uint32_t _size925;
+            ::apache::thrift::protocol::TType _etype928;
+            xfer += iprot->readListBegin(_etype928, _size925);
+            (*(this->success)).resize(_size925);
+            uint32_t _i929;
+            for (_i929 = 0; _i929 < _size925; ++_i929)
             {
-              xfer += iprot->readString((*(this->success))[_i951]);
+              xfer += iprot->readString((*(this->success))[_i929]);
             }
             xfer += iprot->readListEnd();
           }
@@ -6573,14 +6573,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size952;
-            ::apache::thrift::protocol::TType _etype955;
-            xfer += iprot->readListBegin(_etype955, _size952);
-            this->success.resize(_size952);
-            uint32_t _i956;
-            for (_i956 = 0; _i956 < _size952; ++_i956)
+            uint32_t _size930;
+            ::apache::thrift::protocol::TType _etype933;
+            xfer += iprot->readListBegin(_etype933, _size930);
+            this->success.resize(_size930);
+            uint32_t _i934;
+            for (_i934 = 0; _i934 < _size930; ++_i934)
             {
-              xfer += iprot->readString(this->success[_i956]);
+              xfer += iprot->readString(this->success[_i934]);
             }
             xfer += iprot->readListEnd();
           }
@@ -6619,10 +6619,10 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift::
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter957;
-      for (_iter957 = this->success.begin(); _iter957 != this->success.end(); ++_iter957)
+      std::vector<std::string> ::const_iterator _iter935;
+      for (_iter935 = this->success.begin(); _iter935 != this->success.end(); ++_iter935)
       {
-        xfer += oprot->writeString((*_iter957));
+        xfer += oprot->writeString((*_iter935));
       }
       xfer += oprot->writeListEnd();
     }
@@ -6667,14 +6667,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size958;
-            ::apache::thrift::protocol::TType _etype961;
-            xfer += iprot->readListBegin(_etype961, _size958);
-            (*(this->success)).resize(_size958);
-            uint32_t _i962;
-            for (_i962 = 0; _i962 < _size958; ++_i962)
+            uint32_t _size936;
+            ::apache::thrift::protocol::TType _etype939;
+            xfer += iprot->readListBegin(_etype939, _size936);
+            (*(this->success)).resize(_size936);
+            uint32_t _i940;
+            for (_i940 = 0; _i940 < _size936; ++_i940)
             {
-              xfer += iprot->readString((*(this->success))[_i962]);
+              xfer += iprot->readString((*(this->success))[_i940]);
             }
             xfer += iprot->readListEnd();
           }
@@ -6749,14 +6749,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->tbl_types.clear();
-            uint32_t _size963;
-            ::apache::thrift::protocol::TType _etype966;
-            xfer += iprot->readListBegin(_etype966, _size963);
-            this->tbl_types.resize(_size963);
-            uint32_t _i967;
-            for (_i967 = 0; _i967 < _size963; ++_i967)
+            uint32_t _size941;
+            ::apache::thrift::protocol::TType _etype944;
+            xfer += iprot->readListBegin(_etype944, _size941);
+            this->tbl_types.resize(_size941);
+            uint32_t _i945;
+            for (_i945 = 0; _i945 < _size941; ++_i945)
             {
-              xfer += iprot->readString(this->tbl_types[_i967]);
+              xfer += iprot->readString(this->tbl_types[_i945]);
             }
             xfer += iprot->readListEnd();
           }
@@ -6793,10 +6793,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_types.size()));
-    std::vector<std::string> ::const_iterator _iter968;
-    for (_iter968 = this->tbl_types.begin(); _iter968 != this->tbl_types.end(); ++_iter968)
+    std::vector<std::string> ::const_iterator _iter946;
+    for (_iter946 = this->tbl_types.begin(); _iter946 != this->tbl_types.end(); ++_iter946)
     {
-      xfer += oprot->writeString((*_iter968));
+      xfer += oprot->writeString((*_iter946));
     }
     xfer += oprot->writeListEnd();
   }
@@ -6828,10 +6828,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_types)).size()));
-    std::vector<std::string> ::const_iterator _iter969;
-    for (_iter969 = (*(this->tbl_types)).begin(); _iter969 != (*(this->tbl_types)).end(); ++_iter969)
+    std::vector<std::string> ::const_iterator _iter947;
+    for (_iter947 = (*(this->tbl_types)).begin(); _iter947 != (*(this->tbl_types)).end(); ++_iter947)
     {
-      xfer += oprot->writeString((*_iter969));
+      xfer += oprot->writeString((*_iter947));
     }
     xfer += oprot->writeListEnd();
   }
@@ -6872,14 +6872,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size970;
-            ::apache::thrift::protocol::TType _etype973;
-            xfer += iprot->readListBegin(_etype973, _size970);
-            this->success.resize(_size970);
-            uint32_t _i974;
-            for (_i974 = 0; _i974 < _size970; ++_i974)
+            uint32_t _size948;
+            ::apache::thrift::protocol::TType _etype951;
+            xfer += iprot->readListBegin(_etype951, _size948);
+            this->success.resize(_size948);
+            uint32_t _i952;
+            for (_i952 = 0; _i952 < _size948; ++_i952)
             {
-              xfer += this->success[_i974].read(iprot);
+              xfer += this->success[_i952].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -6918,10 +6918,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<TableMeta> ::const_iterator _iter975;
-      for (_iter975 = this->success.begin(); _iter975 != this->success.end(); ++_iter975)
+      std::vector<TableMeta> ::const_iterator _iter953;
+      for (_iter953 = this->success.begin(); _iter953 != this->success.end(); ++_iter953)
       {
-        xfer += (*_iter975).write(oprot);
+        xfer += (*_iter953).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -6966,14 +6966,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size976;
-            ::apache::thrift::protocol::TType _etype979;
-            xfer += iprot->readListBegin(_etype979, _size976);
-            (*(this->success)).resize(_size976);
-            uint32_t _i980;
-            for (_i980 = 0; _i980 < _size976; ++_i980)
+            uint32_t _size954;
+            ::apache::thrift::protocol::TType _etype957;
+            xfer += iprot->readListBegin(_etype957, _size954);
+            (*(this->success)).resize(_size954);
+            uint32_t _i958;
+            for (_i958 = 0; _i958 < _size954; ++_i958)
             {
-              xfer += (*(this->success))[_i980].read(iprot);
+              xfer += (*(this->success))[_i958].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -7111,14 +7111,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size981;
-            ::apache::thrift::protocol::TType _etype984;
-            xfer += iprot->readListBegin(_etype984, _size981);
-            this->success.resize(_size981);
-            uint32_t _i985;
-            for (_i985 = 0; _i985 < _size981; ++_i985)
+            uint32_t _size959;
+            ::apache::thrift::protocol::TType _etype962;
+            xfer += iprot->readListBegin(_etype962, _size959);
+            this->success.resize(_size959);
+            uint32_t _i963;
+            for (_i963 = 0; _i963 < _size959; ++_i963)
             {
-              xfer += iprot->readString(this->success[_i985]);
+              xfer += iprot->readString(this->success[_i963]);
             }
             xfer += iprot->readListEnd();
           }
@@ -7157,10 +7157,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter986;
-      for (_iter986 = this->success.begin(); _iter986 != this->success.end(); ++_iter986)
+      std::vector<std::string> ::const_iterator _iter964;
+      for (_iter964 = this->success.begin(); _iter964 != this->success.end(); ++_iter964)
       {
-        xfer += oprot->writeString((*_iter986));
+        xfer += oprot->writeString((*_iter964));
       }
       xfer += oprot->writeListEnd();
     }
@@ -7205,14 +7205,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size987;
-            ::apache::thrift::protocol::TType _etype990;
-            xfer += iprot->readListBegin(_etype990, _size987);
-            (*(this->success)).resize(_size987);
-            uint32_t _i991;
-            for (_i991 = 0; _i991 < _size987; ++_i991)
+            uint32_t _size965;
+            ::apache::thrift::protocol::TType _etype968;
+            xfer += iprot->readListBegin(_etype968, _size965);
+            (*(this->success)).resize(_size965);
+            uint32_t _i969;
+            for (_i969 = 0; _i969 < _size965; ++_i969)
             {
-              xfer += iprot->readString((*(this->success))[_i991]);
+              xfer += iprot->readString((*(this->success))[_i969]);
             }
             xfer += iprot->readListEnd();
           }
@@ -7522,14 +7522,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->tbl_names.clear();
-            uint32_t _size992;
-            ::apache::thrift::protocol::TType _etype995;
-            xfer += iprot->readListBegin(_etype995, _size992);
-            this->tbl_names.resize(_size992);
-            uint32_t _i996;
-            for (_i996 = 0; _i996 < _size992; ++_i996)
+            uint32_t _size970;
+            ::apache::thrift::protocol::TType _etype973;
+            xfer += iprot->readListBegin(_etype973, _size970);
+            this->tbl_names.resize(_size970);
+            uint32_t _i974;
+            for (_i974 = 0; _i974 < _size970; ++_i974)
             {
-              xfer += iprot->readString(this->tbl_names[_i996]);
+              xfer += iprot->readString(this->tbl_names[_i974]);
             }
             xfer += iprot->readListEnd();
           }
@@ -7562,10 +7562,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr
   xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size()));
-    std::vector<std::string> ::const_iterator _iter997;
-    for (_iter997 = this->tbl_names.begin(); _iter997 != this->tbl_names.end(); ++_iter997)
+    std::vector<std::string> ::const_iterator _iter975;
+    for (_iter975 = this->tbl_names.begin(); _iter975 != this->tbl_names.end(); ++_iter975)
     {
-      xfer += oprot->writeString((*_iter997));
+      xfer += oprot->writeString((*_iter975));
     }
     xfer += oprot->writeListEnd();
   }
@@ -7593,10 +7593,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th
   xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size()));
-    std::vector<std::string> ::const_iterator _iter998;
-    for (_iter998 = (*(this->tbl_names)).begin(); _iter998 != (*(this->tbl_names)).end(); ++_iter998)
+    std::vector<std::string> ::const_iterator _iter976;
+    for (_iter976 = (*(this->tbl_names)).begin(); _iter976 != (*(this->tbl_names)).end(); ++_iter976)
     {
-      xfer += oprot->writeString((*_iter998));
+      xfer += oprot->writeString((*_iter976));
     }
     xfer += oprot->writeListEnd();
   }
@@ -7637,14 +7637,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size999;
-            ::apache::thrift::protocol::TType _etype1002;
-            xfer += iprot->readListBegin(_etype1002, _size999);
-            this->success.resize(_size999);
-            uint32_t _i1003;
-            for (_i1003 = 0; _i1003 < _size999; ++_i1003)
+            uint32_t _size977;
+            ::apache::thrift::protocol::TType _etype980;
+            xfer += iprot->readListBegin(_etype980, _size977);
+            this->success.resize(_size977);
+            uint32_t _i981;
+            for (_i981 = 0; _i981 < _size977; ++_i981)
             {
-              xfer += this->success[_i1003].read(iprot);
+              xfer += this->success[_i981].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -7675,10 +7675,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Table> ::const_iterator _iter1004;
-      for (_iter1004 = this->success.begin(); _iter1004 != this->success.end(); ++_iter1004)
+      std::vector<Table> ::const_iterator _iter982;
+      for (_iter982 = this->success.begin(); _iter982 != this->success.end(); ++_iter982)
       {
-        xfer += (*_iter1004).write(oprot);
+        xfer += (*_iter982).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -7719,14 +7719,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1005;
-            ::apache::thrift::protocol::TType _etype1008;
-            xfer += iprot->readListBegin(_etype1008, _size1005);
-            (*(this->success)).resize(_size1005);
-            uint32_t _i1009;
-            for (_i1009 = 0; _i1009 < _size1005; ++_i1009)
+            uint32_t _size983;
+            ::apache::thrift::protocol::TType _etype986;
+            xfer += iprot->readListBegin(_etype986, _size983);
+            (*(this->success)).resize(_size983);
+            uint32_t _i987;
+            for (_i987 = 0; _i987 < _size983; ++_i987)
             {
-              xfer += (*(this->success))[_i1009].read(iprot);
+              xfer += (*(this->success))[_i987].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -8362,14 +8362,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1010;
-            ::apache::thrift::protocol::TType _etype1013;
-            xfer += iprot->readListBegin(_etype1013, _size1010);
-            this->success.resize(_size1010);
-            uint32_t _i1014;
-            for (_i1014 = 0; _i1014 < _size1010; ++_i1014)
+            uint32_t _size988;
+            ::apache::thrift::protocol::TType _etype991;
+            xfer += iprot->readListBegin(_etype991, _size988);
+            this->success.resize(_size988);
+            uint32_t _i992;
+            for (_i992 = 0; _i992 < _size988; ++_i992)
             {
-              xfer += iprot->readString(this->success[_i1014]);
+              xfer += iprot->readString(this->success[_i992]);
             }
             xfer += iprot->readListEnd();
           }
@@ -8424,10 +8424,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1015;
-      for (_iter1015 = this->success.begin(); _iter1015 != this->success.end(); ++_iter1015)
+      std::vector<std::string> ::const_iterator _iter993;
+      for (_iter993 = this->success.begin(); _iter993 != this->success.end(); ++_iter993)
       {
-        xfer += oprot->writeString((*_iter1015));
+        xfer += oprot->writeString((*_iter993));
       }
       xfer += oprot->writeListEnd();
     }
@@ -8480,14 +8480,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1016;
-            ::apache::thrift::protocol::TType _etype1019;
-            xfer += iprot->readListBegin(_etype1019, _size1016);
-            (*(this->success)).resize(_size1016);
-            uint32_t _i1020;
-            for (_i1020 = 0; _i1020 < _size1016; ++_i1020)
+            uint32_t _size994;
+            ::apache::thrift::protocol::TType _etype997;
+            xfer += iprot->readListBegin(_etype997, _size994);
+            (*(this->success)).resize(_size994);
+            uint32_t _i998;
+            for (_i998 = 0; _i998 < _size994; ++_i998)
             {
-              xfer += iprot->readString((*(this->success))[_i1020]);
+              xfer += iprot->readString((*(this->success))[_i998]);
             }
             xfer += iprot->readListEnd();
           }
@@ -9821,14 +9821,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->new_parts.clear();
-            uint32_t _size1021;
-            ::apache::thrift::protocol::TType _etype1024;
-            xfer += iprot->readListBegin(_etype1024, _size1021);
-            this->new_parts.resize(_size1021);
-            uint32_t _i1025;
-            for (_i1025 = 0; _i1025 < _size1021; ++_i1025)
+            uint32_t _size999;
+            ::apache::thrift::protocol::TType _etype1002;
+            xfer += iprot->readListBegin(_etype1002, _size999);
+            this->new_parts.resize(_size999);
+            uint32_t _i1003;
+            for (_i1003 = 0; _i1003 < _size999; ++_i1003)
             {
-              xfer += this->new_parts[_i1025].read(iprot);
+              xfer += this->new_parts[_i1003].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -9857,10 +9857,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-    std::vector<Partition> ::const_iterator _iter1026;
-    for (_iter1026 = this->new_parts.begin(); _iter1026 != this->new_parts.end(); ++_iter1026)
+    std::vector<Partition> ::const_iterator _iter1004;
+    for (_iter1004 = this->new_parts.begin(); _iter1004 != this->new_parts.end(); ++_iter1004)
     {
-      xfer += (*_iter1026).write(oprot);
+      xfer += (*_iter1004).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -9884,10 +9884,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-    std::vector<Partition> ::const_iterator _iter1027;
-    for (_iter1027 = (*(this->new_parts)).begin(); _iter1027 != (*(this->new_parts)).end(); ++_iter1027)
+    std::vector<Partition> ::const_iterator _iter1005;
+    for (_iter1005 = (*(this->new_parts)).begin(); _iter1005 != (*(this->new_parts)).end(); ++_iter1005)
     {
-      xfer += (*_iter1027).write(oprot);
+      xfer += (*_iter1005).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -10096,14 +10096,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->new_parts.clear();
-            uint32_t _size1028;
-            ::apache::thrift::protocol::TType _etype1031;
-            xfer += iprot->readListBegin(_etype1031, _size1028);
-            this->new_parts.resize(_size1028);
-            uint32_t _i1032;
-            for (_i1032 = 0; _i1032 < _size1028; ++_i1032)
+            uint32_t _size1006;
+            ::apache::thrift::protocol::TType _etype1009;
+            xfer += iprot->readListBegin(_etype1009, _size1006);
+            this->new_parts.resize(_size1006);
+            uint32_t _i1010;
+            for (_i1010 = 0; _i1010 < _size1006; ++_i1010)
             {
-              xfer += this->new_parts[_i1032].read(iprot);
+              xfer += this->new_parts[_i1010].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -10132,10 +10132,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift::
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-    std::vector<PartitionSpec> ::const_iterator _iter1033;
-    for (_iter1033 = this->new_parts.begin(); _iter1033 != this->new_parts.end(); ++_iter1033)
+    std::vector<PartitionSpec> ::const_iterator _iter1011;
+    for (_iter1011 = this->new_parts.begin(); _iter1011 != this->new_parts.end(); ++_iter1011)
     {
-      xfer += (*_iter1033).write(oprot);
+      xfer += (*_iter1011).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -10159,10 +10159,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift:
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-    std::vector<PartitionSpec> ::const_iterator _iter1034;
-    for (_iter1034 = (*(this->new_parts)).begin(); _iter1034 != (*(this->new_parts)).end(); ++_iter1034)
+    std::vector<PartitionSpec> ::const_iterator _iter1012;
+    for (_iter1012 = (*(this->new_parts)).begin(); _iter1012 != (*(this->new_parts)).end(); ++_iter1012)
     {
-      xfer += (*_iter1034).write(oprot);
+      xfer += (*_iter1012).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -10387,14 +10387,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1035;
-            ::apache::thrift::protocol::TType _etype1038;
-            xfer += iprot->readListBegin(_etype1038, _size1035);
-            this->part_vals.resize(_size1035);
-            uint32_t _i1039;
-            for (_i1039 = 0; _i1039 < _size1035; ++_i1039)
+            uint32_t _size1013;
+            ::apache::thrift::protocol::TType _etype1016;
+            xfer += iprot->readListBegin(_etype1016, _size1013);
+            this->part_vals.resize(_size1013);
+            uint32_t _i1017;
+            for (_i1017 = 0; _i1017 < _size1013; ++_i1017)
             {
-              xfer += iprot->readString(this->part_vals[_i1039]);
+              xfer += iprot->readString(this->part_vals[_i1017]);
             }
             xfer += iprot->readListEnd();
           }
@@ -10431,10 +10431,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1040;
-    for (_iter1040 = this->part_vals.begin(); _iter1040 != this->part_vals.end(); ++_iter1040)
+    std::vector<std::string> ::const_iterator _iter1018;
+    for (_iter1018 = this->part_vals.begin(); _iter1018 != this->part_vals.end(); ++_iter1018)
     {
-      xfer += oprot->writeString((*_iter1040));
+      xfer += oprot->writeString((*_iter1018));
     }
     xfer += oprot->writeListEnd();
   }
@@ -10466,10 +10466,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1041;
-    for (_iter1041 = (*(this->part_vals)).begin(); _iter1041 != (*(this->part_vals)).end(); ++_iter1041)
+    std::vector<std::string> ::const_iterator _iter1019;
+    for (_iter1019 = (*(this->part_vals)).begin(); _iter1019 != (*(this->part_vals)).end(); ++_iter1019)
     {
-      xfer += oprot->writeString((*_iter1041));
+      xfer += oprot->writeString((*_iter1019));
     }
     xfer += oprot->writeListEnd();
   }
@@ -10941,14 +10941,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1042;
-            ::apache::thrift::protocol::TType _etype1045;
-            xfer += iprot->readListBegin(_etype1045, _size1042);
-            this->part_vals.resize(_size1042);
-            uint32_t _i1046;
-            for (_i1046 = 0; _i1046 < _size1042; ++_i1046)
+            uint32_t _size1020;
+            ::apache::thrift::protocol::TType _etype1023;
+            xfer += iprot->readListBegin(_etype1023, _size1020);
+            this->part_vals.resize(_size1020);
+            uint32_t _i1024;
+            for (_i1024 = 0; _i1024 < _size1020; ++_i1024)
             {
-              xfer += iprot->readString(this->part_vals[_i1046]);
+              xfer += iprot->readString(this->part_vals[_i1024]);
             }
             xfer += iprot->readListEnd();
           }
@@ -10993,10 +10993,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1047;
-    for (_iter1047 = this->part_vals.begin(); _iter1047 != this->part_vals.end(); ++_iter1047)
+    std::vector<std::string> ::const_iterator _iter1025;
+    for (_iter1025 = this->part_vals.begin(); _iter1025 != this->part_vals.end(); ++_iter1025)
     {
-      xfer += oprot->writeString((*_iter1047));
+      xfer += oprot->writeString((*_iter1025));
     }
     xfer += oprot->writeListEnd();
   }
@@ -11032,10 +11032,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1048;
-    for (_iter1048 = (*(this->part_vals)).begin(); _iter1048 != (*(this->part_vals)).end(); ++_iter1048)
+    std::vector<std::string> ::const_iterator _iter1026;
+    for (_iter1026 = (*(this->part_vals)).begin(); _iter1026 != (*(this->part_vals)).end(); ++_iter1026)
     {
-      xfer += oprot->writeString((*_iter1048));
+      xfer += oprot->writeString((*_iter1026));
     }
     xfer += oprot->writeListEnd();
   }
@@ -11838,14 +11838,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1049;
-            ::apache::thrift::protocol::TType _etype1052;
-            xfer += iprot->readListBegin(_etype1052, _size1049);
-            this->part_vals.resize(_size1049);
-            uint32_t _i1053;
-            for (_i1053 = 0; _i1053 < _size1049; ++_i1053)
+            uint32_t _size1027;
+            ::apache::thrift::protocol::TType _etype1030;
+            xfer += iprot->readListBegin(_etype1030, _size1027);
+            this->part_vals.resize(_size1027);
+            uint32_t _i1031;
+            for (_i1031 = 0; _i1031 < _size1027; ++_i1031)
             {
-              xfer += iprot->readString(this->part_vals[_i1053]);
+              xfer += iprot->readString(this->part_vals[_i1031]);
             }
             xfer += iprot->readListEnd();
           }
@@ -11890,10 +11890,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1054;
-    for (_iter1054 = this->part_vals.begin(); _iter1054 != this->part_vals.end(); ++_iter1054)
+    std::vector<std::string> ::const_iterator _iter1032;
+    for (_iter1032 = this->part_vals.begin(); _iter1032 != this->part_vals.end(); ++_iter1032)
     {
-      xfer += oprot->writeString((*_iter1054));
+      xfer += oprot->writeString((*_iter1032));
     }
     xfer += oprot->writeListEnd();
   }
@@ -11929,10 +11929,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1055;
-    for (_iter1055 = (*(this->part_vals)).begin(); _iter1055 != (*(this->part_vals)).end(); ++_iter1055)
+    std::vector<std::string> ::const_iterator _iter1033;
+    for (_iter1033 = (*(this->part_vals)).begin(); _iter1033 != (*(this->part_vals)).end(); ++_iter1033)
     {
-      xfer += oprot->writeString((*_iter1055));
+      xfer += oprot->writeString((*_iter1033));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12141,14 +12141,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read(
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1056;
-            ::apache::thrift::protocol::TType _etype1059;
-            xfer += iprot->readListBegin(_etype1059, _size1056);
-            this->part_vals.resize(_size1056);
-            uint32_t _i1060;
-            for (_i1060 = 0; _i1060 < _size1056; ++_i1060)
+            uint32_t _size1034;
+            ::apache::thrift::protocol::TType _etype1037;
+            xfer += iprot->readListBegin(_etype1037, _size1034);
+            this->part_vals.resize(_size1034);
+            uint32_t _i1038;
+            for (_i1038 = 0; _i1038 < _size1034; ++_i1038)
             {
-              xfer += iprot->readString(this->part_vals[_i1060]);
+              xfer += iprot->readString(this->part_vals[_i1038]);
             }
             xfer += iprot->readListEnd();
           }
@@ -12201,10 +12201,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1061;
-    for (_iter1061 = this->part_vals.begin(); _iter1061 != this->part_vals.end(); ++_iter1061)
+    std::vector<std::string> ::const_iterator _iter1039;
+    for (_iter1039 = this->part_vals.begin(); _iter1039 != this->part_vals.end(); ++_iter1039)
     {
-      xfer += oprot->writeString((*_iter1061));
+      xfer += oprot->writeString((*_iter1039));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12244,10 +12244,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1062;
-    for (_iter1062 = (*(this->part_vals)).begin(); _iter1062 != (*(this->part_vals)).end(); ++_iter1062)
+    std::vector<std::string> ::const_iterator _iter1040;
+    for (_iter1040 = (*(this->part_vals)).begin(); _iter1040 != (*(this->part_vals)).end(); ++_iter1040)
     {
-      xfer += oprot->writeString((*_iter1062));
+      xfer += oprot->writeString((*_iter1040));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13253,14 +13253,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1063;
-            ::apache::thrift::protocol::TType _etype1066;
-            xfer += iprot->readListBegin(_etype1066, _size1063);
-            this->part_vals.resize(_size1063);
-            uint32_t _i1067;
-            for (_i1067 = 0; _i1067 < _size1063; ++_i1067)
+            uint32_t _size1041;
+            ::apache::thrift::protocol::TType _etype1044;
+            xfer += iprot->readListBegin(_etype1044, _size1041);
+            this->part_vals.resize(_size1041);
+            uint32_t _i1045;
+            for (_i1045 = 0; _i1045 < _size1041; ++_i1045)
             {
-              xfer += iprot->readString(this->part_vals[_i1067]);
+              xfer += iprot->readString(this->part_vals[_i1045]);
             }
             xfer += iprot->readListEnd();
           }
@@ -13297,10 +13297,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1068;
-    for (_iter1068 = this->part_vals.begin(); _iter1068 != this->part_vals.end(); ++_iter1068)
+    std::vector<std::string> ::const_iterator _iter1046;
+    for (_iter1046 = this->part_vals.begin(); _iter1046 != this->part_vals.end(); ++_iter1046)
     {
-      xfer += oprot->writeString((*_iter1068));
+      xfer += oprot->writeString((*_iter1046));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13332,10 +13332,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1069;
-    for (_iter1069 = (*(this->part_vals)).begin(); _iter1069 != (*(this->part_vals)).end(); ++_iter1069)
+    std::vector<std::string> ::const_iterator _iter1047;
+    for (_iter1047 = (*(this->part_vals)).begin(); _iter1047 != (*(this->part_vals)).end(); ++_iter1047)
     {
-      xfer += oprot->writeString((*_iter1069));
+      xfer += oprot->writeString((*_iter1047));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13524,17 +13524,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->partitionSpecs.clear();
-            uint32_t _size1070;
-            ::apache::thrift::protocol::TType _ktype1071;
-            ::apache::thrift::protocol::TType _vtype1072;
-            xfer += iprot->readMapBegin(_ktype1071, _vtype1072, _size1070);
-            uint32_t _i1074;
-            for (_i1074 = 0; _i1074 < _size1070; ++_i1074)
+            uint32_t _size1048;
+            ::apache::thrift::protocol::TType _ktype1049;
+            ::apache::thrift::protocol::TType _vtype1050;
+            xfer += iprot->readMapBegin(_ktype1049, _vtype1050, _size1048);
+            uint32_t _i1052;
+            for (_i1052 = 0; _i1052 < _size1048; ++_i1052)
             {
-              std::string _key1075;
-              xfer += iprot->readString(_key1075);
-              std::string& _val1076 = this->partitionSpecs[_key1075];
-              xfer += iprot->readString(_val1076);
+              std::string _key1053;
+              xfer += iprot->readString(_key1053);
+              std::string& _val1054 = this->partitionSpecs[_key1053];
+              xfer += iprot->readString(_val1054);
             }
             xfer += iprot->readMapEnd();
           }
@@ -13595,11 +13595,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
-    std::map<std::string, std::string> ::const_iterator _iter1077;
-    for (_iter1077 = this->partitionSpecs.begin(); _iter1077 != this->partitionSpecs.end(); ++_iter1077)
+    std::map<std::string, std::string> ::const_iterator _iter1055;
+    for (_iter1055 = this->partitionSpecs.begin(); _iter1055 != this->partitionSpecs.end(); ++_iter1055)
     {
-      xfer += oprot->writeString(_iter1077->first);
-      xfer += oprot->writeString(_iter1077->second);
+      xfer += oprot->writeString(_iter1055->first);
+      xfer += oprot->writeString(_iter1055->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -13639,11 +13639,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
-    std::map<std::string, std::string> ::const_iterator _iter1078;
-    for (_iter1078 = (*(this->partitionSpecs)).begin(); _iter1078 != (*(this->partitionSpecs)).end(); ++_iter1078)
+    std::map<std::string, std::string> ::const_iterator _iter1056;
+    for (_iter1056 = (*(this->partitionSpecs)).begin(); _iter1056 != (*(this->partitionSpecs)).end(); ++_iter1056)
     {
-      xfer += oprot->writeString(_iter1078->first);
-      xfer += oprot->writeString(_iter1078->second);
+      xfer += oprot->writeString(_iter1056->first);
+      xfer += oprot->writeString(_iter1056->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -13888,17 +13888,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->partitionSpecs.clear();
-            uint32_t _size1079;
-            ::apache::thrift::protocol::TType _ktype1080;
-            ::apache::thrift::protocol::TType _vtype1081;
-            xfer += iprot->readMapBegin(_ktype1080, _vtype1081, _size1079);
-            uint32_t _i1083;
-            for (_i1083 = 0; _i1083 < _size1079; ++_i1083)
+            uint32_t _size1057;
+            ::apache::thrift::protocol::TType _ktype1058;
+            ::apache::thrift::protocol::TType _vtype1059;
+            xfer += iprot->readMapBegin(_ktype1058, _vtype1059, _size1057);
+            uint32_t _i1061;
+            for (_i1061 = 0; _i1061 < _size1057; ++_i1061)
             {
-              std::string _key1084;
-              xfer += iprot->readString(_key1084);
-              std::string& _val1085 = this->partitionSpecs[_key1084];
-              xfer += iprot->readString(_val1085);
+              std::string _key1062;
+              xfer += iprot->readString(_key1062);
+              std::string& _val1063 = this->partitionSpecs[_key1062];
+              xfer += iprot->readString(_val1063);
             }
             xfer += iprot->readMapEnd();
           }
@@ -13959,11 +13959,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
-    std::map<std::string, std::string> ::const_iterator _iter1086;
-    for (_iter1086 = this->partitionSpecs.begin(); _iter1086 != this->partitionSpecs.end(); ++_iter1086)
+    std::map<std::string, std::string> ::const_iterator _iter1064;
+    for (_iter1064 = this->partitionSpecs.begin(); _iter1064 != this->partitionSpecs.end(); ++_iter1064)
     {
-      xfer += oprot->writeString(_iter1086->first);
-      xfer += oprot->writeString(_iter1086->second);
+      xfer += oprot->writeString(_iter1064->first);
+      xfer += oprot->writeString(_iter1064->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -14003,11 +14003,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift::
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
-    std::map<std::string, std::string> ::const_iterator _iter1087;
-    for (_iter1087 = (*(this->partitionSpecs)).begin(); _iter1087 != (*(this->partitionSpecs)).end(); ++_iter1087)
+    std::map<std::string, std::string> ::const_iterator _iter1065;
+    for (_iter1065 = (*(this->partitionSpecs)).begin(); _iter1065 != (*(this->partitionSpecs)).end(); ++_iter1065)
     {
-      xfer += oprot->writeString(_iter1087->first);
-      xfer += oprot->writeString(_iter1087->second);
+      xfer += oprot->writeString(_iter1065->first);
+      xfer += oprot->writeString(_iter1065->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -14064,14 +14064,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1088;
-            ::apache::thrift::protocol::TType _etype1091;
-            xfer += iprot->readListBegin(_etype1091, _size1088);
-            this->success.resize(_size1088);
-            uint32_t _i1092;
-            for (_i1092 = 0; _i1092 < _size1088; ++_i1092)
+            uint32_t _size1066;
+            ::apache::thrift::protocol::TType _etype1069;
+            xfer += iprot->readListBegin(_etype1069, _size1066);
+            this->success.resize(_size1066);
+            uint32_t _i1070;
+            for (_i1070 = 0; _i1070 < _size1066; ++_i1070)
             {
-              xfer += this->success[_i1092].read(iprot);
+              xfer += this->success[_i1070].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -14134,10 +14134,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter1093;
-      for (_iter1093 = this->success.begin(); _iter1093 != this->success.end(); ++_iter1093)
+      std::vector<Partition> ::const_iterator _iter1071;
+      for (_iter1071 = this->success.begin(); _iter1071 != this->success.end(); ++_iter1071)
       {
-        xfer += (*_iter1093).write(oprot);
+        xfer += (*_iter1071).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -14194,14 +14194,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1094;
-            ::apache::thrift::protocol::TType _etype1097;
-            xfer += iprot->readListBegin(_etype1097, _size1094);
-            (*(this->success)).resize(_size1094);
-            uint32_t _i1098;
-            for (_i1098 = 0; _i1098 < _size1094; ++_i1098)
+            uint32_t _size1072;
+            ::apache::thrift::protocol::TType _etype1075;
+            xfer += iprot->readListBegin(_etype1075, _size1072);
+            (*(this->success)).resize(_size1072);
+            uint32_t _i1076;
+            for (_i1076 = 0; _i1076 < _size1072; ++_i1076)
             {
-              xfer += (*(this->success))[_i1098].read(iprot);
+              xfer += (*(this->success))[_i1076].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -14300,14 +14300,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1099;
-            ::apache::thrift::protocol::TType _etype1102;
-            xfer += iprot->readListBegin(_etype1102, _size1099);
-            this->part_vals.resize(_size1099);
-            uint32_t _i1103;
-            for (_i1103 = 0; _i1103 < _size1099; ++_i1103)
+            uint32_t _size1077;
+            ::apache::thrift::protocol::TType _etype1080;
+            xfer += iprot->readListBegin(_etype1080, _size1077);
+            this->part_vals.resize(_size1077);
+            uint32_t _i1081;
+            for (_i1081 = 0; _i1081 < _size1077; ++_i1081)
             {
-              xfer += iprot->readString(this->part_vals[_i1103]);
+              xfer += iprot->readString(this->part_vals[_i1081]);
             }
             xfer += iprot->readListEnd();
           }
@@ -14328,14 +14328,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->group_names.clear();
-            uint32_t _size1104;
-            ::apache::thrift::protocol::TType _etype1107;
-            xfer += iprot->readListBegin(_etype1107, _size1104);
-            this->group_names.resize(_size1104);
-            uint32_t _i1108;
-            for (_i1108 = 0; _i1108 < _size1104; ++_i1108)
+            uint32_t _size1082;
+            ::apache::thrift::protocol::TType _etype1085;
+            xfer += iprot->readListBegin(_etype1085, _size1082);
+            this->group_names.resize(_size1082);
+            uint32_t _i1086;
+            for (_i1086 = 0; _i1086 < _size1082; ++_i1086)
             {
-              xfer += iprot->readString(this->group_names[_i1108]);
+              xfer += iprot->readString(this->group_names[_i1086]);
             }
             xfer += iprot->readListEnd();
           }
@@ -14372,10 +14372,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1109;
-    for (_iter1109 = this->part_vals.begin(); _iter1109 != this->part_vals.end(); ++_iter1109)
+    std::vector<std::string> ::const_iterator _iter1087;
+    for (_iter1087 = this->part_vals.begin(); _iter1087 != this->part_vals.end(); ++_iter1087)
     {
-      xfer += oprot->writeString((*_iter1109));
+      xfer += oprot->writeString((*_iter1087));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14388,10 +14388,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-    std::vector<std::string> ::const_iterator _iter1110;
-    for (_iter1110 = this->group_names.begin(); _iter1110 != this->group_names.end(); ++_iter1110)
+    std::vector<std::string> ::const_iterator _iter1088;
+    for (_iter1088 = this->group_names.begin(); _iter1088 != this->group_names.end(); ++_iter1088)
     {
-      xfer += oprot->writeString((*_iter1110));
+      xfer += oprot->writeString((*_iter1088));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14423,10 +14423,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1111;
-    for (_iter1111 = (*(this->part_vals)).begin(); _iter1111 != (*(this->part_vals)).end(); ++_iter1111)
+    std::vector<std::string> ::const_iterator _iter1089;
+    for (_iter1089 = (*(this->part_vals)).begin(); _iter1089 != (*(this->part_vals)).end(); ++_iter1089)
     {
-      xfer += oprot->writeString((*_iter1111));
+      xfer += oprot->writeString((*_iter1089));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14439,10 +14439,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-    std::vector<std::string> ::const_iterator _iter1112;
-    for (_iter1112 = (*(this->group_names)).begin(); _iter1112 != (*(this->group_names)).end(); ++_iter1112)
+    std::vector<std::string> ::const_iterator _iter1090;
+    for (_iter1090 = (*(this->group_names)).begin(); _iter1090 != (*(this->group_names)).end(); ++_iter1090)
     {
-      xfer += oprot->writeString((*_iter1112));
+      xfer += oprot->writeString((*_iter1090));
     }
     xfer += oprot->writeListEnd();
   }
@@ -15001,14 +15001,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1113;
-            ::apache::thrift::protocol::TType _etype1116;
-            xfer += iprot->readListBegin(_etype1116, _size1113);
-            this->success.resize(_size1113);
-            uint32_t _i1117;
-            for (_i1117 = 0; _i1117 < _size1113; ++_i1117)
+            uint32_t _size1091;
+            ::apache::thrift::protocol::TType _etype1094;
+            xfer += iprot->readListBegin(_etype1094, _size1091);
+            this->success.resize(_size1091);
+            uint32_t _i1095;
+            for (_i1095 = 0; _i1095 < _size1091; ++_i1095)
             {
-              xfer += this->success[_i1117].read(iprot);
+              xfer += this->success[_i1095].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -15055,10 +15055,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter1118;
-      for (_iter1118 = this->success.begin(); _iter1118 != this->success.end(); ++_iter1118)
+      std::vector<Partition> ::const_iterator _iter1096;
+      for (_iter1096 = this->success.begin(); _iter1096 != this->success.end(); ++_iter1096)
       {
-        xfer += (*_iter1118).write(oprot);
+        xfer += (*_iter1096).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -15107,14 +15107,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1119;
-            ::apache::thrift::protocol::TType _etype1122;
-            xfer += iprot->readListBegin(_etype1122, _size1119);
-            (*(this->success)).resize(_size1119);
-            uint32_t _i1123;
-            for (_i1123 = 0; _i1123 < _size1119; ++_i1123)
+            uint32_t _size1097;
+            ::apache::thrift::protocol::TType _etype1100;
+            xfer += iprot->readListBegin(_etype1100, _size1097);
+            (*(this->success)).resize(_size1097);
+            uint32_t _i1101;
+            for (_i1101 = 0; _i1101 < _size1097; ++_i1101)
             {
-              xfer += (*(this->success))[_i1123].read(iprot);
+              xfer += (*(this->success))[_i1101].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -15213,14 +15213,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->group_names.clear();
-            uint32_t _size1124;
-            ::apache::thrift::protocol::TType _etype1127;
-            xfer += iprot->readListBegin(_etype1127, _size1124);
-            this->group_names.resize(_size1124);
-            uint32_t _i1128;
-            for (_i1128 = 0; _i1128 < _size1124; ++_i1128)
+            uint32_t _size1102;
+            ::apache::thrift::protocol::TType _etype1105;
+            xfer += iprot->readListBegin(_etype1105, _size1102);
+            this->group_names.resize(_size1102);
+            uint32_t _i1106;
+            for (_i1106 = 0; _i1106 < _size1102; ++_i1106)
             {
-              xfer += iprot->readString(this->group_names[_i1128]);
+              xfer += iprot->readString(this->group_names[_i1106]);
             }
             xfer += iprot->readListEnd();
           }
@@ -15265,10 +15265,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-    std::vector<std::string> ::const_iterator _iter1129;
-    for (_iter1129 = this->group_names.begin(); _iter1129 != this->group_names.end(); ++_iter1129)
+    std::vector<std::string> ::const_iterator _iter1107;
+    for (_iter1107 = this->group_names.begin(); _iter1107 != this->group_names.end(); ++_iter1107)
     {
-      xfer += oprot->writeString((*_iter1129));
+      xfer += oprot->writeString((*_iter1107));
     }
     xfer += oprot->writeListEnd();
   }
@@ -15308,10 +15308,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-    std::vector<std::string> ::const_iterator _iter1130;
-    for (_iter1130 = (*(this->group_names)).begin(); _iter1130 != (*(this->group_names)).end(); ++_iter1130)
+    std::vector<std::string> ::const_iterator _iter1108;
+    for (_iter1108 = (*(this->group_names)).begin(); _iter1108 != (*(this->group_names)).end(); ++_iter1108)
     {
-      xfer += oprot->writeString((*_iter1130));
+      xfer += oprot->writeString((*_iter1108));
     }
     xfer += oprot->writeListEnd();
   }
@@ -15352,14 +15352,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1131;
-            ::apache::thrift::protocol::TType _etype1134;
-            xfer += iprot->readListBegin(_etype1134, _size1131);
-            this->success.resize(_size1131);
-            uint32_t _i1135;
-            for (_i1135 = 0; _i1135 < _size1131; ++_i1135)
+            uint32_t _size1109;
+            ::apache::thrift::protocol::TType _etype1112;
+            xfer += iprot->readListBegin(_etype1112, _size1109);
+            this->success.resize(_size1109);
+            uint32_t _i1113;
+            for (_i1113 = 0; _i1113 < _size1109; ++_i1113)

<TRUNCATED>
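
The _iterNNNN/_sizeNNNN/_iNNNN renames above are just the Thrift compiler renumbering its temporaries after the IDL changed; every generated list field follows the same readListBegin / size-driven loop / readListEnd shape. For reference, a minimal sketch of that shape in Thrift's Java binding (the class and method names here are hypothetical illustrations; only the standard libthrift TProtocol/TList API is assumed):

import java.util.ArrayList;
import java.util.List;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.protocol.TType;

public final class ThriftListPattern {
  // Read a list<string> field: begin, size-driven loop, end --
  // the Java analogue of the generated C++ hunks above.
  static List<String> readStringList(TProtocol iprot) throws TException {
    TList meta = iprot.readListBegin();              // carries element type and size
    List<String> vals = new ArrayList<String>(meta.size);
    for (int i = 0; i < meta.size; ++i) {
      vals.add(iprot.readString());
    }
    iprot.readListEnd();
    return vals;
  }

  // Write it back: declare (element type, size) up front, then emit each element.
  static void writeStringList(TProtocol oprot, List<String> vals) throws TException {
    oprot.writeListBegin(new TList(TType.STRING, vals.size()));
    for (String v : vals) {
      oprot.writeString(v);
    }
    oprot.writeListEnd();
  }
}

The C++ code above is the same pattern, with the element type and size returned through the _etypeNNNN/_sizeNNNN out-parameters instead of a TList value.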

[13/18] hive git commit: HIVE-14879 : integrate MM tables into ACID: replace MM metastore calls and structures with ACID ones (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java
deleted file mode 100644
index 90f103a..0000000
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java
+++ /dev/null
@@ -1,490 +0,0 @@
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package org.apache.hadoop.hive.metastore.api;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class GetValidWriteIdsRequest implements org.apache.thrift.TBase<GetValidWriteIdsRequest, GetValidWriteIdsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GetValidWriteIdsRequest> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetValidWriteIdsRequest");
-
-  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
-  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new GetValidWriteIdsRequestStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new GetValidWriteIdsRequestTupleSchemeFactory());
-  }
-
-  private String dbName; // required
-  private String tblName; // required
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    DB_NAME((short)1, "dbName"),
-    TBL_NAME((short)2, "tblName");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // DB_NAME
-          return DB_NAME;
-        case 2: // TBL_NAME
-          return TBL_NAME;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetValidWriteIdsRequest.class, metaDataMap);
-  }
-
-  public GetValidWriteIdsRequest() {
-  }
-
-  public GetValidWriteIdsRequest(
-    String dbName,
-    String tblName)
-  {
-    this();
-    this.dbName = dbName;
-    this.tblName = tblName;
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public GetValidWriteIdsRequest(GetValidWriteIdsRequest other) {
-    if (other.isSetDbName()) {
-      this.dbName = other.dbName;
-    }
-    if (other.isSetTblName()) {
-      this.tblName = other.tblName;
-    }
-  }
-
-  public GetValidWriteIdsRequest deepCopy() {
-    return new GetValidWriteIdsRequest(this);
-  }
-
-  @Override
-  public void clear() {
-    this.dbName = null;
-    this.tblName = null;
-  }
-
-  public String getDbName() {
-    return this.dbName;
-  }
-
-  public void setDbName(String dbName) {
-    this.dbName = dbName;
-  }
-
-  public void unsetDbName() {
-    this.dbName = null;
-  }
-
-  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
-  public boolean isSetDbName() {
-    return this.dbName != null;
-  }
-
-  public void setDbNameIsSet(boolean value) {
-    if (!value) {
-      this.dbName = null;
-    }
-  }
-
-  public String getTblName() {
-    return this.tblName;
-  }
-
-  public void setTblName(String tblName) {
-    this.tblName = tblName;
-  }
-
-  public void unsetTblName() {
-    this.tblName = null;
-  }
-
-  /** Returns true if field tblName is set (has been assigned a value) and false otherwise */
-  public boolean isSetTblName() {
-    return this.tblName != null;
-  }
-
-  public void setTblNameIsSet(boolean value) {
-    if (!value) {
-      this.tblName = null;
-    }
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case DB_NAME:
-      if (value == null) {
-        unsetDbName();
-      } else {
-        setDbName((String)value);
-      }
-      break;
-
-    case TBL_NAME:
-      if (value == null) {
-        unsetTblName();
-      } else {
-        setTblName((String)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case DB_NAME:
-      return getDbName();
-
-    case TBL_NAME:
-      return getTblName();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case DB_NAME:
-      return isSetDbName();
-    case TBL_NAME:
-      return isSetTblName();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof GetValidWriteIdsRequest)
-      return this.equals((GetValidWriteIdsRequest)that);
-    return false;
-  }
-
-  public boolean equals(GetValidWriteIdsRequest that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_dbName = true && this.isSetDbName();
-    boolean that_present_dbName = true && that.isSetDbName();
-    if (this_present_dbName || that_present_dbName) {
-      if (!(this_present_dbName && that_present_dbName))
-        return false;
-      if (!this.dbName.equals(that.dbName))
-        return false;
-    }
-
-    boolean this_present_tblName = true && this.isSetTblName();
-    boolean that_present_tblName = true && that.isSetTblName();
-    if (this_present_tblName || that_present_tblName) {
-      if (!(this_present_tblName && that_present_tblName))
-        return false;
-      if (!this.tblName.equals(that.tblName))
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_dbName = true && (isSetDbName());
-    list.add(present_dbName);
-    if (present_dbName)
-      list.add(dbName);
-
-    boolean present_tblName = true && (isSetTblName());
-    list.add(present_tblName);
-    if (present_tblName)
-      list.add(tblName);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(GetValidWriteIdsRequest other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetDbName()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetTblName()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("GetValidWriteIdsRequest(");
-    boolean first = true;
-
-    sb.append("dbName:");
-    if (this.dbName == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.dbName);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("tblName:");
-    if (this.tblName == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.tblName);
-    }
-    first = false;
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!isSetDbName()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
-    }
-
-    if (!isSetTblName()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class GetValidWriteIdsRequestStandardSchemeFactory implements SchemeFactory {
-    public GetValidWriteIdsRequestStandardScheme getScheme() {
-      return new GetValidWriteIdsRequestStandardScheme();
-    }
-  }
-
-  private static class GetValidWriteIdsRequestStandardScheme extends StandardScheme<GetValidWriteIdsRequest> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // DB_NAME
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.dbName = iprot.readString();
-              struct.setDbNameIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // TBL_NAME
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.tblName = iprot.readString();
-              struct.setTblNameIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.dbName != null) {
-        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
-        oprot.writeString(struct.dbName);
-        oprot.writeFieldEnd();
-      }
-      if (struct.tblName != null) {
-        oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
-        oprot.writeString(struct.tblName);
-        oprot.writeFieldEnd();
-      }
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class GetValidWriteIdsRequestTupleSchemeFactory implements SchemeFactory {
-    public GetValidWriteIdsRequestTupleScheme getScheme() {
-      return new GetValidWriteIdsRequestTupleScheme();
-    }
-  }
-
-  private static class GetValidWriteIdsRequestTupleScheme extends TupleScheme<GetValidWriteIdsRequest> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      oprot.writeString(struct.dbName);
-      oprot.writeString(struct.tblName);
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      struct.dbName = iprot.readString();
-      struct.setDbNameIsSet(true);
-      struct.tblName = iprot.readString();
-      struct.setTblNameIsSet(true);
-    }
-  }
-
-}
-
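
GetValidWriteIdsRequest (and GetValidWriteIdsResult, next) were the MM-specific write-ID structs that this commit deletes while replacing the MM metastore calls with ACID ones. For context, a minimal round-trip sketch of how such a generated struct is exercised through the TBase surface shown in the diff; the database/table names are placeholders, and TMemoryBuffer and TBinaryProtocol are standard libthrift classes:

import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsRequest;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TMemoryBuffer;

public final class WriteIdsRequestRoundTrip {
  public static void main(String[] args) throws Exception {
    // Both fields are REQUIRED, so validate() passes only once they are set.
    GetValidWriteIdsRequest req = new GetValidWriteIdsRequest("mydb", "mytable");  // placeholder names
    req.validate();

    // Serialize with the struct's own write(TProtocol), as the schemes in the diff do.
    TMemoryBuffer buf = new TMemoryBuffer(128);
    req.write(new TBinaryProtocol(buf));

    // Deserialize into a fresh instance and compare field-wise.
    GetValidWriteIdsRequest copy = new GetValidWriteIdsRequest();
    copy.read(new TBinaryProtocol(buf));
    assert copy.equals(req) && "mydb".equals(copy.getDbName());
  }
}

(Run with java -ea to enable the assert.) After this commit the struct no longer exists, so the import would only resolve against a pre-HIVE-14879 build of the metastore API.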

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResult.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResult.java
deleted file mode 100644
index 4a42e1a..0000000
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResult.java
+++ /dev/null
@@ -1,740 +0,0 @@
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package org.apache.hadoop.hive.metastore.api;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class GetValidWriteIdsResult implements org.apache.thrift.TBase<GetValidWriteIdsResult, GetValidWriteIdsResult._Fields>, java.io.Serializable, Cloneable, Comparable<GetValidWriteIdsResult> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetValidWriteIdsResult");
-
-  private static final org.apache.thrift.protocol.TField LOW_WATERMARK_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("lowWatermarkId", org.apache.thrift.protocol.TType.I64, (short)1);
-  private static final org.apache.thrift.protocol.TField HIGH_WATERMARK_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("highWatermarkId", org.apache.thrift.protocol.TType.I64, (short)2);
-  private static final org.apache.thrift.protocol.TField ARE_IDS_VALID_FIELD_DESC = new org.apache.thrift.protocol.TField("areIdsValid", org.apache.thrift.protocol.TType.BOOL, (short)3);
-  private static final org.apache.thrift.protocol.TField IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("ids", org.apache.thrift.protocol.TType.LIST, (short)4);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new GetValidWriteIdsResultStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new GetValidWriteIdsResultTupleSchemeFactory());
-  }
-
-  private long lowWatermarkId; // required
-  private long highWatermarkId; // required
-  private boolean areIdsValid; // optional
-  private List<Long> ids; // optional
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    LOW_WATERMARK_ID((short)1, "lowWatermarkId"),
-    HIGH_WATERMARK_ID((short)2, "highWatermarkId"),
-    ARE_IDS_VALID((short)3, "areIdsValid"),
-    IDS((short)4, "ids");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // LOW_WATERMARK_ID
-          return LOW_WATERMARK_ID;
-        case 2: // HIGH_WATERMARK_ID
-          return HIGH_WATERMARK_ID;
-        case 3: // ARE_IDS_VALID
-          return ARE_IDS_VALID;
-        case 4: // IDS
-          return IDS;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  private static final int __LOWWATERMARKID_ISSET_ID = 0;
-  private static final int __HIGHWATERMARKID_ISSET_ID = 1;
-  private static final int __AREIDSVALID_ISSET_ID = 2;
-  private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.ARE_IDS_VALID,_Fields.IDS};
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.LOW_WATERMARK_ID, new org.apache.thrift.meta_data.FieldMetaData("lowWatermarkId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    tmpMap.put(_Fields.HIGH_WATERMARK_ID, new org.apache.thrift.meta_data.FieldMetaData("highWatermarkId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    tmpMap.put(_Fields.ARE_IDS_VALID, new org.apache.thrift.meta_data.FieldMetaData("areIdsValid", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
-    tmpMap.put(_Fields.IDS, new org.apache.thrift.meta_data.FieldMetaData("ids", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetValidWriteIdsResult.class, metaDataMap);
-  }
-
-  public GetValidWriteIdsResult() {
-  }
-
-  public GetValidWriteIdsResult(
-    long lowWatermarkId,
-    long highWatermarkId)
-  {
-    this();
-    this.lowWatermarkId = lowWatermarkId;
-    setLowWatermarkIdIsSet(true);
-    this.highWatermarkId = highWatermarkId;
-    setHighWatermarkIdIsSet(true);
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public GetValidWriteIdsResult(GetValidWriteIdsResult other) {
-    __isset_bitfield = other.__isset_bitfield;
-    this.lowWatermarkId = other.lowWatermarkId;
-    this.highWatermarkId = other.highWatermarkId;
-    this.areIdsValid = other.areIdsValid;
-    if (other.isSetIds()) {
-      List<Long> __this__ids = new ArrayList<Long>(other.ids);
-      this.ids = __this__ids;
-    }
-  }
-
-  public GetValidWriteIdsResult deepCopy() {
-    return new GetValidWriteIdsResult(this);
-  }
-
-  @Override
-  public void clear() {
-    setLowWatermarkIdIsSet(false);
-    this.lowWatermarkId = 0;
-    setHighWatermarkIdIsSet(false);
-    this.highWatermarkId = 0;
-    setAreIdsValidIsSet(false);
-    this.areIdsValid = false;
-    this.ids = null;
-  }
-
-  public long getLowWatermarkId() {
-    return this.lowWatermarkId;
-  }
-
-  public void setLowWatermarkId(long lowWatermarkId) {
-    this.lowWatermarkId = lowWatermarkId;
-    setLowWatermarkIdIsSet(true);
-  }
-
-  public void unsetLowWatermarkId() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LOWWATERMARKID_ISSET_ID);
-  }
-
-  /** Returns true if field lowWatermarkId is set (has been assigned a value) and false otherwise */
-  public boolean isSetLowWatermarkId() {
-    return EncodingUtils.testBit(__isset_bitfield, __LOWWATERMARKID_ISSET_ID);
-  }
-
-  public void setLowWatermarkIdIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LOWWATERMARKID_ISSET_ID, value);
-  }
-
-  public long getHighWatermarkId() {
-    return this.highWatermarkId;
-  }
-
-  public void setHighWatermarkId(long highWatermarkId) {
-    this.highWatermarkId = highWatermarkId;
-    setHighWatermarkIdIsSet(true);
-  }
-
-  public void unsetHighWatermarkId() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __HIGHWATERMARKID_ISSET_ID);
-  }
-
-  /** Returns true if field highWatermarkId is set (has been assigned a value) and false otherwise */
-  public boolean isSetHighWatermarkId() {
-    return EncodingUtils.testBit(__isset_bitfield, __HIGHWATERMARKID_ISSET_ID);
-  }
-
-  public void setHighWatermarkIdIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HIGHWATERMARKID_ISSET_ID, value);
-  }
-
-  public boolean isAreIdsValid() {
-    return this.areIdsValid;
-  }
-
-  public void setAreIdsValid(boolean areIdsValid) {
-    this.areIdsValid = areIdsValid;
-    setAreIdsValidIsSet(true);
-  }
-
-  public void unsetAreIdsValid() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __AREIDSVALID_ISSET_ID);
-  }
-
-  /** Returns true if field areIdsValid is set (has been assigned a value) and false otherwise */
-  public boolean isSetAreIdsValid() {
-    return EncodingUtils.testBit(__isset_bitfield, __AREIDSVALID_ISSET_ID);
-  }
-
-  public void setAreIdsValidIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __AREIDSVALID_ISSET_ID, value);
-  }
-
-  public int getIdsSize() {
-    return (this.ids == null) ? 0 : this.ids.size();
-  }
-
-  public java.util.Iterator<Long> getIdsIterator() {
-    return (this.ids == null) ? null : this.ids.iterator();
-  }
-
-  public void addToIds(long elem) {
-    if (this.ids == null) {
-      this.ids = new ArrayList<Long>();
-    }
-    this.ids.add(elem);
-  }
-
-  public List<Long> getIds() {
-    return this.ids;
-  }
-
-  public void setIds(List<Long> ids) {
-    this.ids = ids;
-  }
-
-  public void unsetIds() {
-    this.ids = null;
-  }
-
-  /** Returns true if field ids is set (has been assigned a value) and false otherwise */
-  public boolean isSetIds() {
-    return this.ids != null;
-  }
-
-  public void setIdsIsSet(boolean value) {
-    if (!value) {
-      this.ids = null;
-    }
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case LOW_WATERMARK_ID:
-      if (value == null) {
-        unsetLowWatermarkId();
-      } else {
-        setLowWatermarkId((Long)value);
-      }
-      break;
-
-    case HIGH_WATERMARK_ID:
-      if (value == null) {
-        unsetHighWatermarkId();
-      } else {
-        setHighWatermarkId((Long)value);
-      }
-      break;
-
-    case ARE_IDS_VALID:
-      if (value == null) {
-        unsetAreIdsValid();
-      } else {
-        setAreIdsValid((Boolean)value);
-      }
-      break;
-
-    case IDS:
-      if (value == null) {
-        unsetIds();
-      } else {
-        setIds((List<Long>)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case LOW_WATERMARK_ID:
-      return getLowWatermarkId();
-
-    case HIGH_WATERMARK_ID:
-      return getHighWatermarkId();
-
-    case ARE_IDS_VALID:
-      return isAreIdsValid();
-
-    case IDS:
-      return getIds();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case LOW_WATERMARK_ID:
-      return isSetLowWatermarkId();
-    case HIGH_WATERMARK_ID:
-      return isSetHighWatermarkId();
-    case ARE_IDS_VALID:
-      return isSetAreIdsValid();
-    case IDS:
-      return isSetIds();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof GetValidWriteIdsResult)
-      return this.equals((GetValidWriteIdsResult)that);
-    return false;
-  }
-
-  public boolean equals(GetValidWriteIdsResult that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_lowWatermarkId = true;
-    boolean that_present_lowWatermarkId = true;
-    if (this_present_lowWatermarkId || that_present_lowWatermarkId) {
-      if (!(this_present_lowWatermarkId && that_present_lowWatermarkId))
-        return false;
-      if (this.lowWatermarkId != that.lowWatermarkId)
-        return false;
-    }
-
-    boolean this_present_highWatermarkId = true;
-    boolean that_present_highWatermarkId = true;
-    if (this_present_highWatermarkId || that_present_highWatermarkId) {
-      if (!(this_present_highWatermarkId && that_present_highWatermarkId))
-        return false;
-      if (this.highWatermarkId != that.highWatermarkId)
-        return false;
-    }
-
-    boolean this_present_areIdsValid = true && this.isSetAreIdsValid();
-    boolean that_present_areIdsValid = true && that.isSetAreIdsValid();
-    if (this_present_areIdsValid || that_present_areIdsValid) {
-      if (!(this_present_areIdsValid && that_present_areIdsValid))
-        return false;
-      if (this.areIdsValid != that.areIdsValid)
-        return false;
-    }
-
-    boolean this_present_ids = true && this.isSetIds();
-    boolean that_present_ids = true && that.isSetIds();
-    if (this_present_ids || that_present_ids) {
-      if (!(this_present_ids && that_present_ids))
-        return false;
-      if (!this.ids.equals(that.ids))
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_lowWatermarkId = true;
-    list.add(present_lowWatermarkId);
-    if (present_lowWatermarkId)
-      list.add(lowWatermarkId);
-
-    boolean present_highWatermarkId = true;
-    list.add(present_highWatermarkId);
-    if (present_highWatermarkId)
-      list.add(highWatermarkId);
-
-    boolean present_areIdsValid = true && (isSetAreIdsValid());
-    list.add(present_areIdsValid);
-    if (present_areIdsValid)
-      list.add(areIdsValid);
-
-    boolean present_ids = true && (isSetIds());
-    list.add(present_ids);
-    if (present_ids)
-      list.add(ids);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(GetValidWriteIdsResult other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(isSetLowWatermarkId()).compareTo(other.isSetLowWatermarkId());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetLowWatermarkId()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lowWatermarkId, other.lowWatermarkId);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(isSetHighWatermarkId()).compareTo(other.isSetHighWatermarkId());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetHighWatermarkId()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.highWatermarkId, other.highWatermarkId);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(isSetAreIdsValid()).compareTo(other.isSetAreIdsValid());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetAreIdsValid()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.areIdsValid, other.areIdsValid);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(isSetIds()).compareTo(other.isSetIds());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetIds()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ids, other.ids);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("GetValidWriteIdsResult(");
-    boolean first = true;
-
-    sb.append("lowWatermarkId:");
-    sb.append(this.lowWatermarkId);
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("highWatermarkId:");
-    sb.append(this.highWatermarkId);
-    first = false;
-    if (isSetAreIdsValid()) {
-      if (!first) sb.append(", ");
-      sb.append("areIdsValid:");
-      sb.append(this.areIdsValid);
-      first = false;
-    }
-    if (isSetIds()) {
-      if (!first) sb.append(", ");
-      sb.append("ids:");
-      if (this.ids == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.ids);
-      }
-      first = false;
-    }
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!isSetLowWatermarkId()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'lowWatermarkId' is unset! Struct:" + toString());
-    }
-
-    if (!isSetHighWatermarkId()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'highWatermarkId' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class GetValidWriteIdsResultStandardSchemeFactory implements SchemeFactory {
-    public GetValidWriteIdsResultStandardScheme getScheme() {
-      return new GetValidWriteIdsResultStandardScheme();
-    }
-  }
-
-  private static class GetValidWriteIdsResultStandardScheme extends StandardScheme<GetValidWriteIdsResult> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsResult struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // LOW_WATERMARK_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.lowWatermarkId = iprot.readI64();
-              struct.setLowWatermarkIdIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // HIGH_WATERMARK_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.highWatermarkId = iprot.readI64();
-              struct.setHighWatermarkIdIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 3: // ARE_IDS_VALID
-            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
-              struct.areIdsValid = iprot.readBool();
-              struct.setAreIdsValidIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 4: // IDS
-            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
-              {
-                org.apache.thrift.protocol.TList _list642 = iprot.readListBegin();
-                struct.ids = new ArrayList<Long>(_list642.size);
-                long _elem643;
-                for (int _i644 = 0; _i644 < _list642.size; ++_i644)
-                {
-                  _elem643 = iprot.readI64();
-                  struct.ids.add(_elem643);
-                }
-                iprot.readListEnd();
-              }
-              struct.setIdsIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsResult struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      oprot.writeFieldBegin(LOW_WATERMARK_ID_FIELD_DESC);
-      oprot.writeI64(struct.lowWatermarkId);
-      oprot.writeFieldEnd();
-      oprot.writeFieldBegin(HIGH_WATERMARK_ID_FIELD_DESC);
-      oprot.writeI64(struct.highWatermarkId);
-      oprot.writeFieldEnd();
-      if (struct.isSetAreIdsValid()) {
-        oprot.writeFieldBegin(ARE_IDS_VALID_FIELD_DESC);
-        oprot.writeBool(struct.areIdsValid);
-        oprot.writeFieldEnd();
-      }
-      if (struct.ids != null) {
-        if (struct.isSetIds()) {
-          oprot.writeFieldBegin(IDS_FIELD_DESC);
-          {
-            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.ids.size()));
-            for (long _iter645 : struct.ids)
-            {
-              oprot.writeI64(_iter645);
-            }
-            oprot.writeListEnd();
-          }
-          oprot.writeFieldEnd();
-        }
-      }
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class GetValidWriteIdsResultTupleSchemeFactory implements SchemeFactory {
-    public GetValidWriteIdsResultTupleScheme getScheme() {
-      return new GetValidWriteIdsResultTupleScheme();
-    }
-  }
-
-  private static class GetValidWriteIdsResultTupleScheme extends TupleScheme<GetValidWriteIdsResult> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsResult struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      oprot.writeI64(struct.lowWatermarkId);
-      oprot.writeI64(struct.highWatermarkId);
-      BitSet optionals = new BitSet();
-      if (struct.isSetAreIdsValid()) {
-        optionals.set(0);
-      }
-      if (struct.isSetIds()) {
-        optionals.set(1);
-      }
-      oprot.writeBitSet(optionals, 2);
-      if (struct.isSetAreIdsValid()) {
-        oprot.writeBool(struct.areIdsValid);
-      }
-      if (struct.isSetIds()) {
-        {
-          oprot.writeI32(struct.ids.size());
-          for (long _iter646 : struct.ids)
-          {
-            oprot.writeI64(_iter646);
-          }
-        }
-      }
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsResult struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      struct.lowWatermarkId = iprot.readI64();
-      struct.setLowWatermarkIdIsSet(true);
-      struct.highWatermarkId = iprot.readI64();
-      struct.setHighWatermarkIdIsSet(true);
-      BitSet incoming = iprot.readBitSet(2);
-      if (incoming.get(0)) {
-        struct.areIdsValid = iprot.readBool();
-        struct.setAreIdsValidIsSet(true);
-      }
-      if (incoming.get(1)) {
-        {
-          org.apache.thrift.protocol.TList _list647 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
-          struct.ids = new ArrayList<Long>(_list647.size);
-          long _elem648;
-          for (int _i649 = 0; _i649 < _list647.size; ++_i649)
-          {
-            _elem648 = iprot.readI64();
-            struct.ids.add(_elem648);
-          }
-        }
-        struct.setIdsIsSet(true);
-      }
-    }
-  }
-
-}
-

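For reference, the Thrift definition behind the removed GetValidWriteIdsResult class can be reconstructed from the generated field metadata above. This is a sketch of the deleted struct, not a verbatim quote of hive_metastore.thrift:

    struct GetValidWriteIdsResult {
      1: required i64 lowWatermarkId,   // required: validate() rejects unset values
      2: required i64 highWatermarkId,  // required: validate() rejects unset values
      3: optional bool areIdsValid,
      4: optional list<i64> ids
    }

Judging by the field names, the two watermarks bound a range of write IDs and the optional ids list qualifies individual IDs inside that range; the precise semantics lived in the metastore code that consumed this struct.
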
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdRequest.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdRequest.java
deleted file mode 100644
index 0c1849c..0000000
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdRequest.java
+++ /dev/null
@@ -1,589 +0,0 @@
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package org.apache.hadoop.hive.metastore.api;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class HeartbeatWriteIdRequest implements org.apache.thrift.TBase<HeartbeatWriteIdRequest, HeartbeatWriteIdRequest._Fields>, java.io.Serializable, Cloneable, Comparable<HeartbeatWriteIdRequest> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HeartbeatWriteIdRequest");
-
-  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
-  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
-  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)3);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new HeartbeatWriteIdRequestStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new HeartbeatWriteIdRequestTupleSchemeFactory());
-  }
-
-  private String dbName; // required
-  private String tblName; // required
-  private long writeId; // required
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    DB_NAME((short)1, "dbName"),
-    TBL_NAME((short)2, "tblName"),
-    WRITE_ID((short)3, "writeId");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // DB_NAME
-          return DB_NAME;
-        case 2: // TBL_NAME
-          return TBL_NAME;
-        case 3: // WRITE_ID
-          return WRITE_ID;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  private static final int __WRITEID_ISSET_ID = 0;
-  private byte __isset_bitfield = 0;
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(HeartbeatWriteIdRequest.class, metaDataMap);
-  }
-
-  public HeartbeatWriteIdRequest() {
-  }
-
-  public HeartbeatWriteIdRequest(
-    String dbName,
-    String tblName,
-    long writeId)
-  {
-    this();
-    this.dbName = dbName;
-    this.tblName = tblName;
-    this.writeId = writeId;
-    setWriteIdIsSet(true);
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public HeartbeatWriteIdRequest(HeartbeatWriteIdRequest other) {
-    __isset_bitfield = other.__isset_bitfield;
-    if (other.isSetDbName()) {
-      this.dbName = other.dbName;
-    }
-    if (other.isSetTblName()) {
-      this.tblName = other.tblName;
-    }
-    this.writeId = other.writeId;
-  }
-
-  public HeartbeatWriteIdRequest deepCopy() {
-    return new HeartbeatWriteIdRequest(this);
-  }
-
-  @Override
-  public void clear() {
-    this.dbName = null;
-    this.tblName = null;
-    setWriteIdIsSet(false);
-    this.writeId = 0;
-  }
-
-  public String getDbName() {
-    return this.dbName;
-  }
-
-  public void setDbName(String dbName) {
-    this.dbName = dbName;
-  }
-
-  public void unsetDbName() {
-    this.dbName = null;
-  }
-
-  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
-  public boolean isSetDbName() {
-    return this.dbName != null;
-  }
-
-  public void setDbNameIsSet(boolean value) {
-    if (!value) {
-      this.dbName = null;
-    }
-  }
-
-  public String getTblName() {
-    return this.tblName;
-  }
-
-  public void setTblName(String tblName) {
-    this.tblName = tblName;
-  }
-
-  public void unsetTblName() {
-    this.tblName = null;
-  }
-
-  /** Returns true if field tblName is set (has been assigned a value) and false otherwise */
-  public boolean isSetTblName() {
-    return this.tblName != null;
-  }
-
-  public void setTblNameIsSet(boolean value) {
-    if (!value) {
-      this.tblName = null;
-    }
-  }
-
-  public long getWriteId() {
-    return this.writeId;
-  }
-
-  public void setWriteId(long writeId) {
-    this.writeId = writeId;
-    setWriteIdIsSet(true);
-  }
-
-  public void unsetWriteId() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID);
-  }
-
-  /** Returns true if field writeId is set (has been assigned a value) and false otherwise */
-  public boolean isSetWriteId() {
-    return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID);
-  }
-
-  public void setWriteIdIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value);
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case DB_NAME:
-      if (value == null) {
-        unsetDbName();
-      } else {
-        setDbName((String)value);
-      }
-      break;
-
-    case TBL_NAME:
-      if (value == null) {
-        unsetTblName();
-      } else {
-        setTblName((String)value);
-      }
-      break;
-
-    case WRITE_ID:
-      if (value == null) {
-        unsetWriteId();
-      } else {
-        setWriteId((Long)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case DB_NAME:
-      return getDbName();
-
-    case TBL_NAME:
-      return getTblName();
-
-    case WRITE_ID:
-      return getWriteId();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case DB_NAME:
-      return isSetDbName();
-    case TBL_NAME:
-      return isSetTblName();
-    case WRITE_ID:
-      return isSetWriteId();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof HeartbeatWriteIdRequest)
-      return this.equals((HeartbeatWriteIdRequest)that);
-    return false;
-  }
-
-  public boolean equals(HeartbeatWriteIdRequest that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_dbName = true && this.isSetDbName();
-    boolean that_present_dbName = true && that.isSetDbName();
-    if (this_present_dbName || that_present_dbName) {
-      if (!(this_present_dbName && that_present_dbName))
-        return false;
-      if (!this.dbName.equals(that.dbName))
-        return false;
-    }
-
-    boolean this_present_tblName = true && this.isSetTblName();
-    boolean that_present_tblName = true && that.isSetTblName();
-    if (this_present_tblName || that_present_tblName) {
-      if (!(this_present_tblName && that_present_tblName))
-        return false;
-      if (!this.tblName.equals(that.tblName))
-        return false;
-    }
-
-    boolean this_present_writeId = true;
-    boolean that_present_writeId = true;
-    if (this_present_writeId || that_present_writeId) {
-      if (!(this_present_writeId && that_present_writeId))
-        return false;
-      if (this.writeId != that.writeId)
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_dbName = true && (isSetDbName());
-    list.add(present_dbName);
-    if (present_dbName)
-      list.add(dbName);
-
-    boolean present_tblName = true && (isSetTblName());
-    list.add(present_tblName);
-    if (present_tblName)
-      list.add(tblName);
-
-    boolean present_writeId = true;
-    list.add(present_writeId);
-    if (present_writeId)
-      list.add(writeId);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(HeartbeatWriteIdRequest other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetDbName()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetTblName()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetWriteId()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("HeartbeatWriteIdRequest(");
-    boolean first = true;
-
-    sb.append("dbName:");
-    if (this.dbName == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.dbName);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("tblName:");
-    if (this.tblName == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.tblName);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("writeId:");
-    sb.append(this.writeId);
-    first = false;
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!isSetDbName()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
-    }
-
-    if (!isSetTblName()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString());
-    }
-
-    if (!isSetWriteId()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'writeId' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class HeartbeatWriteIdRequestStandardSchemeFactory implements SchemeFactory {
-    public HeartbeatWriteIdRequestStandardScheme getScheme() {
-      return new HeartbeatWriteIdRequestStandardScheme();
-    }
-  }
-
-  private static class HeartbeatWriteIdRequestStandardScheme extends StandardScheme<HeartbeatWriteIdRequest> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatWriteIdRequest struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // DB_NAME
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.dbName = iprot.readString();
-              struct.setDbNameIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // TBL_NAME
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.tblName = iprot.readString();
-              struct.setTblNameIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 3: // WRITE_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.writeId = iprot.readI64();
-              struct.setWriteIdIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatWriteIdRequest struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.dbName != null) {
-        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
-        oprot.writeString(struct.dbName);
-        oprot.writeFieldEnd();
-      }
-      if (struct.tblName != null) {
-        oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
-        oprot.writeString(struct.tblName);
-        oprot.writeFieldEnd();
-      }
-      oprot.writeFieldBegin(WRITE_ID_FIELD_DESC);
-      oprot.writeI64(struct.writeId);
-      oprot.writeFieldEnd();
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class HeartbeatWriteIdRequestTupleSchemeFactory implements SchemeFactory {
-    public HeartbeatWriteIdRequestTupleScheme getScheme() {
-      return new HeartbeatWriteIdRequestTupleScheme();
-    }
-  }
-
-  private static class HeartbeatWriteIdRequestTupleScheme extends TupleScheme<HeartbeatWriteIdRequest> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatWriteIdRequest struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      oprot.writeString(struct.dbName);
-      oprot.writeString(struct.tblName);
-      oprot.writeI64(struct.writeId);
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatWriteIdRequest struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      struct.dbName = iprot.readString();
-      struct.setDbNameIsSet(true);
-      struct.tblName = iprot.readString();
-      struct.setTblNameIsSet(true);
-      struct.writeId = iprot.readI64();
-      struct.setWriteIdIsSet(true);
-    }
-  }
-
-}
-

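Likewise, the removed HeartbeatWriteIdRequest maps to a simple three-field struct, reconstructed here as a sketch from the metaDataMap in the diff above:

    struct HeartbeatWriteIdRequest {
      1: required string dbName,
      2: required string tblName,
      3: required i64 writeId
    }

All three fields are required, which matches the unset-field checks in the deleted validate() method.
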
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdResult.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdResult.java
deleted file mode 100644
index ae6f25e..0000000
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdResult.java
+++ /dev/null
@@ -1,283 +0,0 @@
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package org.apache.hadoop.hive.metastore.api;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class HeartbeatWriteIdResult implements org.apache.thrift.TBase<HeartbeatWriteIdResult, HeartbeatWriteIdResult._Fields>, java.io.Serializable, Cloneable, Comparable<HeartbeatWriteIdResult> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HeartbeatWriteIdResult");
-
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new HeartbeatWriteIdResultStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new HeartbeatWriteIdResultTupleSchemeFactory());
-  }
-
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-;
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(HeartbeatWriteIdResult.class, metaDataMap);
-  }
-
-  public HeartbeatWriteIdResult() {
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public HeartbeatWriteIdResult(HeartbeatWriteIdResult other) {
-  }
-
-  public HeartbeatWriteIdResult deepCopy() {
-    return new HeartbeatWriteIdResult(this);
-  }
-
-  @Override
-  public void clear() {
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof HeartbeatWriteIdResult)
-      return this.equals((HeartbeatWriteIdResult)that);
-    return false;
-  }
-
-  public boolean equals(HeartbeatWriteIdResult that) {
-    if (that == null)
-      return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(HeartbeatWriteIdResult other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("HeartbeatWriteIdResult(");
-    boolean first = true;
-
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class HeartbeatWriteIdResultStandardSchemeFactory implements SchemeFactory {
-    public HeartbeatWriteIdResultStandardScheme getScheme() {
-      return new HeartbeatWriteIdResultStandardScheme();
-    }
-  }
-
-  private static class HeartbeatWriteIdResultStandardScheme extends StandardScheme<HeartbeatWriteIdResult> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatWriteIdResult struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatWriteIdResult struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class HeartbeatWriteIdResultTupleSchemeFactory implements SchemeFactory {
-    public HeartbeatWriteIdResultTupleScheme getScheme() {
-      return new HeartbeatWriteIdResultTupleScheme();
-    }
-  }
-
-  private static class HeartbeatWriteIdResultTupleScheme extends TupleScheme<HeartbeatWriteIdResult> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatWriteIdResult struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatWriteIdResult struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-    }
-  }
-
-}
-

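The matching HeartbeatWriteIdResult carried no payload; its definition was just an empty acknowledgement struct:

    struct HeartbeatWriteIdResult {
    }

The Table.java diff below removes the two MM write-id tracking fields from the Table struct rather than deleting a whole file; in IDL terms the removed members were, approximately:

    16: optional i64 mmNextWriteId,
    17: optional i64 mmWatermarkWriteId
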
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
index dc6a5ad..800219f 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
@@ -53,8 +53,6 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
   private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)13);
   private static final org.apache.thrift.protocol.TField TEMPORARY_FIELD_DESC = new org.apache.thrift.protocol.TField("temporary", org.apache.thrift.protocol.TType.BOOL, (short)14);
   private static final org.apache.thrift.protocol.TField REWRITE_ENABLED_FIELD_DESC = new org.apache.thrift.protocol.TField("rewriteEnabled", org.apache.thrift.protocol.TType.BOOL, (short)15);
-  private static final org.apache.thrift.protocol.TField MM_NEXT_WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("mmNextWriteId", org.apache.thrift.protocol.TType.I64, (short)16);
-  private static final org.apache.thrift.protocol.TField MM_WATERMARK_WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("mmWatermarkWriteId", org.apache.thrift.protocol.TType.I64, (short)17);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -77,8 +75,6 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
   private PrincipalPrivilegeSet privileges; // optional
   private boolean temporary; // optional
   private boolean rewriteEnabled; // optional
-  private long mmNextWriteId; // optional
-  private long mmWatermarkWriteId; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -96,9 +92,7 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
     TABLE_TYPE((short)12, "tableType"),
     PRIVILEGES((short)13, "privileges"),
     TEMPORARY((short)14, "temporary"),
-    REWRITE_ENABLED((short)15, "rewriteEnabled"),
-    MM_NEXT_WRITE_ID((short)16, "mmNextWriteId"),
-    MM_WATERMARK_WRITE_ID((short)17, "mmWatermarkWriteId");
+    REWRITE_ENABLED((short)15, "rewriteEnabled");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -143,10 +137,6 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
           return TEMPORARY;
         case 15: // REWRITE_ENABLED
           return REWRITE_ENABLED;
-        case 16: // MM_NEXT_WRITE_ID
-          return MM_NEXT_WRITE_ID;
-        case 17: // MM_WATERMARK_WRITE_ID
-          return MM_WATERMARK_WRITE_ID;
         default:
           return null;
       }
@@ -192,10 +182,8 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
   private static final int __RETENTION_ISSET_ID = 2;
   private static final int __TEMPORARY_ISSET_ID = 3;
   private static final int __REWRITEENABLED_ISSET_ID = 4;
-  private static final int __MMNEXTWRITEID_ISSET_ID = 5;
-  private static final int __MMWATERMARKWRITEID_ISSET_ID = 6;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.MM_NEXT_WRITE_ID,_Fields.MM_WATERMARK_WRITE_ID};
+  private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -232,10 +220,6 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
     tmpMap.put(_Fields.REWRITE_ENABLED, new org.apache.thrift.meta_data.FieldMetaData("rewriteEnabled", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
-    tmpMap.put(_Fields.MM_NEXT_WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("mmNextWriteId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    tmpMap.put(_Fields.MM_WATERMARK_WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("mmWatermarkWriteId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Table.class, metaDataMap);
   }
@@ -322,8 +306,6 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
     }
     this.temporary = other.temporary;
     this.rewriteEnabled = other.rewriteEnabled;
-    this.mmNextWriteId = other.mmNextWriteId;
-    this.mmWatermarkWriteId = other.mmWatermarkWriteId;
   }
 
   public Table deepCopy() {
@@ -352,10 +334,6 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
 
     setRewriteEnabledIsSet(false);
     this.rewriteEnabled = false;
-    setMmNextWriteIdIsSet(false);
-    this.mmNextWriteId = 0;
-    setMmWatermarkWriteIdIsSet(false);
-    this.mmWatermarkWriteId = 0;
   }
 
   public String getTableName() {
@@ -724,50 +702,6 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REWRITEENABLED_ISSET_ID, value);
   }
 
-  public long getMmNextWriteId() {
-    return this.mmNextWriteId;
-  }
-
-  public void setMmNextWriteId(long mmNextWriteId) {
-    this.mmNextWriteId = mmNextWriteId;
-    setMmNextWriteIdIsSet(true);
-  }
-
-  public void unsetMmNextWriteId() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MMNEXTWRITEID_ISSET_ID);
-  }
-
-  /** Returns true if field mmNextWriteId is set (has been assigned a value) and false otherwise */
-  public boolean isSetMmNextWriteId() {
-    return EncodingUtils.testBit(__isset_bitfield, __MMNEXTWRITEID_ISSET_ID);
-  }
-
-  public void setMmNextWriteIdIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MMNEXTWRITEID_ISSET_ID, value);
-  }
-
-  public long getMmWatermarkWriteId() {
-    return this.mmWatermarkWriteId;
-  }
-
-  public void setMmWatermarkWriteId(long mmWatermarkWriteId) {
-    this.mmWatermarkWriteId = mmWatermarkWriteId;
-    setMmWatermarkWriteIdIsSet(true);
-  }
-
-  public void unsetMmWatermarkWriteId() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MMWATERMARKWRITEID_ISSET_ID);
-  }
-
-  /** Returns true if field mmWatermarkWriteId is set (has been assigned a value) and false otherwise */
-  public boolean isSetMmWatermarkWriteId() {
-    return EncodingUtils.testBit(__isset_bitfield, __MMWATERMARKWRITEID_ISSET_ID);
-  }
-
-  public void setMmWatermarkWriteIdIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MMWATERMARKWRITEID_ISSET_ID, value);
-  }
-
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case TABLE_NAME:
@@ -890,22 +824,6 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
       }
       break;
 
-    case MM_NEXT_WRITE_ID:
-      if (value == null) {
-        unsetMmNextWriteId();
-      } else {
-        setMmNextWriteId((Long)value);
-      }
-      break;
-
-    case MM_WATERMARK_WRITE_ID:
-      if (value == null) {
-        unsetMmWatermarkWriteId();
-      } else {
-        setMmWatermarkWriteId((Long)value);
-      }
-      break;
-
     }
   }
 
@@ -956,12 +874,6 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
     case REWRITE_ENABLED:
       return isRewriteEnabled();
 
-    case MM_NEXT_WRITE_ID:
-      return getMmNextWriteId();
-
-    case MM_WATERMARK_WRITE_ID:
-      return getMmWatermarkWriteId();
-
     }
     throw new IllegalStateException();
   }
@@ -1003,10 +915,6 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
       return isSetTemporary();
     case REWRITE_ENABLED:
       return isSetRewriteEnabled();
-    case MM_NEXT_WRITE_ID:
-      return isSetMmNextWriteId();
-    case MM_WATERMARK_WRITE_ID:
-      return isSetMmWatermarkWriteId();
     }
     throw new IllegalStateException();
   }
@@ -1159,24 +1067,6 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
         return false;
     }
 
-    boolean this_present_mmNextWriteId = true && this.isSetMmNextWriteId();
-    boolean that_present_mmNextWriteId = true && that.isSetMmNextWriteId();
-    if (this_present_mmNextWriteId || that_present_mmNextWriteId) {
-      if (!(this_present_mmNextWriteId && that_present_mmNextWriteId))
-        return false;
-      if (this.mmNextWriteId != that.mmNextWriteId)
-        return false;
-    }
-
-    boolean this_present_mmWatermarkWriteId = true && this.isSetMmWatermarkWriteId();
-    boolean that_present_mmWatermarkWriteId = true && that.isSetMmWatermarkWriteId();
-    if (this_present_mmWatermarkWriteId || that_present_mmWatermarkWriteId) {
-      if (!(this_present_mmWatermarkWriteId && that_present_mmWatermarkWriteId))
-        return false;
-      if (this.mmWatermarkWriteId != that.mmWatermarkWriteId)
-        return false;
-    }
-
     return true;
   }
 
@@ -1259,16 +1149,6 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
     if (present_rewriteEnabled)
       list.add(rewriteEnabled);
 
-    boolean present_mmNextWriteId = true && (isSetMmNextWriteId());
-    list.add(present_mmNextWriteId);
-    if (present_mmNextWriteId)
-      list.add(mmNextWriteId);
-
-    boolean present_mmWatermarkWriteId = true && (isSetMmWatermarkWriteId());
-    list.add(present_mmWatermarkWriteId);
-    if (present_mmWatermarkWriteId)
-      list.add(mmWatermarkWriteId);
-
     return list.hashCode();
   }
 
@@ -1430,26 +1310,6 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(isSetMmNextWriteId()).compareTo(other.isSetMmNextWriteId());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetMmNextWriteId()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mmNextWriteId, other.mmNextWriteId);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(isSetMmWatermarkWriteId()).compareTo(other.isSetMmWatermarkWriteId());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetMmWatermarkWriteId()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mmWatermarkWriteId, other.mmWatermarkWriteId);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
     return 0;
   }
 
@@ -1575,18 +1435,6 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
       sb.append(this.rewriteEnabled);
       first = false;
     }
-    if (isSetMmNextWriteId()) {
-      if (!first) sb.append(", ");
-      sb.append("mmNextWriteId:");
-      sb.append(this.mmNextWriteId);
-      first = false;
-    }
-    if (isSetMmWatermarkWriteId()) {
-      if (!first) sb.append(", ");
-      sb.append("mmWatermarkWriteId:");
-      sb.append(this.mmWatermarkWriteId);
-      first = false;
-    }
     sb.append(")");
     return sb.toString();
   }
@@ -1783,22 +1631,6 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 16: // MM_NEXT_WRITE_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.mmNextWriteId = iprot.readI64();
-              struct.setMmNextWriteIdIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 17: // MM_WATERMARK_WRITE_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.mmWatermarkWriteId = iprot.readI64();
-              struct.setMmWatermarkWriteIdIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -1898,16 +1730,6 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
         oprot.writeBool(struct.rewriteEnabled);
         oprot.writeFieldEnd();
       }
-      if (struct.isSetMmNextWriteId()) {
-        oprot.writeFieldBegin(MM_NEXT_WRITE_ID_FIELD_DESC);
-        oprot.writeI64(struct.mmNextWriteId);
-        oprot.writeFieldEnd();
-      }
-      if (struct.isSetMmWatermarkWriteId()) {
-        oprot.writeFieldBegin(MM_WATERMARK_WRITE_ID_FIELD_DESC);
-        oprot.writeI64(struct.mmWatermarkWriteId);
-        oprot.writeFieldEnd();
-      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -1971,13 +1793,7 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
       if (struct.isSetRewriteEnabled()) {
         optionals.set(14);
       }
-      if (struct.isSetMmNextWriteId()) {
-        optionals.set(15);
-      }
-      if (struct.isSetMmWatermarkWriteId()) {
-        optionals.set(16);
-      }
-      oprot.writeBitSet(optionals, 17);
+      oprot.writeBitSet(optionals, 15);
       if (struct.isSetTableName()) {
         oprot.writeString(struct.tableName);
       }
@@ -2036,18 +1852,12 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
       if (struct.isSetRewriteEnabled()) {
         oprot.writeBool(struct.rewriteEnabled);
       }
-      if (struct.isSetMmNextWriteId()) {
-        oprot.writeI64(struct.mmNextWriteId);
-      }
-      if (struct.isSetMmWatermarkWriteId()) {
-        oprot.writeI64(struct.mmWatermarkWriteId);
-      }
     }
 
     @Override
     public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
-      BitSet incoming = iprot.readBitSet(17);
+      BitSet incoming = iprot.readBitSet(15);
       if (incoming.get(0)) {
         struct.tableName = iprot.readString();
         struct.setTableNameIsSet(true);
@@ -2131,14 +1941,6 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
         struct.rewriteEnabled = iprot.readBool();
         struct.setRewriteEnabledIsSet(true);
       }
-      if (incoming.get(15)) {
-        struct.mmNextWriteId = iprot.readI64();
-        struct.setMmNextWriteIdIsSet(true);
-      }
-      if (incoming.get(16)) {
-        struct.mmWatermarkWriteId = iprot.readI64();
-        struct.setMmWatermarkWriteIdIsSet(true);
-      }
     }
   }
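
The tuple-scheme hunks above are the delicate part of this change: TTupleProtocol writes a leading bitset naming which optional fields follow, so dropping the two trailing MM fields shrinks that bitmap from 17 to 15 slots on both the write and read paths, and writer and reader must move in lockstep. A minimal, self-contained Python sketch of the encoding idea (illustrative only; the helper names are hypothetical, not the generated API):

    # Toy model of Thrift's tuple protocol: a presence bitmap for the
    # optional fields, then the present values in field order.
    def write_tuple(fields, optional_slots):
        bits = [name in fields for name in optional_slots]
        payload = [fields[name] for name in optional_slots if name in fields]
        return bits, payload

    def read_tuple(bits, payload, optional_slots):
        fields, values = {}, iter(payload)
        for name, present in zip(optional_slots, bits):
            if present:
                fields[name] = next(values)
        return fields

    # Table formerly had 17 optional slots; slots 15 and 16 were the MM ids.
    slots = ['rewriteEnabled', 'mmNextWriteId', 'mmWatermarkWriteId']
    bits, payload = write_tuple({'rewriteEnabled': True}, slots)
    assert read_tuple(bits, payload, slots) == {'rewriteEnabled': True}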
 


[08/18] hive git commit: HIVE-14879 : integrate MM tables into ACID: replace MM metastore calls and structures with ACID ones (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
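A note on reading the Python hunks below: the write-ID classes (GetNextWriteIdRequest, GetNextWriteIdResult, FinalizeWriteIdRequest/Result, HeartbeatWriteIdRequest/Result, GetValidWriteIdsRequest/Result) are deleted outright, so the diff algorithm pairs their removed bodies with the structurally similar GetAllFunctionsResponse, ClientCapabilities, GetTableRequest/Result, and GetTablesRequest/Result classes that used to follow them in the file. Those surviving classes are not actually rewritten; apart from renumbered generated temporaries (the _etype/_size/_elem/iter suffixes, which shift whenever earlier list-bearing structs disappear), their bodies are unchanged.
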
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 3d20125..42aaa9c 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -3137,8 +3137,6 @@ class Table:
    - privileges
    - temporary
    - rewriteEnabled
-   - mmNextWriteId
-   - mmWatermarkWriteId
   """
 
   thrift_spec = (
@@ -3158,11 +3156,9 @@ class Table:
     (13, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 13
     (14, TType.BOOL, 'temporary', None, False, ), # 14
     (15, TType.BOOL, 'rewriteEnabled', None, None, ), # 15
-    (16, TType.I64, 'mmNextWriteId', None, None, ), # 16
-    (17, TType.I64, 'mmWatermarkWriteId', None, None, ), # 17
   )
 
-  def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, mmNextWriteId=None, mmWatermarkWriteId=None,):
+  def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None,):
     self.tableName = tableName
     self.dbName = dbName
     self.owner = owner
@@ -3178,8 +3174,6 @@ class Table:
     self.privileges = privileges
     self.temporary = temporary
     self.rewriteEnabled = rewriteEnabled
-    self.mmNextWriteId = mmNextWriteId
-    self.mmWatermarkWriteId = mmWatermarkWriteId
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -3279,16 +3273,6 @@ class Table:
           self.rewriteEnabled = iprot.readBool()
         else:
           iprot.skip(ftype)
-      elif fid == 16:
-        if ftype == TType.I64:
-          self.mmNextWriteId = iprot.readI64()
-        else:
-          iprot.skip(ftype)
-      elif fid == 17:
-        if ftype == TType.I64:
-          self.mmWatermarkWriteId = iprot.readI64()
-        else:
-          iprot.skip(ftype)
       else:
         iprot.skip(ftype)
       iprot.readFieldEnd()
@@ -3366,14 +3350,6 @@ class Table:
       oprot.writeFieldBegin('rewriteEnabled', TType.BOOL, 15)
       oprot.writeBool(self.rewriteEnabled)
       oprot.writeFieldEnd()
-    if self.mmNextWriteId is not None:
-      oprot.writeFieldBegin('mmNextWriteId', TType.I64, 16)
-      oprot.writeI64(self.mmNextWriteId)
-      oprot.writeFieldEnd()
-    if self.mmWatermarkWriteId is not None:
-      oprot.writeFieldBegin('mmWatermarkWriteId', TType.I64, 17)
-      oprot.writeI64(self.mmWatermarkWriteId)
-      oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
 
@@ -3398,8 +3374,6 @@ class Table:
     value = (value * 31) ^ hash(self.privileges)
     value = (value * 31) ^ hash(self.temporary)
     value = (value * 31) ^ hash(self.rewriteEnabled)
-    value = (value * 31) ^ hash(self.mmNextWriteId)
-    value = (value * 31) ^ hash(self.mmWatermarkWriteId)
     return value
 
   def __repr__(self):
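
The __hash__ hunk above drops two terms from the generated hash fold, so Table hash values change once the MM fields are gone, which matters if anything caches these structs across versions. Note that the removed fields contributed hash(None) even when unset. The recipe itself, rendered as a standalone sketch (field values hypothetical):

    def combine_hashes(field_hashes):
        value = 17
        for h in field_hashes:
            value = (value * 31) ^ h   # same fold as the generated __hash__
        return value

    print(combine_hashes([hash('db1'), hash('t1'), hash(None), hash(None)]))
    print(combine_hashes([hash('db1'), hash('t1')]))   # post-HIVE-14879 shape
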
@@ -12455,22 +12429,19 @@ class CacheFileMetadataRequest:
   def __ne__(self, other):
     return not (self == other)
 
-class GetNextWriteIdRequest:
+class GetAllFunctionsResponse:
   """
   Attributes:
-   - dbName
-   - tblName
+   - functions
   """
 
   thrift_spec = (
     None, # 0
-    (1, TType.STRING, 'dbName', None, None, ), # 1
-    (2, TType.STRING, 'tblName', None, None, ), # 2
+    (1, TType.LIST, 'functions', (TType.STRUCT,(Function, Function.thrift_spec)), None, ), # 1
   )
 
-  def __init__(self, dbName=None, tblName=None,):
-    self.dbName = dbName
-    self.tblName = tblName
+  def __init__(self, functions=None,):
+    self.functions = functions
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -12482,13 +12453,14 @@ class GetNextWriteIdRequest:
       if ftype == TType.STOP:
         break
       if fid == 1:
-        if ftype == TType.STRING:
-          self.dbName = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRING:
-          self.tblName = iprot.readString()
+        if ftype == TType.LIST:
+          self.functions = []
+          (_etype569, _size566) = iprot.readListBegin()
+          for _i570 in xrange(_size566):
+            _elem571 = Function()
+            _elem571.read(iprot)
+            self.functions.append(_elem571)
+          iprot.readListEnd()
         else:
           iprot.skip(ftype)
       else:
@@ -12500,30 +12472,24 @@ class GetNextWriteIdRequest:
     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
       return
-    oprot.writeStructBegin('GetNextWriteIdRequest')
-    if self.dbName is not None:
-      oprot.writeFieldBegin('dbName', TType.STRING, 1)
-      oprot.writeString(self.dbName)
-      oprot.writeFieldEnd()
-    if self.tblName is not None:
-      oprot.writeFieldBegin('tblName', TType.STRING, 2)
-      oprot.writeString(self.tblName)
+    oprot.writeStructBegin('GetAllFunctionsResponse')
+    if self.functions is not None:
+      oprot.writeFieldBegin('functions', TType.LIST, 1)
+      oprot.writeListBegin(TType.STRUCT, len(self.functions))
+      for iter572 in self.functions:
+        iter572.write(oprot)
+      oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
 
   def validate(self):
-    if self.dbName is None:
-      raise TProtocol.TProtocolException(message='Required field dbName is unset!')
-    if self.tblName is None:
-      raise TProtocol.TProtocolException(message='Required field tblName is unset!')
     return
 
 
   def __hash__(self):
     value = 17
-    value = (value * 31) ^ hash(self.dbName)
-    value = (value * 31) ^ hash(self.tblName)
+    value = (value * 31) ^ hash(self.functions)
     return value
 
   def __repr__(self):
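
One detail worth noticing in the readers above and below: every generated binary-protocol read loop ends in iprot.skip(ftype) (TProtocolUtil.skip on the Java side), so a reader that no longer knows a field id simply skips it by wire type. That is what keeps the binary protocol tolerant when trailing optional fields such as 16/17 are removed. A toy sketch of the dispatch loop, with a hypothetical pre-change writer:

    STOP = 0

    def read_struct(stream, known_ids):
        fields = {}
        for fid, ftype, value in stream:
            if ftype == STOP:
                break
            if fid in known_ids:
                fields[fid] = value   # field this reader still understands
            # else: skipped by type, e.g. an old writer sending fid 16/17
        return fields

    old_writer = [(15, 2, True), (16, 10, 42), (17, 10, 7), (None, STOP, None)]
    print(read_struct(old_writer, known_ids={1, 2, 15}))   # {15: True}
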
@@ -12537,19 +12503,19 @@ class GetNextWriteIdRequest:
   def __ne__(self, other):
     return not (self == other)
 
-class GetNextWriteIdResult:
+class ClientCapabilities:
   """
   Attributes:
-   - writeId
+   - values
   """
 
   thrift_spec = (
     None, # 0
-    (1, TType.I64, 'writeId', None, None, ), # 1
+    (1, TType.LIST, 'values', (TType.I32,None), None, ), # 1
   )
 
-  def __init__(self, writeId=None,):
-    self.writeId = writeId
+  def __init__(self, values=None,):
+    self.values = values
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -12561,8 +12527,13 @@ class GetNextWriteIdResult:
       if ftype == TType.STOP:
         break
       if fid == 1:
-        if ftype == TType.I64:
-          self.writeId = iprot.readI64()
+        if ftype == TType.LIST:
+          self.values = []
+          (_etype576, _size573) = iprot.readListBegin()
+          for _i577 in xrange(_size573):
+            _elem578 = iprot.readI32()
+            self.values.append(_elem578)
+          iprot.readListEnd()
         else:
           iprot.skip(ftype)
       else:
@@ -12574,23 +12545,26 @@ class GetNextWriteIdResult:
     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
       return
-    oprot.writeStructBegin('GetNextWriteIdResult')
-    if self.writeId is not None:
-      oprot.writeFieldBegin('writeId', TType.I64, 1)
-      oprot.writeI64(self.writeId)
+    oprot.writeStructBegin('ClientCapabilities')
+    if self.values is not None:
+      oprot.writeFieldBegin('values', TType.LIST, 1)
+      oprot.writeListBegin(TType.I32, len(self.values))
+      for iter579 in self.values:
+        oprot.writeI32(iter579)
+      oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
 
   def validate(self):
-    if self.writeId is None:
-      raise TProtocol.TProtocolException(message='Required field writeId is unset!')
+    if self.values is None:
+      raise TProtocol.TProtocolException(message='Required field values is unset!')
     return
 
 
   def __hash__(self):
     value = 17
-    value = (value * 31) ^ hash(self.writeId)
+    value = (value * 31) ^ hash(self.values)
     return value
 
   def __repr__(self):
@@ -12604,28 +12578,25 @@ class GetNextWriteIdResult:
   def __ne__(self, other):
     return not (self == other)
 
-class FinalizeWriteIdRequest:
+class GetTableRequest:
   """
   Attributes:
    - dbName
    - tblName
-   - writeId
-   - commit
+   - capabilities
   """
 
   thrift_spec = (
     None, # 0
     (1, TType.STRING, 'dbName', None, None, ), # 1
     (2, TType.STRING, 'tblName', None, None, ), # 2
-    (3, TType.I64, 'writeId', None, None, ), # 3
-    (4, TType.BOOL, 'commit', None, None, ), # 4
+    (3, TType.STRUCT, 'capabilities', (ClientCapabilities, ClientCapabilities.thrift_spec), None, ), # 3
   )
 
-  def __init__(self, dbName=None, tblName=None, writeId=None, commit=None,):
+  def __init__(self, dbName=None, tblName=None, capabilities=None,):
     self.dbName = dbName
     self.tblName = tblName
-    self.writeId = writeId
-    self.commit = commit
+    self.capabilities = capabilities
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -12647,13 +12618,9 @@ class FinalizeWriteIdRequest:
         else:
           iprot.skip(ftype)
       elif fid == 3:
-        if ftype == TType.I64:
-          self.writeId = iprot.readI64()
-        else:
-          iprot.skip(ftype)
-      elif fid == 4:
-        if ftype == TType.BOOL:
-          self.commit = iprot.readBool()
+        if ftype == TType.STRUCT:
+          self.capabilities = ClientCapabilities()
+          self.capabilities.read(iprot)
         else:
           iprot.skip(ftype)
       else:
@@ -12665,7 +12632,7 @@ class FinalizeWriteIdRequest:
     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
       return
-    oprot.writeStructBegin('FinalizeWriteIdRequest')
+    oprot.writeStructBegin('GetTableRequest')
     if self.dbName is not None:
       oprot.writeFieldBegin('dbName', TType.STRING, 1)
       oprot.writeString(self.dbName)
@@ -12674,13 +12641,9 @@ class FinalizeWriteIdRequest:
       oprot.writeFieldBegin('tblName', TType.STRING, 2)
       oprot.writeString(self.tblName)
       oprot.writeFieldEnd()
-    if self.writeId is not None:
-      oprot.writeFieldBegin('writeId', TType.I64, 3)
-      oprot.writeI64(self.writeId)
-      oprot.writeFieldEnd()
-    if self.commit is not None:
-      oprot.writeFieldBegin('commit', TType.BOOL, 4)
-      oprot.writeBool(self.commit)
+    if self.capabilities is not None:
+      oprot.writeFieldBegin('capabilities', TType.STRUCT, 3)
+      self.capabilities.write(oprot)
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
@@ -12690,10 +12653,6 @@ class FinalizeWriteIdRequest:
       raise TProtocol.TProtocolException(message='Required field dbName is unset!')
     if self.tblName is None:
       raise TProtocol.TProtocolException(message='Required field tblName is unset!')
-    if self.writeId is None:
-      raise TProtocol.TProtocolException(message='Required field writeId is unset!')
-    if self.commit is None:
-      raise TProtocol.TProtocolException(message='Required field commit is unset!')
     return
 
 
@@ -12701,8 +12660,7 @@ class FinalizeWriteIdRequest:
     value = 17
     value = (value * 31) ^ hash(self.dbName)
     value = (value * 31) ^ hash(self.tblName)
-    value = (value * 31) ^ hash(self.writeId)
-    value = (value * 31) ^ hash(self.commit)
+    value = (value * 31) ^ hash(self.capabilities)
     return value
 
   def __repr__(self):
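
The validate() hunks above make the requiredness story explicit: the Python generator enforces required fields with None checks at validate time rather than in thrift_spec, so deleting FinalizeWriteIdRequest's required writeId/commit checks is part of deleting the struct's contract. The convention, as a stand-alone sketch (a toy class, not the generated one):

    class GetTableRequestToy(object):
        def __init__(self, dbName=None, tblName=None, capabilities=None):
            self.dbName = dbName
            self.tblName = tblName
            self.capabilities = capabilities   # optional: never checked

        def validate(self):
            if self.dbName is None:
                raise ValueError('Required field dbName is unset!')
            if self.tblName is None:
                raise ValueError('Required field tblName is unset!')

    GetTableRequestToy('default', 'tbl1').validate()   # passes
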
@@ -12716,11 +12674,20 @@ class FinalizeWriteIdRequest:
   def __ne__(self, other):
     return not (self == other)
 
-class FinalizeWriteIdResult:
+class GetTableResult:
+  """
+  Attributes:
+   - table
+  """
 
   thrift_spec = (
+    None, # 0
+    (1, TType.STRUCT, 'table', (Table, Table.thrift_spec), None, ), # 1
   )
 
+  def __init__(self, table=None,):
+    self.table = table
+
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
@@ -12730,6 +12697,12 @@ class FinalizeWriteIdResult:
       (fname, ftype, fid) = iprot.readFieldBegin()
       if ftype == TType.STOP:
         break
+      if fid == 1:
+        if ftype == TType.STRUCT:
+          self.table = Table()
+          self.table.read(iprot)
+        else:
+          iprot.skip(ftype)
       else:
         iprot.skip(ftype)
       iprot.readFieldEnd()
@@ -12739,16 +12712,23 @@ class FinalizeWriteIdResult:
     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
       return
-    oprot.writeStructBegin('FinalizeWriteIdResult')
+    oprot.writeStructBegin('GetTableResult')
+    if self.table is not None:
+      oprot.writeFieldBegin('table', TType.STRUCT, 1)
+      self.table.write(oprot)
+      oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
 
   def validate(self):
+    if self.table is None:
+      raise TProtocol.TProtocolException(message='Required field table is unset!')
     return
 
 
   def __hash__(self):
     value = 17
+    value = (value * 31) ^ hash(self.table)
     return value
 
   def __repr__(self):
@@ -12762,25 +12742,25 @@ class FinalizeWriteIdResult:
   def __ne__(self, other):
     return not (self == other)
 
-class HeartbeatWriteIdRequest:
+class GetTablesRequest:
   """
   Attributes:
    - dbName
-   - tblName
-   - writeId
+   - tblNames
+   - capabilities
   """
 
   thrift_spec = (
     None, # 0
     (1, TType.STRING, 'dbName', None, None, ), # 1
-    (2, TType.STRING, 'tblName', None, None, ), # 2
-    (3, TType.I64, 'writeId', None, None, ), # 3
+    (2, TType.LIST, 'tblNames', (TType.STRING,None), None, ), # 2
+    (3, TType.STRUCT, 'capabilities', (ClientCapabilities, ClientCapabilities.thrift_spec), None, ), # 3
   )
 
-  def __init__(self, dbName=None, tblName=None, writeId=None,):
+  def __init__(self, dbName=None, tblNames=None, capabilities=None,):
     self.dbName = dbName
-    self.tblName = tblName
-    self.writeId = writeId
+    self.tblNames = tblNames
+    self.capabilities = capabilities
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -12797,13 +12777,19 @@ class HeartbeatWriteIdRequest:
         else:
           iprot.skip(ftype)
       elif fid == 2:
-        if ftype == TType.STRING:
-          self.tblName = iprot.readString()
+        if ftype == TType.LIST:
+          self.tblNames = []
+          (_etype583, _size580) = iprot.readListBegin()
+          for _i584 in xrange(_size580):
+            _elem585 = iprot.readString()
+            self.tblNames.append(_elem585)
+          iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 3:
-        if ftype == TType.I64:
-          self.writeId = iprot.readI64()
+        if ftype == TType.STRUCT:
+          self.capabilities = ClientCapabilities()
+          self.capabilities.read(iprot)
         else:
           iprot.skip(ftype)
       else:
@@ -12815,18 +12801,21 @@ class HeartbeatWriteIdRequest:
     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
       return
-    oprot.writeStructBegin('HeartbeatWriteIdRequest')
+    oprot.writeStructBegin('GetTablesRequest')
     if self.dbName is not None:
       oprot.writeFieldBegin('dbName', TType.STRING, 1)
       oprot.writeString(self.dbName)
       oprot.writeFieldEnd()
-    if self.tblName is not None:
-      oprot.writeFieldBegin('tblName', TType.STRING, 2)
-      oprot.writeString(self.tblName)
+    if self.tblNames is not None:
+      oprot.writeFieldBegin('tblNames', TType.LIST, 2)
+      oprot.writeListBegin(TType.STRING, len(self.tblNames))
+      for iter586 in self.tblNames:
+        oprot.writeString(iter586)
+      oprot.writeListEnd()
       oprot.writeFieldEnd()
-    if self.writeId is not None:
-      oprot.writeFieldBegin('writeId', TType.I64, 3)
-      oprot.writeI64(self.writeId)
+    if self.capabilities is not None:
+      oprot.writeFieldBegin('capabilities', TType.STRUCT, 3)
+      self.capabilities.write(oprot)
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
@@ -12834,64 +12823,14 @@ class HeartbeatWriteIdRequest:
   def validate(self):
     if self.dbName is None:
       raise TProtocol.TProtocolException(message='Required field dbName is unset!')
-    if self.tblName is None:
-      raise TProtocol.TProtocolException(message='Required field tblName is unset!')
-    if self.writeId is None:
-      raise TProtocol.TProtocolException(message='Required field writeId is unset!')
     return
 
 
   def __hash__(self):
     value = 17
     value = (value * 31) ^ hash(self.dbName)
-    value = (value * 31) ^ hash(self.tblName)
-    value = (value * 31) ^ hash(self.writeId)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class HeartbeatWriteIdResult:
-
-  thrift_spec = (
-  )
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('HeartbeatWriteIdResult')
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __hash__(self):
-    value = 17
+    value = (value * 31) ^ hash(self.tblNames)
+    value = (value * 31) ^ hash(self.capabilities)
     return value
 
   def __repr__(self):
@@ -12905,291 +12844,19 @@ class HeartbeatWriteIdResult:
   def __ne__(self, other):
     return not (self == other)
 
-class GetValidWriteIdsRequest:
+class GetTablesResult:
   """
   Attributes:
-   - dbName
-   - tblName
+   - tables
   """
 
   thrift_spec = (
     None, # 0
-    (1, TType.STRING, 'dbName', None, None, ), # 1
-    (2, TType.STRING, 'tblName', None, None, ), # 2
+    (1, TType.LIST, 'tables', (TType.STRUCT,(Table, Table.thrift_spec)), None, ), # 1
   )
 
-  def __init__(self, dbName=None, tblName=None,):
-    self.dbName = dbName
-    self.tblName = tblName
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.dbName = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRING:
-          self.tblName = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('GetValidWriteIdsRequest')
-    if self.dbName is not None:
-      oprot.writeFieldBegin('dbName', TType.STRING, 1)
-      oprot.writeString(self.dbName)
-      oprot.writeFieldEnd()
-    if self.tblName is not None:
-      oprot.writeFieldBegin('tblName', TType.STRING, 2)
-      oprot.writeString(self.tblName)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.dbName is None:
-      raise TProtocol.TProtocolException(message='Required field dbName is unset!')
-    if self.tblName is None:
-      raise TProtocol.TProtocolException(message='Required field tblName is unset!')
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.dbName)
-    value = (value * 31) ^ hash(self.tblName)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class GetValidWriteIdsResult:
-  """
-  Attributes:
-   - lowWatermarkId
-   - highWatermarkId
-   - areIdsValid
-   - ids
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.I64, 'lowWatermarkId', None, None, ), # 1
-    (2, TType.I64, 'highWatermarkId', None, None, ), # 2
-    (3, TType.BOOL, 'areIdsValid', None, None, ), # 3
-    (4, TType.LIST, 'ids', (TType.I64,None), None, ), # 4
-  )
-
-  def __init__(self, lowWatermarkId=None, highWatermarkId=None, areIdsValid=None, ids=None,):
-    self.lowWatermarkId = lowWatermarkId
-    self.highWatermarkId = highWatermarkId
-    self.areIdsValid = areIdsValid
-    self.ids = ids
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.I64:
-          self.lowWatermarkId = iprot.readI64()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.I64:
-          self.highWatermarkId = iprot.readI64()
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.BOOL:
-          self.areIdsValid = iprot.readBool()
-        else:
-          iprot.skip(ftype)
-      elif fid == 4:
-        if ftype == TType.LIST:
-          self.ids = []
-          (_etype569, _size566) = iprot.readListBegin()
-          for _i570 in xrange(_size566):
-            _elem571 = iprot.readI64()
-            self.ids.append(_elem571)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('GetValidWriteIdsResult')
-    if self.lowWatermarkId is not None:
-      oprot.writeFieldBegin('lowWatermarkId', TType.I64, 1)
-      oprot.writeI64(self.lowWatermarkId)
-      oprot.writeFieldEnd()
-    if self.highWatermarkId is not None:
-      oprot.writeFieldBegin('highWatermarkId', TType.I64, 2)
-      oprot.writeI64(self.highWatermarkId)
-      oprot.writeFieldEnd()
-    if self.areIdsValid is not None:
-      oprot.writeFieldBegin('areIdsValid', TType.BOOL, 3)
-      oprot.writeBool(self.areIdsValid)
-      oprot.writeFieldEnd()
-    if self.ids is not None:
-      oprot.writeFieldBegin('ids', TType.LIST, 4)
-      oprot.writeListBegin(TType.I64, len(self.ids))
-      for iter572 in self.ids:
-        oprot.writeI64(iter572)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.lowWatermarkId is None:
-      raise TProtocol.TProtocolException(message='Required field lowWatermarkId is unset!')
-    if self.highWatermarkId is None:
-      raise TProtocol.TProtocolException(message='Required field highWatermarkId is unset!')
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.lowWatermarkId)
-    value = (value * 31) ^ hash(self.highWatermarkId)
-    value = (value * 31) ^ hash(self.areIdsValid)
-    value = (value * 31) ^ hash(self.ids)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class GetAllFunctionsResponse:
-  """
-  Attributes:
-   - functions
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.LIST, 'functions', (TType.STRUCT,(Function, Function.thrift_spec)), None, ), # 1
-  )
-
-  def __init__(self, functions=None,):
-    self.functions = functions
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.LIST:
-          self.functions = []
-          (_etype576, _size573) = iprot.readListBegin()
-          for _i577 in xrange(_size573):
-            _elem578 = Function()
-            _elem578.read(iprot)
-            self.functions.append(_elem578)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('GetAllFunctionsResponse')
-    if self.functions is not None:
-      oprot.writeFieldBegin('functions', TType.LIST, 1)
-      oprot.writeListBegin(TType.STRUCT, len(self.functions))
-      for iter579 in self.functions:
-        iter579.write(oprot)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.functions)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class ClientCapabilities:
-  """
-  Attributes:
-   - values
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.LIST, 'values', (TType.I32,None), None, ), # 1
-  )
-
-  def __init__(self, values=None,):
-    self.values = values
+  def __init__(self, tables=None,):
+    self.tables = tables
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -13202,353 +12869,12 @@ class ClientCapabilities:
         break
       if fid == 1:
         if ftype == TType.LIST:
-          self.values = []
-          (_etype583, _size580) = iprot.readListBegin()
-          for _i584 in xrange(_size580):
-            _elem585 = iprot.readI32()
-            self.values.append(_elem585)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('ClientCapabilities')
-    if self.values is not None:
-      oprot.writeFieldBegin('values', TType.LIST, 1)
-      oprot.writeListBegin(TType.I32, len(self.values))
-      for iter586 in self.values:
-        oprot.writeI32(iter586)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.values is None:
-      raise TProtocol.TProtocolException(message='Required field values is unset!')
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.values)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class GetTableRequest:
-  """
-  Attributes:
-   - dbName
-   - tblName
-   - capabilities
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'dbName', None, None, ), # 1
-    (2, TType.STRING, 'tblName', None, None, ), # 2
-    (3, TType.STRUCT, 'capabilities', (ClientCapabilities, ClientCapabilities.thrift_spec), None, ), # 3
-  )
-
-  def __init__(self, dbName=None, tblName=None, capabilities=None,):
-    self.dbName = dbName
-    self.tblName = tblName
-    self.capabilities = capabilities
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.dbName = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRING:
-          self.tblName = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.STRUCT:
-          self.capabilities = ClientCapabilities()
-          self.capabilities.read(iprot)
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('GetTableRequest')
-    if self.dbName is not None:
-      oprot.writeFieldBegin('dbName', TType.STRING, 1)
-      oprot.writeString(self.dbName)
-      oprot.writeFieldEnd()
-    if self.tblName is not None:
-      oprot.writeFieldBegin('tblName', TType.STRING, 2)
-      oprot.writeString(self.tblName)
-      oprot.writeFieldEnd()
-    if self.capabilities is not None:
-      oprot.writeFieldBegin('capabilities', TType.STRUCT, 3)
-      self.capabilities.write(oprot)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.dbName is None:
-      raise TProtocol.TProtocolException(message='Required field dbName is unset!')
-    if self.tblName is None:
-      raise TProtocol.TProtocolException(message='Required field tblName is unset!')
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.dbName)
-    value = (value * 31) ^ hash(self.tblName)
-    value = (value * 31) ^ hash(self.capabilities)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class GetTableResult:
-  """
-  Attributes:
-   - table
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRUCT, 'table', (Table, Table.thrift_spec), None, ), # 1
-  )
-
-  def __init__(self, table=None,):
-    self.table = table
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRUCT:
-          self.table = Table()
-          self.table.read(iprot)
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('GetTableResult')
-    if self.table is not None:
-      oprot.writeFieldBegin('table', TType.STRUCT, 1)
-      self.table.write(oprot)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.table is None:
-      raise TProtocol.TProtocolException(message='Required field table is unset!')
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.table)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class GetTablesRequest:
-  """
-  Attributes:
-   - dbName
-   - tblNames
-   - capabilities
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'dbName', None, None, ), # 1
-    (2, TType.LIST, 'tblNames', (TType.STRING,None), None, ), # 2
-    (3, TType.STRUCT, 'capabilities', (ClientCapabilities, ClientCapabilities.thrift_spec), None, ), # 3
-  )
-
-  def __init__(self, dbName=None, tblNames=None, capabilities=None,):
-    self.dbName = dbName
-    self.tblNames = tblNames
-    self.capabilities = capabilities
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.dbName = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.LIST:
-          self.tblNames = []
+          self.tables = []
           (_etype590, _size587) = iprot.readListBegin()
           for _i591 in xrange(_size587):
-            _elem592 = iprot.readString()
-            self.tblNames.append(_elem592)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.STRUCT:
-          self.capabilities = ClientCapabilities()
-          self.capabilities.read(iprot)
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('GetTablesRequest')
-    if self.dbName is not None:
-      oprot.writeFieldBegin('dbName', TType.STRING, 1)
-      oprot.writeString(self.dbName)
-      oprot.writeFieldEnd()
-    if self.tblNames is not None:
-      oprot.writeFieldBegin('tblNames', TType.LIST, 2)
-      oprot.writeListBegin(TType.STRING, len(self.tblNames))
-      for iter593 in self.tblNames:
-        oprot.writeString(iter593)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    if self.capabilities is not None:
-      oprot.writeFieldBegin('capabilities', TType.STRUCT, 3)
-      self.capabilities.write(oprot)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.dbName is None:
-      raise TProtocol.TProtocolException(message='Required field dbName is unset!')
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.dbName)
-    value = (value * 31) ^ hash(self.tblNames)
-    value = (value * 31) ^ hash(self.capabilities)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class GetTablesResult:
-  """
-  Attributes:
-   - tables
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.LIST, 'tables', (TType.STRUCT,(Table, Table.thrift_spec)), None, ), # 1
-  )
-
-  def __init__(self, tables=None,):
-    self.tables = tables
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.LIST:
-          self.tables = []
-          (_etype597, _size594) = iprot.readListBegin()
-          for _i598 in xrange(_size594):
-            _elem599 = Table()
-            _elem599.read(iprot)
-            self.tables.append(_elem599)
+            _elem592 = Table()
+            _elem592.read(iprot)
+            self.tables.append(_elem592)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -13565,8 +12891,8 @@ class GetTablesResult:
     if self.tables is not None:
       oprot.writeFieldBegin('tables', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.tables))
-      for iter600 in self.tables:
-        iter600.write(oprot)
+      for iter593 in self.tables:
+        iter593.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
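
The Ruby bindings get the parallel cleanup next: in hive_metastore_types.rb the MMNEXTWRITEID/MMWATERMARKWRITEID field constants and their :optional => true FIELDS entries disappear from Table, and the eight write-ID structs (GetNextWriteIdRequest through GetValidWriteIdsResult) are removed along with their required-field validate methods.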

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 5e18f9b..f411dfa 100644
--- a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -736,8 +736,6 @@ class Table
   PRIVILEGES = 13
   TEMPORARY = 14
   REWRITEENABLED = 15
-  MMNEXTWRITEID = 16
-  MMWATERMARKWRITEID = 17
 
   FIELDS = {
     TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
@@ -754,9 +752,7 @@ class Table
     TABLETYPE => {:type => ::Thrift::Types::STRING, :name => 'tableType'},
     PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true},
     TEMPORARY => {:type => ::Thrift::Types::BOOL, :name => 'temporary', :default => false, :optional => true},
-    REWRITEENABLED => {:type => ::Thrift::Types::BOOL, :name => 'rewriteEnabled', :optional => true},
-    MMNEXTWRITEID => {:type => ::Thrift::Types::I64, :name => 'mmNextWriteId', :optional => true},
-    MMWATERMARKWRITEID => {:type => ::Thrift::Types::I64, :name => 'mmWatermarkWriteId', :optional => true}
+    REWRITEENABLED => {:type => ::Thrift::Types::BOOL, :name => 'rewriteEnabled', :optional => true}
   }
 
   def struct_fields; FIELDS; end
@@ -2809,166 +2805,6 @@ class CacheFileMetadataRequest
   ::Thrift::Struct.generate_accessors self
 end
 
-class GetNextWriteIdRequest
-  include ::Thrift::Struct, ::Thrift::Struct_Union
-  DBNAME = 1
-  TBLNAME = 2
-
-  FIELDS = {
-    DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
-    TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}
-  }
-
-  def struct_fields; FIELDS; end
-
-  def validate
-    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
-    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName
-  end
-
-  ::Thrift::Struct.generate_accessors self
-end
-
-class GetNextWriteIdResult
-  include ::Thrift::Struct, ::Thrift::Struct_Union
-  WRITEID = 1
-
-  FIELDS = {
-    WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId'}
-  }
-
-  def struct_fields; FIELDS; end
-
-  def validate
-    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field writeId is unset!') unless @writeId
-  end
-
-  ::Thrift::Struct.generate_accessors self
-end
-
-class FinalizeWriteIdRequest
-  include ::Thrift::Struct, ::Thrift::Struct_Union
-  DBNAME = 1
-  TBLNAME = 2
-  WRITEID = 3
-  COMMIT = 4
-
-  FIELDS = {
-    DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
-    TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
-    WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId'},
-    COMMIT => {:type => ::Thrift::Types::BOOL, :name => 'commit'}
-  }
-
-  def struct_fields; FIELDS; end
-
-  def validate
-    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
-    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName
-    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field writeId is unset!') unless @writeId
-    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field commit is unset!') if @commit.nil?
-  end
-
-  ::Thrift::Struct.generate_accessors self
-end
-
-class FinalizeWriteIdResult
-  include ::Thrift::Struct, ::Thrift::Struct_Union
-
-  FIELDS = {
-
-  }
-
-  def struct_fields; FIELDS; end
-
-  def validate
-  end
-
-  ::Thrift::Struct.generate_accessors self
-end
-
-class HeartbeatWriteIdRequest
-  include ::Thrift::Struct, ::Thrift::Struct_Union
-  DBNAME = 1
-  TBLNAME = 2
-  WRITEID = 3
-
-  FIELDS = {
-    DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
-    TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
-    WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId'}
-  }
-
-  def struct_fields; FIELDS; end
-
-  def validate
-    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
-    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName
-    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field writeId is unset!') unless @writeId
-  end
-
-  ::Thrift::Struct.generate_accessors self
-end
-
-class HeartbeatWriteIdResult
-  include ::Thrift::Struct, ::Thrift::Struct_Union
-
-  FIELDS = {
-
-  }
-
-  def struct_fields; FIELDS; end
-
-  def validate
-  end
-
-  ::Thrift::Struct.generate_accessors self
-end
-
-class GetValidWriteIdsRequest
-  include ::Thrift::Struct, ::Thrift::Struct_Union
-  DBNAME = 1
-  TBLNAME = 2
-
-  FIELDS = {
-    DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
-    TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}
-  }
-
-  def struct_fields; FIELDS; end
-
-  def validate
-    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
-    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName
-  end
-
-  ::Thrift::Struct.generate_accessors self
-end
-
-class GetValidWriteIdsResult
-  include ::Thrift::Struct, ::Thrift::Struct_Union
-  LOWWATERMARKID = 1
-  HIGHWATERMARKID = 2
-  AREIDSVALID = 3
-  IDS = 4
-
-  FIELDS = {
-    LOWWATERMARKID => {:type => ::Thrift::Types::I64, :name => 'lowWatermarkId'},
-    HIGHWATERMARKID => {:type => ::Thrift::Types::I64, :name => 'highWatermarkId'},
-    AREIDSVALID => {:type => ::Thrift::Types::BOOL, :name => 'areIdsValid', :optional => true},
-    IDS => {:type => ::Thrift::Types::LIST, :name => 'ids', :element => {:type => ::Thrift::Types::I64}, :optional => true}
-  }
-
-  def struct_fields; FIELDS; end
-
-  def validate
-    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field lowWatermarkId is unset!') unless @lowWatermarkId
-    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field highWatermarkId is unset!') unless @highWatermarkId
-  end
-
-  ::Thrift::Struct.generate_accessors self
-end
-
 class GetAllFunctionsResponse
   include ::Thrift::Struct, ::Thrift::Struct_Union
   FUNCTIONS = 1

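The Ruby structs deleted above are generated from the same Thrift IDL as the Java and PHP bindings removed elsewhere in this patch. As an illustrative sketch only (not code from the patch, and assuming the pre-removal generated Java bindings and an already-connected client), a caller of the valid-write-ids API might have looked like this:

    import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsRequest;
    import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResult;

    // Ask the metastore which MM write IDs are visible for a table; the
    // "default"/"mm_tbl" names are hypothetical.
    GetValidWriteIdsRequest req = new GetValidWriteIdsRequest("default", "mm_tbl");
    GetValidWriteIdsResult res = client.get_valid_write_ids(req);
    // IDs at or below lowWatermarkId are committed; highWatermarkId is the next
    // unallocated ID; the optional ids list, when areIdsValid is set, enumerates
    // the committed IDs in between.
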
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index 36be2e8..04e63f3 100644
--- a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -2562,66 +2562,6 @@ module ThriftHiveMetastore
       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'cache_file_metadata failed: unknown result')
     end
 
-    def get_next_write_id(req)
-      send_get_next_write_id(req)
-      return recv_get_next_write_id()
-    end
-
-    def send_get_next_write_id(req)
-      send_message('get_next_write_id', Get_next_write_id_args, :req => req)
-    end
-
-    def recv_get_next_write_id()
-      result = receive_message(Get_next_write_id_result)
-      return result.success unless result.success.nil?
-      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_next_write_id failed: unknown result')
-    end
-
-    def finalize_write_id(req)
-      send_finalize_write_id(req)
-      return recv_finalize_write_id()
-    end
-
-    def send_finalize_write_id(req)
-      send_message('finalize_write_id', Finalize_write_id_args, :req => req)
-    end
-
-    def recv_finalize_write_id()
-      result = receive_message(Finalize_write_id_result)
-      return result.success unless result.success.nil?
-      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'finalize_write_id failed: unknown result')
-    end
-
-    def heartbeat_write_id(req)
-      send_heartbeat_write_id(req)
-      return recv_heartbeat_write_id()
-    end
-
-    def send_heartbeat_write_id(req)
-      send_message('heartbeat_write_id', Heartbeat_write_id_args, :req => req)
-    end
-
-    def recv_heartbeat_write_id()
-      result = receive_message(Heartbeat_write_id_result)
-      return result.success unless result.success.nil?
-      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'heartbeat_write_id failed: unknown result')
-    end
-
-    def get_valid_write_ids(req)
-      send_get_valid_write_ids(req)
-      return recv_get_valid_write_ids()
-    end
-
-    def send_get_valid_write_ids(req)
-      send_message('get_valid_write_ids', Get_valid_write_ids_args, :req => req)
-    end
-
-    def recv_get_valid_write_ids()
-      result = receive_message(Get_valid_write_ids_result)
-      return result.success unless result.success.nil?
-      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_valid_write_ids failed: unknown result')
-    end
-
   end
 
   class Processor < ::FacebookService::Processor 
@@ -4517,34 +4457,6 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'cache_file_metadata', seqid)
     end
 
-    def process_get_next_write_id(seqid, iprot, oprot)
-      args = read_args(iprot, Get_next_write_id_args)
-      result = Get_next_write_id_result.new()
-      result.success = @handler.get_next_write_id(args.req)
-      write_result(result, oprot, 'get_next_write_id', seqid)
-    end
-
-    def process_finalize_write_id(seqid, iprot, oprot)
-      args = read_args(iprot, Finalize_write_id_args)
-      result = Finalize_write_id_result.new()
-      result.success = @handler.finalize_write_id(args.req)
-      write_result(result, oprot, 'finalize_write_id', seqid)
-    end
-
-    def process_heartbeat_write_id(seqid, iprot, oprot)
-      args = read_args(iprot, Heartbeat_write_id_args)
-      result = Heartbeat_write_id_result.new()
-      result.success = @handler.heartbeat_write_id(args.req)
-      write_result(result, oprot, 'heartbeat_write_id', seqid)
-    end
-
-    def process_get_valid_write_ids(seqid, iprot, oprot)
-      args = read_args(iprot, Get_valid_write_ids_args)
-      result = Get_valid_write_ids_result.new()
-      result.success = @handler.get_valid_write_ids(args.req)
-      write_result(result, oprot, 'get_valid_write_ids', seqid)
-    end
-
   end
 
   # HELPER FUNCTIONS AND STRUCTURES
@@ -10319,133 +10231,5 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
-  class Get_next_write_id_args
-    include ::Thrift::Struct, ::Thrift::Struct_Union
-    REQ = 1
-
-    FIELDS = {
-      REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::GetNextWriteIdRequest}
-    }
-
-    def struct_fields; FIELDS; end
-
-    def validate
-    end
-
-    ::Thrift::Struct.generate_accessors self
-  end
-
-  class Get_next_write_id_result
-    include ::Thrift::Struct, ::Thrift::Struct_Union
-    SUCCESS = 0
-
-    FIELDS = {
-      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::GetNextWriteIdResult}
-    }
-
-    def struct_fields; FIELDS; end
-
-    def validate
-    end
-
-    ::Thrift::Struct.generate_accessors self
-  end
-
-  class Finalize_write_id_args
-    include ::Thrift::Struct, ::Thrift::Struct_Union
-    REQ = 1
-
-    FIELDS = {
-      REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::FinalizeWriteIdRequest}
-    }
-
-    def struct_fields; FIELDS; end
-
-    def validate
-    end
-
-    ::Thrift::Struct.generate_accessors self
-  end
-
-  class Finalize_write_id_result
-    include ::Thrift::Struct, ::Thrift::Struct_Union
-    SUCCESS = 0
-
-    FIELDS = {
-      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::FinalizeWriteIdResult}
-    }
-
-    def struct_fields; FIELDS; end
-
-    def validate
-    end
-
-    ::Thrift::Struct.generate_accessors self
-  end
-
-  class Heartbeat_write_id_args
-    include ::Thrift::Struct, ::Thrift::Struct_Union
-    REQ = 1
-
-    FIELDS = {
-      REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::HeartbeatWriteIdRequest}
-    }
-
-    def struct_fields; FIELDS; end
-
-    def validate
-    end
-
-    ::Thrift::Struct.generate_accessors self
-  end
-
-  class Heartbeat_write_id_result
-    include ::Thrift::Struct, ::Thrift::Struct_Union
-    SUCCESS = 0
-
-    FIELDS = {
-      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::HeartbeatWriteIdResult}
-    }
-
-    def struct_fields; FIELDS; end
-
-    def validate
-    end
-
-    ::Thrift::Struct.generate_accessors self
-  end
-
-  class Get_valid_write_ids_args
-    include ::Thrift::Struct, ::Thrift::Struct_Union
-    REQ = 1
-
-    FIELDS = {
-      REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::GetValidWriteIdsRequest}
-    }
-
-    def struct_fields; FIELDS; end
-
-    def validate
-    end
-
-    ::Thrift::Struct.generate_accessors self
-  end
-
-  class Get_valid_write_ids_result
-    include ::Thrift::Struct, ::Thrift::Struct_Union
-    SUCCESS = 0
-
-    FIELDS = {
-      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::GetValidWriteIdsResult}
-    }
-
-    def struct_fields; FIELDS; end
-
-    def validate
-    end
-
-    ::Thrift::Struct.generate_accessors self
-  end
-
 end
 

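Every client stub deleted above follows the same generated send/recv shape. A minimal sketch of the equivalent Java stub, shown only to clarify the pattern (the real generated code adds protocol plumbing):

    // Each RPC is a synchronous write of the args struct followed by a blocking
    // read of the result struct; an unset success field surfaces as a
    // MISSING_RESULT application exception.
    public GetValidWriteIdsResult get_valid_write_ids(GetValidWriteIdsRequest req)
        throws org.apache.thrift.TException {
      send_get_valid_write_ids(req);     // 'get_valid_write_ids' CALL message
      return recv_get_valid_write_ids(); // result, or MISSING_RESULT exception
    }
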
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index ff3505a..504946a 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -41,7 +41,6 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
-import java.util.Random;
 import java.util.Set;
 import java.util.Timer;
 import java.util.concurrent.Callable;
@@ -83,7 +82,6 @@ import org.apache.hadoop.hive.common.metrics.common.MetricsVariable;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.io.HdfsUtils;
-import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
 import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
@@ -120,7 +118,6 @@ import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
 import org.apache.hadoop.hive.metastore.filemeta.OrcFileMetadataHandler;
 import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
-import org.apache.hadoop.hive.metastore.model.MTableWrite;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
@@ -157,9 +154,7 @@ import org.slf4j.LoggerFactory;
 
 
 import static org.apache.commons.lang.StringUtils.join;
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_COMMENT;
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName;
+
 import com.facebook.fb303.FacebookBase;
 import com.facebook.fb303.fb_status;
 import com.google.common.annotations.VisibleForTesting;
@@ -6915,216 +6910,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         throw newMetaException(e);
       }
     }
-
-    private final Random random = new Random();
-    @Override
-    public GetNextWriteIdResult get_next_write_id(GetNextWriteIdRequest req) throws TException {
-      RawStore ms = getMS();
-      String dbName = HiveStringUtils.normalizeIdentifier(req.getDbName()),
-          tblName = HiveStringUtils.normalizeIdentifier(req.getTblName());
-      startFunction("get_next_write_id", " : db=" + dbName + " tbl=" + tblName);
-      Exception exception = null;
-      long writeId = -1;
-      try {
-        int deadlockTryCount = 10;
-        int deadlockRetryBackoffMs = 200;
-        while (deadlockTryCount > 0) {
-          boolean ok = false;
-          ms.openTransaction();
-          try {
-            Table tbl = ms.getTable(dbName, tblName);
-            if (tbl == null) {
-              throw new NoSuchObjectException(dbName + "." + tblName);
-            }
-            writeId = tbl.isSetMmNextWriteId() ? tbl.getMmNextWriteId() : 0;
-            tbl.setMmNextWriteId(writeId + 1);
-            ms.alterTable(dbName, tblName, tbl);
-
-            ok = true;
-          } finally {
-            if (!ok) {
-              ms.rollbackTransaction();
-              // Exception should propagate; don't override it by breaking out of the loop.
-            } else {
-              Boolean commitResult = ms.commitTransactionExpectDeadlock();
-              if (commitResult != null) {
-                if (commitResult) break; // Assume no exception; ok to break out of the loop.
-                throw new MetaException("Failed to commit");
-              }
-            }
-          }
-          LOG.warn("Getting the next write ID failed due to a deadlock; retrying");
-          Thread.sleep(random.nextInt(deadlockRetryBackoffMs));
-        }
-
-        // Do a separate txn after we have reserved the number.
-        boolean ok = false;
-        ms.openTransaction();
-        try {
-          Table tbl = ms.getTable(dbName, tblName);
-          ms.createTableWrite(tbl, writeId, MM_WRITE_OPEN, System.currentTimeMillis());
-          ok = true;
-        } finally {
-          commitOrRollback(ms, ok);
-        }
-      } catch (Exception e) {
-        exception = e;
-        throwMetaException(e);
-      } finally {
-        endFunction("get_next_write_id", exception == null, exception, tblName);
-      }
-      return new GetNextWriteIdResult(writeId);
-    }
-
-    @Override
-    public FinalizeWriteIdResult finalize_write_id(FinalizeWriteIdRequest req) throws TException {
-      RawStore ms = getMS();
-      String dbName =  HiveStringUtils.normalizeIdentifier(req.getDbName()),
-          tblName = HiveStringUtils.normalizeIdentifier(req.getTblName());
-      long writeId = req.getWriteId();
-      boolean commit = req.isCommit();
-      startFunction("finalize_write_id", " : db=" + dbName + " tbl=" + tblName
-          + " writeId=" + writeId + " commit=" + commit);
-      Exception ex = null;
-      try {
-        boolean ok = false;
-        ms.openTransaction();
-        try {
-          MTableWrite tw = getActiveTableWrite(ms, dbName, tblName, writeId);
-          if (tw == null) {
-            throw new MetaException("Write ID " + writeId + " for " + dbName + "." + tblName
-                + " does not exist or is not active");
-          }
-          tw.setState(String.valueOf(commit ? MM_WRITE_COMMITTED : MM_WRITE_ABORTED));
-          ms.updateTableWrite(tw);
-          ok = true;
-        } finally {
-          commitOrRollback(ms, ok);
-        }
-      } catch (Exception e) {
-        ex = e;
-        throwMetaException(e);
-      } finally {
-        endFunction("finalize_write_id", ex == null, ex, tblName);
-      }
-      return new FinalizeWriteIdResult();
-    }
-
-    private void commitOrRollback(RawStore ms, boolean ok) throws MetaException {
-      if (ok) {
-        if (!ms.commitTransaction()) throw new MetaException("Failed to commit");
-      } else {
-        ms.rollbackTransaction();
-      }
-    }
-
-    @Override
-    public HeartbeatWriteIdResult heartbeat_write_id(HeartbeatWriteIdRequest req)
-        throws TException {
-      RawStore ms = getMS();
-      String dbName = HiveStringUtils.normalizeIdentifier(req.getDbName()),
-          tblName = HiveStringUtils.normalizeIdentifier(req.getTblName());
-      long writeId = req.getWriteId();
-      startFunction("heartbeat_write_id", " : db="
-          + dbName + " tbl=" + tblName + " writeId=" + writeId);
-      Exception ex = null;
-      boolean wasAborted = false;
-      try {
-        boolean ok = false;
-        ms.openTransaction();
-        try {
-          MTableWrite tw = getActiveTableWrite(ms, dbName, tblName, writeId);
-          long absTimeout = HiveConf.getTimeVar(getConf(),
-              ConfVars.HIVE_METASTORE_MM_ABSOLUTE_TIMEOUT, TimeUnit.MILLISECONDS);
-          if (tw.getCreated() + absTimeout < System.currentTimeMillis()) {
-            tw.setState(String.valueOf(MM_WRITE_ABORTED));
-            wasAborted = true;
-          }
-          tw.setLastHeartbeat(System.currentTimeMillis());
-          ms.updateTableWrite(tw);
-          ok = true;
-        } finally {
-          commitOrRollback(ms, ok);
-        }
-      } catch (Exception e) {
-        ex = e;
-        throwMetaException(e);
-      } finally {
-        endFunction("heartbeat_write_id", ex == null, ex, tblName);
-      }
-      if (wasAborted) throw new MetaException("The write was aborted due to absolute timeout");
-      return new HeartbeatWriteIdResult();
-    }
-
-    private MTableWrite getActiveTableWrite(RawStore ms, String dbName,
-        String tblName, long writeId) throws MetaException {
-      MTableWrite tw = ms.getTableWrite(dbName, tblName, writeId);
-      if (tw == null) {
-        return null;
-      }
-      assert tw.getState().length() == 1;
-      char state = tw.getState().charAt(0);
-      if (state != MM_WRITE_OPEN) {
-        throw new MetaException("Invalid write state: " + state);
-      }
-      return tw;
-    }
-
-    @Override
-    public GetValidWriteIdsResult get_valid_write_ids(
-        GetValidWriteIdsRequest req) throws TException {
-      RawStore ms = getMS();
-      String dbName = req.getDbName(), tblName = req.getTblName();
-      startFunction("get_valid_write_ids", " : db=" + dbName + " tbl=" + tblName);
-      GetValidWriteIdsResult result = new GetValidWriteIdsResult();
-      Exception ex = null;
-      try {
-        boolean ok = false;
-        ms.openTransaction();
-        try {
-          Table tbl = ms.getTable(dbName, tblName);
-          if (tbl == null) {
-            throw new InvalidObjectException(dbName + "." + tblName);
-          }
-          long nextId = tbl.isSetMmNextWriteId() ? tbl.getMmNextWriteId() : 0;
-          long watermarkId = tbl.isSetMmWatermarkWriteId() ? tbl.getMmWatermarkWriteId() : -1;
-          if (nextId > (watermarkId + 1)) {
-            // There may be some intermediate failed or active writes; get the valid ones.
-            List<Long> ids = ms.getTableWriteIds(
-                dbName, tblName, watermarkId, nextId, MM_WRITE_COMMITTED);
-            // TODO: we could optimize here and send the smaller of the lists, and also use ranges
-            if (!ids.isEmpty()) {
-              Iterator<Long> iter = ids.iterator();
-              long oldWatermarkId = watermarkId;
-              while (iter.hasNext()) {
-                Long nextWriteId = iter.next();
-                if (nextWriteId != watermarkId + 1) break;
-                ++watermarkId;
-              }
-              long removed = watermarkId - oldWatermarkId;
-              if (removed > 0) {
-                ids = ids.subList((int)removed, ids.size());
-              }
-              if (!ids.isEmpty()) {
-                result.setIds(ids);
-                result.setAreIdsValid(true);
-              }
-            }
-          }
-          result.setHighWatermarkId(nextId);
-          result.setLowWatermarkId(watermarkId);
-          ok = true;
-        } finally {
-          commitOrRollback(ms, ok);
-        }
-      } catch (Exception e) {
-        ex = e;
-        throwMetaException(e);
-      } finally {
-        endFunction("get_valid_write_ids", ex == null, ex, tblName);
-      }
-      return result;
-    }
   }
 
 
@@ -7598,7 +7383,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           startCompactorInitiator(conf);
           startCompactorWorkers(conf);
           startCompactorCleaner(conf);
-          startMmHousekeepingThread(conf);
           startHouseKeeperService(conf);
         } catch (Throwable e) {
           LOG.error("Failure when starting the compactor, compactions may not happen, " +
@@ -7640,16 +7424,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     }
   }
 
-  private static void startMmHousekeepingThread(HiveConf conf) throws Exception {
-    long intervalMs = HiveConf.getTimeVar(conf,
-        ConfVars.HIVE_METASTORE_MM_THREAD_SCAN_INTERVAL, TimeUnit.MILLISECONDS);
-    if (intervalMs > 0) {
-      MetaStoreThread thread = new MmCleanerThread(intervalMs);
-      initializeAndStartThread(thread, conf);
-    }
-  }
-
-
   private static MetaStoreThread instantiateThread(String classname) throws Exception {
     Class<?> c = Class.forName(classname);
     Object o = c.newInstance();

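The deleted get_next_write_id handler leans on a tri-state commit: RawStore.commitTransactionExpectDeadlock() returns TRUE on success, FALSE on a failed commit, and null when the commit hit a deadlock and should be retried. A minimal sketch of that retry pattern, assuming a RawStore-like ms and a hypothetical doWork() body:

    // Retry a conflict-prone metastore transaction a bounded number of times,
    // backing off by a random delay whenever the commit deadlocks.
    int tries = 10;
    Random random = new Random();
    while (tries-- > 0) {
      ms.openTransaction();
      doWork(ms);                                    // hypothetical unit of work
      Boolean commitResult = ms.commitTransactionExpectDeadlock();
      if (commitResult == null) {                    // deadlock: back off, retry
        Thread.sleep(random.nextInt(200));
        continue;
      }
      if (commitResult) break;                       // committed successfully
      throw new MetaException("Failed to commit");   // hard failure: no retry
    }
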
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 4912a31..0d8a76a 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -2539,27 +2539,4 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
     CacheFileMetadataResult result = client.cache_file_metadata(req);
     return result.isIsSupported();
   }
-
-  @Override
-  public long getNextTableWriteId(String dbName, String tableName) throws TException {
-    return client.get_next_write_id(new GetNextWriteIdRequest(dbName, tableName)).getWriteId();
-  }
-
-  @Override
-  public void finalizeTableWrite(
-      String dbName, String tableName, long writeId, boolean commit) throws TException {
-    client.finalize_write_id(new FinalizeWriteIdRequest(dbName, tableName, writeId, commit));
-  }
-
-  @Override
-  public void heartbeatTableWrite(
-      String dbName, String tableName, long writeId) throws TException {
-    client.heartbeat_write_id(new HeartbeatWriteIdRequest(dbName, tableName, writeId));
-  }
-
-  @Override
-  public GetValidWriteIdsResult getValidWriteIds(
-      String dbName, String tableName) throws TException {
-    return client.get_valid_write_ids(new GetValidWriteIdsRequest(dbName, tableName));
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 82db281..023a289 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -56,7 +56,6 @@ import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest;
 import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse;
 import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
 import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
-import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResult;
 import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
@@ -1665,13 +1664,4 @@ public interface IMetaStoreClient {
 
   void addForeignKey(List<SQLForeignKey> foreignKeyCols) throws
   MetaException, NoSuchObjectException, TException;
-
-  long getNextTableWriteId(String dbName, String tableName) throws TException;
-
-  void heartbeatTableWrite(String dbName, String tableName, long writeId) throws TException;
-
-  void finalizeTableWrite(String dbName, String tableName, long writeId,
-      boolean commit) throws TException;
-
-  GetValidWriteIdsResult getValidWriteIds(String dbName, String tableName) throws TException;
 }

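Taken together, the four methods removed above made up the pre-ACID MM write lifecycle: reserve an ID, heartbeat while the write runs, then commit or abort. A hedged sketch of how a writer drove it (database and table names hypothetical):

    IMetaStoreClient msc = ...;                       // obtained elsewhere
    long writeId = msc.getNextTableWriteId("default", "mm_tbl");
    try {
      // ... write output files under a writeId-derived directory ...
      msc.heartbeatTableWrite("default", "mm_tbl", writeId);       // keep it open
      msc.finalizeTableWrite("default", "mm_tbl", writeId, true);  // commit
    } catch (Exception e) {
      msc.finalizeTableWrite("default", "mm_tbl", writeId, false); // abort
      throw e;
    }
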
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/java/org/apache/hadoop/hive/metastore/MmCleanerThread.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MmCleanerThread.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MmCleanerThread.java
deleted file mode 100644
index d99b0d7..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MmCleanerThread.java
+++ /dev/null
@@ -1,397 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.ListIterator;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.ValidWriteIds;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.RawStore.FullTableName;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.model.MTableWrite;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Supplier;
-
-public class MmCleanerThread extends Thread implements MetaStoreThread {
-  private final static Logger LOG = LoggerFactory.getLogger(MmCleanerThread.class);
-  private HiveConf conf;
-  private int threadId;
-  private AtomicBoolean stop;
-  private long intervalMs;
-  private long heartbeatTimeoutMs, absTimeoutMs, abortedGraceMs;
-  /** Time override for tests. Only used for MM timestamp logic, not for the thread timing. */
-  private Supplier<Long> timeOverride = null;
-
-  public MmCleanerThread(long intervalMs) {
-    this.intervalMs = intervalMs;
-  }
-
-  @VisibleForTesting
-  void overrideTime(Supplier<Long> timeOverride) {
-    this.timeOverride = timeOverride;
-  }
-
-  private long getTimeMs() {
-    return timeOverride == null ? System.currentTimeMillis() : timeOverride.get();
-  }
-
-  @Override
-  public void setHiveConf(HiveConf conf) {
-    this.conf = conf;
-    heartbeatTimeoutMs = HiveConf.getTimeVar(
-        conf, ConfVars.HIVE_METASTORE_MM_HEARTBEAT_TIMEOUT, TimeUnit.MILLISECONDS);
-    absTimeoutMs = HiveConf.getTimeVar(
-        conf, ConfVars.HIVE_METASTORE_MM_ABSOLUTE_TIMEOUT, TimeUnit.MILLISECONDS);
-    abortedGraceMs = HiveConf.getTimeVar(
-        conf, ConfVars.HIVE_METASTORE_MM_ABORTED_GRACE_PERIOD, TimeUnit.MILLISECONDS);
-    if (heartbeatTimeoutMs > absTimeoutMs) {
-      throw new RuntimeException("Heartbeat timeout " + heartbeatTimeoutMs
-          + " cannot be larger than the absolute timeout " + absTimeoutMs);
-    }
-  }
-
-  @Override
-  public void setThreadId(int threadId) {
-    this.threadId = threadId;
-  }
-
-  @Override
-  public void init(AtomicBoolean stop, AtomicBoolean looped) throws MetaException {
-    this.stop = stop;
-    setPriority(MIN_PRIORITY);
-    setDaemon(true);
-  }
-
-  @Override
-  public void run() {
-    // Only get RS here, when we are already on the thread.
-    RawStore rs = getRs();
-    while (true) {
-      if (checkStop()) return;
-      long endTimeNs = System.nanoTime() + intervalMs * 1000000L;
-
-      runOneIteration(rs);
-
-      if (checkStop()) return;
-      long waitTimeMs = (endTimeNs - System.nanoTime()) / 1000000L;
-      if (waitTimeMs <= 0) continue;
-      try {
-        Thread.sleep(waitTimeMs);
-      } catch (InterruptedException e) {
-        LOG.error("Thread was interrupted and will now exit");
-        return;
-      }
-    }
-  }
-
-  private RawStore getRs() {
-    try {
-      return RawStoreProxy.getProxy(conf, conf,
-          conf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL), threadId);
-    } catch (MetaException e) {
-      LOG.error("Failed to get RawStore; the thread will now die", e);
-      throw new RuntimeException(e);
-    }
-  }
-
-  private boolean checkStop() {
-    if (!stop.get()) return false;
-    LOG.info("Stopping due to an external request");
-    return true;
-  }
-
-  @VisibleForTesting
-  void runOneIteration(RawStore rs) {
-    // We only get the names here; we want to get and process each table in a separate DB txn.
-    List<FullTableName> mmTables = null;
-    try {
-      mmTables = rs.getAllMmTablesForCleanup();
-    } catch (MetaException e) {
-      LOG.error("Failed to get tables", e);
-      return;
-    }
-    for (FullTableName tableName : mmTables) {
-      try {
-        processOneTable(tableName, rs);
-      } catch (MetaException e) {
-        LOG.error("Failed to process " + tableName, e);
-      }
-    }
-  }
-
-  private void processOneTable(FullTableName table, RawStore rs) throws MetaException {
-    // 1. Time out writes that have been running for a while.
-    //    a) Heartbeat timeouts (not enabled right now as heartbeat is not implemented).
-    //    b) Absolute timeouts.
-    //    c) Gaps where a later write ID exists whose derived absolute timeout has already
-    //       passed. This small special case can happen if we increment the next ID but fail to
-    //       insert the write ID record, which we do in a separate txn to keep the
-    //       conflict-prone increment txn short.
-    LOG.info("Processing table " + table);
-    Table t = rs.getTable(table.dbName, table.tblName);
-    HashSet<Long> removeWriteIds = new HashSet<>(), cleanupOnlyWriteIds = new HashSet<>();
-    getWritesThatReadyForCleanUp(t, table, rs, removeWriteIds, cleanupOnlyWriteIds);
-
-    // 2. Delete the aborted writes' files from the FS.
-    deleteAbortedWriteIdFiles(table, rs, t, removeWriteIds);
-    deleteAbortedWriteIdFiles(table, rs, t, cleanupOnlyWriteIds);
-    // removeWriteIds now only contains the writes that were fully cleaned up.
-
-    // 3. Advance the watermark.
-    advanceWatermark(table, rs, removeWriteIds);
-  }
-
-  private void getWritesThatReadyForCleanUp(Table t, FullTableName table, RawStore rs,
-      HashSet<Long> removeWriteIds, HashSet<Long> cleanupOnlyWriteIds) throws MetaException {
-    // We will generally ignore errors here. First, we expect some conflicts; second, we will get
-    // the final view of things after we do (or try, at any rate) all the updates.
-    long watermarkId = t.isSetMmWatermarkWriteId() ? t.getMmWatermarkWriteId() : -1,
-        nextWriteId = t.isSetMmNextWriteId() ? t.getMmNextWriteId() : 0;
-    long now = getTimeMs(), earliestOkHeartbeatMs = now - heartbeatTimeoutMs,
-        earliestOkCreateMs = now - absTimeoutMs, latestAbortedMs = now - abortedGraceMs;
-
-    List<MTableWrite> writes = rs.getTableWrites(
-        table.dbName, table.tblName, watermarkId, nextWriteId);
-    ListIterator<MTableWrite> iter = writes.listIterator(writes.size());
-    long expectedId = -1, nextCreated = -1;
-    // We will go in reverse order and add aborted writes for the gaps that have a following
-    // write ID that would imply that the previous one (created earlier) would have already
-    // expired, had it been open and not updated.
-    while (iter.hasPrevious()) {
-      MTableWrite write = iter.previous();
-      addTimedOutMissingWriteIds(rs, table.dbName, table.tblName, write.getWriteId(),
-          nextCreated, expectedId, earliestOkHeartbeatMs, cleanupOnlyWriteIds, now);
-      expectedId = write.getWriteId() - 1;
-      nextCreated = write.getCreated();
-      char state = write.getState().charAt(0);
-      if (state == HiveMetaStore.MM_WRITE_ABORTED) {
-        if (write.getLastHeartbeat() < latestAbortedMs) {
-          removeWriteIds.add(write.getWriteId());
-        } else {
-          cleanupOnlyWriteIds.add(write.getWriteId());
-        }
-      } else if (state == HiveMetaStore.MM_WRITE_OPEN && write.getCreated() < earliestOkCreateMs) {
-        // TODO: also check for heartbeat here.
-        if (expireTimedOutWriteId(rs, table.dbName, table.tblName, write.getWriteId(),
-            now, earliestOkCreateMs, earliestOkHeartbeatMs, cleanupOnlyWriteIds)) {
-          cleanupOnlyWriteIds.add(write.getWriteId());
-        }
-      }
-    }
-    addTimedOutMissingWriteIds(rs, table.dbName, table.tblName, watermarkId,
-        nextCreated, expectedId, earliestOkHeartbeatMs, cleanupOnlyWriteIds, now);
-  }
-
-  private void advanceWatermark(
-      FullTableName table, RawStore rs, HashSet<Long> cleanedUpWriteIds) {
-    if (!rs.openTransaction()) {
-      LOG.error("Cannot open transaction");
-      return;
-    }
-    boolean success = false;
-    try {
-      Table t = rs.getTable(table.dbName, table.tblName);
-      if (t == null) {
-        return;
-      }
-      long watermarkId = t.getMmWatermarkWriteId();
-      List<Long> writeIds = rs.getTableWriteIds(table.dbName, table.tblName, watermarkId,
-          t.getMmNextWriteId(), HiveMetaStore.MM_WRITE_COMMITTED);
-      long expectedId = watermarkId + 1;
-      boolean hasGap = false;
-      Iterator<Long> idIter = writeIds.iterator();
-      while (idIter.hasNext()) {
-        long next = idIter.next();
-        if (next < expectedId) continue;
-        while (next > expectedId) {
-          if (!cleanedUpWriteIds.contains(expectedId)) {
-            hasGap = true;
-            break;
-          }
-          ++expectedId;
-        }
-        if (hasGap) break;
-        ++expectedId;
-      }
-      // Make sure we also advance over the trailing aborted ones.
-      if (!hasGap) {
-        while (cleanedUpWriteIds.contains(expectedId)) {
-          ++expectedId;
-        }
-      }
-      long newWatermarkId = expectedId - 1;
-      if (newWatermarkId > watermarkId) {
-        t.setMmWatermarkWriteId(newWatermarkId);
-        rs.alterTable(table.dbName, table.tblName, t);
-        rs.deleteTableWrites(table.dbName, table.tblName, -1, expectedId);
-      }
-      success = true;
-    } catch (Exception ex) {
-      // TODO: should we try a couple times on conflicts? Aborted writes cannot be unaborted.
-      LOG.error("Failed to advance watermark", ex);
-      rs.rollbackTransaction();
-    }
-    if (success) {
-      tryCommit(rs);
-    }
-  }
-
-  private void deleteAbortedWriteIdFiles(
-      FullTableName table, RawStore rs, Table t, HashSet<Long> cleanUpWriteIds) {
-    if (cleanUpWriteIds.isEmpty()) return;
-    if (t.getPartitionKeysSize() > 0) {
-      for (String location : rs.getAllPartitionLocations(table.dbName, table.tblName)) {
-        deleteAbortedWriteIdFiles(location, cleanUpWriteIds);
-      }
-    } else {
-      deleteAbortedWriteIdFiles(t.getSd().getLocation(), cleanUpWriteIds);
-    }
-  }
-
-  private void deleteAbortedWriteIdFiles(String location, HashSet<Long> abortedWriteIds) {
-    LOG.info("Looking for " + abortedWriteIds.size() + " aborted write output in " + location);
-    Path path = new Path(location);
-    FileSystem fs;
-    FileStatus[] files;
-    try {
-      fs = path.getFileSystem(conf);
-      if (!fs.exists(path)) {
-        LOG.warn(path + " does not exist; assuming that the cleanup is not needed.");
-        return;
-      }
-      // TODO# this doesn't account for list bucketing. Do nothing now, ACID will solve all problems.
-      files = fs.listStatus(path);
-    } catch (Exception ex) {
-      LOG.error("Failed to get files for " + path + "; cannot ensure cleanup for any writes");
-      abortedWriteIds.clear();
-      return;
-    }
-    for (FileStatus file : files) {
-      Path childPath = file.getPath();
-      if (!file.isDirectory()) {
-        LOG.warn("Skipping a non-directory file " + childPath);
-        continue;
-      }
-      Long writeId = ValidWriteIds.extractWriteId(childPath);
-      if (writeId == null) {
-        LOG.warn("Skipping an unknown directory " + childPath);
-        continue;
-      }
-      if (!abortedWriteIds.contains(writeId.longValue())) continue;
-      try {
-        if (!fs.delete(childPath, true)) throw new IOException("delete returned false");
-      } catch (Exception ex) {
-        LOG.error("Couldn't delete " + childPath + "; not cleaning up " + writeId, ex);
-        abortedWriteIds.remove(writeId.longValue());
-      }
-    }
-  }
-
-  private boolean expireTimedOutWriteId(RawStore rs, String dbName,
-      String tblName, long writeId, long now, long earliestOkCreatedMs,
-      long earliestOkHeartbeatMs, HashSet<Long> cleanupOnlyWriteIds) {
-    if (!rs.openTransaction()) {
-      return false;
-    }
-    try {
-      MTableWrite tw = rs.getTableWrite(dbName, tblName, writeId);
-      if (tw == null) {
-        // The write has been updated since the time when we thought it had expired.
-        tryCommit(rs);
-        return true;
-      }
-      char state = tw.getState().charAt(0);
-      if (state != HiveMetaStore.MM_WRITE_OPEN
-          || (tw.getCreated() > earliestOkCreatedMs
-              && tw.getLastHeartbeat() > earliestOkHeartbeatMs)) {
-        tryCommit(rs);
-        return true; // The write has been updated since the time when we thought it had expired.
-      }
-      tw.setState(String.valueOf(HiveMetaStore.MM_WRITE_ABORTED));
-      tw.setLastHeartbeat(now);
-      rs.updateTableWrite(tw);
-    } catch (Exception ex) {
-      LOG.error("Failed to update an expired table write", ex);
-      rs.rollbackTransaction();
-      return false;
-    }
-    boolean result = tryCommit(rs);
-    if (result) {
-      cleanupOnlyWriteIds.add(writeId);
-    }
-    return result;
-  }
-
-  private boolean tryCommit(RawStore rs) {
-    try {
-      return rs.commitTransaction();
-    } catch (Exception ex) {
-      LOG.error("Failed to commit transaction", ex);
-      return false;
-    }
-  }
-
-  private boolean addTimedOutMissingWriteIds(RawStore rs, String dbName, String tblName,
-      long foundPrevId, long nextCreated, long expectedId, long earliestOkHeartbeatMs,
-      HashSet<Long> cleanupOnlyWriteIds, long now) throws MetaException {
-    // Assume all missing ones are created at the same time as the next present write ID.
-    // We also assume missing writes never had any heartbeats.
-    if (nextCreated >= earliestOkHeartbeatMs || expectedId < 0) return true;
-    Table t = null;
-    List<Long> localCleanupOnlyWriteIds = new ArrayList<>();
-    while (foundPrevId < expectedId) {
-      if (t == null && !rs.openTransaction()) {
-        LOG.error("Cannot open transaction; skipping");
-        return false;
-      }
-      try {
-        if (t == null) {
-          t = rs.getTable(dbName, tblName);
-        }
-        // We don't need to double check if the write exists; the unique index will cause an error.
-        rs.createTableWrite(t, expectedId, HiveMetaStore.MM_WRITE_ABORTED, now);
-      } catch (Exception ex) {
-        // TODO: don't log conflict exceptions?.. although we barely ever expect them.
-        LOG.error("Failed to create a missing table write", ex);
-        rs.rollbackTransaction();
-        return false;
-      }
-      localCleanupOnlyWriteIds.add(expectedId);
-      --expectedId;
-    }
-    boolean result = (t == null || tryCommit(rs));
-    if (result) {
-      cleanupOnlyWriteIds.addAll(localCleanupOnlyWriteIds);
-    }
-    return result;
-  }
-}


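The watermark advance in the deleted MmCleanerThread walks the committed write IDs upward from the current watermark, treating fully cleaned-up aborted IDs as passable gaps and stopping at the first real gap. The core computation, extracted as a self-contained sketch:

    import java.util.List;
    import java.util.Set;

    // Returns the highest write ID such that every ID at or below it is either
    // committed or a cleaned-up aborted write; IDs beyond the first real gap
    // stay above the watermark until the gap resolves.
    static long advanceWatermark(long watermark, List<Long> committedIds,
        Set<Long> cleanedUp) {
      long expected = watermark + 1;
      for (long id : committedIds) {
        if (id < expected) continue;       // already covered by the watermark
        while (id > expected) {            // gap before this committed ID
          if (!cleanedUp.contains(expected)) return expected - 1; // real gap
          ++expected;                      // cleaned-up abort: skip over it
        }
        ++expected;                        // consume the committed ID itself
      }
      while (cleanedUp.contains(expected)) ++expected; // trailing aborts
      return expected - 1;
    }
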
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index a750a1c..4fb7183 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -1195,26 +1195,6 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
    * @return \metastore\CacheFileMetadataResult
    */
   public function cache_file_metadata(\metastore\CacheFileMetadataRequest $req);
-  /**
-   * @param \metastore\GetNextWriteIdRequest $req
-   * @return \metastore\GetNextWriteIdResult
-   */
-  public function get_next_write_id(\metastore\GetNextWriteIdRequest $req);
-  /**
-   * @param \metastore\FinalizeWriteIdRequest $req
-   * @return \metastore\FinalizeWriteIdResult
-   */
-  public function finalize_write_id(\metastore\FinalizeWriteIdRequest $req);
-  /**
-   * @param \metastore\HeartbeatWriteIdRequest $req
-   * @return \metastore\HeartbeatWriteIdResult
-   */
-  public function heartbeat_write_id(\metastore\HeartbeatWriteIdRequest $req);
-  /**
-   * @param \metastore\GetValidWriteIdsRequest $req
-   * @return \metastore\GetValidWriteIdsResult
-   */
-  public function get_valid_write_ids(\metastore\GetValidWriteIdsRequest $req);
 }
 
 class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metastore\ThriftHiveMetastoreIf {
@@ -9961,210 +9941,6 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
     throw new \Exception("cache_file_metadata failed: unknown result");
   }
 
-  public function get_next_write_id(\metastore\GetNextWriteIdRequest $req)
-  {
-    $this->send_get_next_write_id($req);
-    return $this->recv_get_next_write_id();
-  }
-
-  public function send_get_next_write_id(\metastore\GetNextWriteIdRequest $req)
-  {
-    $args = new \metastore\ThriftHiveMetastore_get_next_write_id_args();
-    $args->req = $req;
-    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
-    if ($bin_accel)
-    {
-      thrift_protocol_write_binary($this->output_, 'get_next_write_id', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
-    }
-    else
-    {
-      $this->output_->writeMessageBegin('get_next_write_id', TMessageType::CALL, $this->seqid_);
-      $args->write($this->output_);
-      $this->output_->writeMessageEnd();
-      $this->output_->getTransport()->flush();
-    }
-  }
-
-  public function recv_get_next_write_id()
-  {
-    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
-    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_next_write_id_result', $this->input_->isStrictRead());
-    else
-    {
-      $rseqid = 0;
-      $fname = null;
-      $mtype = 0;
-
-      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
-      if ($mtype == TMessageType::EXCEPTION) {
-        $x = new TApplicationException();
-        $x->read($this->input_);
-        $this->input_->readMessageEnd();
-        throw $x;
-      }
-      $result = new \metastore\ThriftHiveMetastore_get_next_write_id_result();
-      $result->read($this->input_);
-      $this->input_->readMessageEnd();
-    }
-    if ($result->success !== null) {
-      return $result->success;
-    }
-    throw new \Exception("get_next_write_id failed: unknown result");
-  }
-
-  public function finalize_write_id(\metastore\FinalizeWriteIdRequest $req)
-  {
-    $this->send_finalize_write_id($req);
-    return $this->recv_finalize_write_id();
-  }
-
-  public function send_finalize_write_id(\metastore\FinalizeWriteIdRequest $req)
-  {
-    $args = new \metastore\ThriftHiveMetastore_finalize_write_id_args();
-    $args->req = $req;
-    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
-    if ($bin_accel)
-    {
-      thrift_protocol_write_binary($this->output_, 'finalize_write_id', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
-    }
-    else
-    {
-      $this->output_->writeMessageBegin('finalize_write_id', TMessageType::CALL, $this->seqid_);
-      $args->write($this->output_);
-      $this->output_->writeMessageEnd();
-      $this->output_->getTransport()->flush();
-    }
-  }
-
-  public function recv_finalize_write_id()
-  {
-    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
-    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_finalize_write_id_result', $this->input_->isStrictRead());
-    else
-    {
-      $rseqid = 0;
-      $fname = null;
-      $mtype = 0;
-
-      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
-      if ($mtype == TMessageType::EXCEPTION) {
-        $x = new TApplicationException();
-        $x->read($this->input_);
-        $this->input_->readMessageEnd();
-        throw $x;
-      }
-      $result = new \metastore\ThriftHiveMetastore_finalize_write_id_result();
-      $result->read($this->input_);
-      $this->input_->readMessageEnd();
-    }
-    if ($result->success !== null) {
-      return $result->success;
-    }
-    throw new \Exception("finalize_write_id failed: unknown result");
-  }
-
-  public function heartbeat_write_id(\metastore\HeartbeatWriteIdRequest $req)
-  {
-    $this->send_heartbeat_write_id($req);
-    return $this->recv_heartbeat_write_id();
-  }
-
-  public function send_heartbeat_write_id(\metastore\HeartbeatWriteIdRequest $req)
-  {
-    $args = new \metastore\ThriftHiveMetastore_heartbeat_write_id_args();
-    $args->req = $req;
-    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
-    if ($bin_accel)
-    {
-      thrift_protocol_write_binary($this->output_, 'heartbeat_write_id', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
-    }
-    else
-    {
-      $this->output_->writeMessageBegin('heartbeat_write_id', TMessageType::CALL, $this->seqid_);
-      $args->write($this->output_);
-      $this->output_->writeMessageEnd();
-      $this->output_->getTransport()->flush();
-    }
-  }
-
-  public function recv_heartbeat_write_id()
-  {
-    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
-    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_heartbeat_write_id_result', $this->input_->isStrictRead());
-    else
-    {
-      $rseqid = 0;
-      $fname = null;
-      $mtype = 0;
-
-      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
-      if ($mtype == TMessageType::EXCEPTION) {
-        $x = new TApplicationException();
-        $x->read($this->input_);
-        $this->input_->readMessageEnd();
-        throw $x;
-      }
-      $result = new \metastore\ThriftHiveMetastore_heartbeat_write_id_result();
-      $result->read($this->input_);
-      $this->input_->readMessageEnd();
-    }
-    if ($result->success !== null) {
-      return $result->success;
-    }
-    throw new \Exception("heartbeat_write_id failed: unknown result");
-  }
-
-  public function get_valid_write_ids(\metastore\GetValidWriteIdsRequest $req)
-  {
-    $this->send_get_valid_write_ids($req);
-    return $this->recv_get_valid_write_ids();
-  }
-
-  public function send_get_valid_write_ids(\metastore\GetValidWriteIdsRequest $req)
-  {
-    $args = new \metastore\ThriftHiveMetastore_get_valid_write_ids_args();
-    $args->req = $req;
-    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
-    if ($bin_accel)
-    {
-      thrift_protocol_write_binary($this->output_, 'get_valid_write_ids', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
-    }
-    else
-    {
-      $this->output_->writeMessageBegin('get_valid_write_ids', TMessageType::CALL, $this->seqid_);
-      $args->write($this->output_);
-      $this->output_->writeMessageEnd();
-      $this->output_->getTransport()->flush();
-    }
-  }
-
-  public function recv_get_valid_write_ids()
-  {
-    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
-    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_valid_write_ids_result', $this->input_->isStrictRead());
-    else
-    {
-      $rseqid = 0;
-      $fname = null;
-      $mtype = 0;
-
-      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
-      if ($mtype == TMessageType::EXCEPTION) {
-        $x = new TApplicationException();
-        $x->read($this->input_);
-        $this->input_->readMessageEnd();
-        throw $x;
-      }
-      $result = new \metastore\ThriftHiveMetastore_get_valid_write_ids_result();
-      $result->read($this->input_);
-      $this->input_->readMessageEnd();
-    }
-    if ($result->success !== null) {
-      return $result->success;
-    }
-    throw new \Exception("get_valid_write_ids failed: unknown result");
-  }
-
 }
 
 // HELPER FUNCTIONS AND STRUCTURES
@@ -11316,14 +11092,14 @@ class ThriftHiveMetastore_get_databases_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size603 = 0;
-            $_etype606 = 0;
-            $xfer += $input->readListBegin($_etype606, $_size603);
-            for ($_i607 = 0; $_i607 < $_size603; ++$_i607)
+            $_size596 = 0;
+            $_etype599 = 0;
+            $xfer += $input->readListBegin($_etype599, $_size596);
+            for ($_i600 = 0; $_i600 < $_size596; ++$_i600)
             {
-              $elem608 = null;
-              $xfer += $input->readString($elem608);
-              $this->success []= $elem608;
+              $elem601 = null;
+              $xfer += $input->readString($elem601);
+              $this->success []= $elem601;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -11359,9 +11135,9 @@ class ThriftHiveMetastore_get_databases_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter609)
+          foreach ($this->success as $iter602)
           {
-            $xfer += $output->writeString($iter609);
+            $xfer += $output->writeString($iter602);
           }
         }
         $output->writeListEnd();
@@ -11492,14 +11268,14 @@ class ThriftHiveMetastore_get_all_databases_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size610 = 0;
-            $_etype613 = 0;
-            $xfer += $input->readListBegin($_etype613, $_size610);
-            for ($_i614 = 0; $_i614 < $_size610; ++$_i614)
+            $_size603 = 0;
+            $_etype606 = 0;
+            $xfer += $input->readListBegin($_etype606, $_size603);
+            for ($_i607 = 0; $_i607 < $_size603; ++$_i607)
             {
-              $elem615 = null;
-              $xfer += $input->readString($elem615);
-              $this->success []= $elem615;
+              $elem608 = null;
+              $xfer += $input->readString($elem608);
+              $this->success []= $elem608;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -11535,9 +11311,9 @@ class ThriftHiveMetastore_get_all_databases_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter616)
+          foreach ($this->success as $iter609)
           {
-            $xfer += $output->writeString($iter616);
+            $xfer += $output->writeString($iter609);
           }
         }
         $output->writeListEnd();
@@ -12538,18 +12314,18 @@ class ThriftHiveMetastore_get_type_all_result {
         case 0:
           if ($ftype == TType::MAP) {
             $this->success = array();
-            $_size617 = 0;
-            $_ktype618 = 0;
-            $_vtype619 = 0;
-            $xfer += $input->readMapBegin($_ktype618, $_vtype619, $_size617);
-            for ($_i621 = 0; $_i621 < $_size617; ++$_i621)
+            $_size610 = 0;
+            $_ktype611 = 0;
+            $_vtype612 = 0;
+            $xfer += $input->readMapBegin($_ktype611, $_vtype612, $_size610);
+            for ($_i614 = 0; $_i614 < $_size610; ++$_i614)
             {
-              $key622 = '';
-              $val623 = new \metastore\Type();
-              $xfer += $input->readString($key622);
-              $val623 = new \metastore\Type();
-              $xfer += $val623->read($input);
-              $this->success[$key622] = $val623;
+              $key615 = '';
+              $val616 = new \metastore\Type();
+              $xfer += $input->readString($key615);
+              $val616 = new \metastore\Type();
+              $xfer += $val616->read($input);
+              $this->success[$key615] = $val616;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -12585,10 +12361,10 @@ class ThriftHiveMetastore_get_type_all_result {
       {
         $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $kiter624 => $viter625)
+          foreach ($this->success as $kiter617 => $viter618)
           {
-            $xfer += $output->writeString($kiter624);
-            $xfer += $viter625->write($output);
+            $xfer += $output->writeString($kiter617);
+            $xfer += $viter618->write($output);
           }
         }
         $output->writeMapEnd();
@@ -12792,15 +12568,15 @@ class ThriftHiveMetastore_get_fields_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size626 = 0;
-            $_etype629 = 0;
-            $xfer += $input->readListBegin($_etype629, $_size626);
-            for ($_i630 = 0; $_i630 < $_size626; ++$_i630)
+            $_size619 = 0;
+            $_etype622 = 0;
+            $xfer += $input->readListBegin($_etype622, $_size619);
+            for ($_i623 = 0; $_i623 < $_size619; ++$_i623)
             {
-              $elem631 = null;
-              $elem631 = new \metastore\FieldSchema();
-              $xfer += $elem631->read($input);
-              $this->success []= $elem631;
+              $elem624 = null;
+              $elem624 = new \metastore\FieldSchema();
+              $xfer += $elem624->read($input);
+              $this->success []= $elem624;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -12852,9 +12628,9 @@ class ThriftHiveMetastore_get_fields_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter632)
+          foreach ($this->success as $iter625)
           {
-            $xfer += $iter632->write($output);
+            $xfer += $iter625->write($output);
           }
         }
         $output->writeListEnd();
@@ -13096,15 +12872,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size633 = 0;
-            $_etype636 = 0;
-            $xfer += $input->readListBegin($_etype636, $_size633);
-            for ($_i637 = 0; $_i637 < $_size633; ++$_i637)
+            $_size626 = 0;
+            $_etype629 = 0;
+            $xfer += $input->readListBegin($_etype629, $_size626);
+            for ($_i630 = 0; $_i630 < $_size626; ++$_i630)
             {
-              $elem638 = null;
-              $elem638 = new \metastore\FieldSchema();
-              $xfer += $elem638->read($input);
-              $this->success []= $elem638;
+              $elem631 = null;
+              $elem631 = new \metastore\FieldSchema();
+              $xfer += $elem631->read($input);
+              $this->success []= $elem631;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -13156,9 +12932,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter639)
+          foreach ($this->success as $iter632)
           {
-            $xfer += $iter639->write($output);
+            $xfer += $iter632->write($output);
           }
         }
         $output->writeListEnd();
@@ -13372,15 +13148,15 @@ class ThriftHiveMetastore_get_schema_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size640 = 0;
-            $_etype643 = 0;
-            $xfer += $input->readListBegin($_etype643, $_size640);
-            for ($_i644 = 0; $_i644 < $_size640; ++$_i644)
+            $_size633 = 0;
+            $_etype636 = 0;
+            $xfer += $input->readListBegin($_etype636, $_size633);
+            for ($_i637 = 0; $_i637 < $_size633; ++$_i637)
             {
-              $elem645 = null;
-              $elem645 = new \metastore\FieldSchema();
-              $xfer += $elem645->read($input);
-              $this->success []= $elem645;
+              $elem638 = null;
+              $elem638 = new \metastore\FieldSchema();
+              $xfer += $elem638->read($input);
+              $this->success []= $elem638;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -13432,9 +13208,9 @@ class ThriftHiveMetastore_get_schema_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter646)
+          foreach ($this->success as $iter639)
           {
-            $xfer += $iter646->write($output);
+            $xfer += $iter639->write($output);
           }
         }
         $output->writeListEnd();
@@ -13676,15 +13452,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size647 = 0;
-            $_etype650 = 0;
-            $xfer += $input->readListBegin($_etype650, $_size647);
-            for ($_i651 = 0; $_i651 < $_size647; ++$_i651)
+            $_size640 = 0;
+            $_etype643 = 0;
+            $xfer += $input->readListBegin($_etype643, $_size640);
+            for ($_i644 = 0; $_i644 < $_size640; ++$_i644)
             {
-              $elem652 = null;
-              $elem652 = new \metastore\FieldSchema();
-              $xfer += $elem652->read($input);
-              $this->success []= $elem652;
+              $elem645 = null;
+              $elem645 = new \metastore\FieldSchema();
+              $xfer += $elem645->read($input);
+              $this->success []= $elem645;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -13736,9 +13512,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter653)
+          foreach ($this->success as $iter646)
           {
-            $xfer += $iter653->write($output);
+            $xfer += $iter646->write($output);
           }
         }
         $output->writeListEnd();
@@ -14346,15 +14122,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
         case 2:
           if ($ftype == TType::LST) {
             $this->primaryKeys = array();
-            $_size654 = 0;
-            $_etype657 = 0;
-            $xfer += $input->readListBegin($_etype657, $_size654);
-            for ($_i658 = 0; $_i658 < $_size654; ++$_i658)
+            $_size647 = 0;
+            $_etype650 = 0;
+            $xfer += $input->readListBegin($_etype650, $_size647);
+            for ($_i651 = 0; $_i651 < $_size647; ++$_i651)
             {
-              $elem659 = null;
-              $elem659 = new \metastore\SQLPrimaryKey();
-              $xfer += $elem659->read($input);
-              $this->primaryKeys []= $elem659;
+              $elem652 = null;
+              $elem652 = new \metastore\SQLPrimaryKey();
+              $xfer += $elem652->read($input);
+              $this->primaryKeys []= $elem652;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -14364,15 +14140,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->foreignKeys = array();
-            $_size660 = 0;
-            $_etype663 = 0;
-            $xfer += $input->readListBegin($_etype663, $_size660);
-            for ($_i664 = 0; $_i664 < $_size660; ++$_i664)
+            $_size653 = 0;
+            $_etype656 = 0;
+            $xfer += $input->readListBegin($_etype656, $_size653);
+            for ($_i657 = 0; $_i657 < $_size653; ++$_i657)
             {
-              $elem665 = null;
-              $elem665 = new \metastore\SQLForeignKey();
-              $xfer += $elem665->read($input);
-              $this->foreignKeys []= $elem665;
+              $elem658 = null;
+              $elem658 = new \metastore\SQLForeignKey();
+              $xfer += $elem658->read($input);
+              $this->foreignKeys []= $elem658;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -14408,9 +14184,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->primaryKeys));
         {
-          foreach ($this->primaryKeys as $iter666)
+          foreach ($this->primaryKeys as $iter659)
           {
-            $xfer += $iter666->write($output);
+            $xfer += $iter659->write($output);
           }
         }
         $output->writeListEnd();
@@ -14425,9 +14201,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->foreignKeys));
         {
-          foreach ($this->foreignKeys as $iter667)
+          foreach ($this->foreignKeys as $iter660)
           {
-            $xfer += $iter667->write($output);
+            $xfer += $iter660->write($output);
           }
         }
         $output->writeListEnd();
@@ -15699,14 +15475,14 @@ class ThriftHiveMetastore_truncate_table_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->partNames = array();
-            $_size668 = 0;
-            $_etype671 = 0;
-            $xfer += $input->readListBegin($_etype671, $_size668);
-            for ($_i672 = 0; $_i672 < $_size668; ++$_i672)
+            $_size661 = 0;
+            $_etype664 = 0;
+            $xfer += $input->readListBegin($_etype664, $_size661);
+            for ($_i665 = 0; $_i665 < $_size661; ++$_i665)
             {
-              $elem673 = null;
-              $xfer += $input->readString($elem673);
-              $this->partNames []= $elem673;
+              $elem666 = null;
+              $xfer += $input->readString($elem666);
+              $this->partNames []= $elem666;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -15744,9 +15520,9 @@ class ThriftHiveMetastore_truncate_table_args {
       {
         $output->writeListBegin(TType::STRING, count($this->partNames));
         {
-          foreach ($this->partNames as $iter674)
+          foreach ($this->partNames as $iter667)
           {
-            $xfer += $output->writeString($iter674);
+            $xfer += $output->writeString($iter667);
           }
         }
         $output->writeListEnd();
@@ -15997,14 +15773,14 @@ class ThriftHiveMetastore_get_tables_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size675 = 0;
-            $_etype678 = 0;
-            $xfer += $input->readListBegin($_etype678, $_size675);
-            for ($_i679 = 0; $_i679 < $_size675; ++$_i679)
+            $_size668 = 0;
+            $_etype671 = 0;
+            $xfer += $input->readListBegin($_etype671, $_size668);
+            for ($_i672 = 0; $_i672 < $_size668; ++$_i672)
             {
-              $elem680 = null;
-              $xfer += $input->readString($elem680);
-              $this->success []= $elem680;
+              $elem673 = null;
+              $xfer += $input->readString($elem673);
+              $this->success []= $elem673;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16040,9 +15816,9 @@ class ThriftHiveMetastore_get_tables_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter681)
+          foreach ($this->success as $iter674)
           {
-            $xfer += $output->writeString($iter681);
+            $xfer += $output->writeString($iter674);
           }
         }
         $output->writeListEnd();
@@ -16244,14 +16020,14 @@ class ThriftHiveMetastore_get_tables_by_type_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size682 = 0;
-            $_etype685 = 0;
-            $xfer += $input->readListBegin($_etype685, $_size682);
-            for ($_i686 = 0; $_i686 < $_size682; ++$_i686)
+            $_size675 = 0;
+            $_etype678 = 0;
+            $xfer += $input->readListBegin($_etype678, $_size675);
+            for ($_i679 = 0; $_i679 < $_size675; ++$_i679)
             {
-              $elem687 = null;
-              $xfer += $input->readString($elem687);
-              $this->success []= $elem687;
+              $elem680 = null;
+              $xfer += $input->readString($elem680);
+              $this->success []= $elem680;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16287,9 +16063,9 @@ class ThriftHiveMetastore_get_tables_by_type_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter688)
+          foreach ($this->success as $iter681)
           {
-            $xfer += $output->writeString($iter688);
+            $xfer += $output->writeString($iter681);
           }
         }
         $output->writeListEnd();
@@ -16394,14 +16170,14 @@ class ThriftHiveMetastore_get_table_meta_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->tbl_types = array();
-            $_size689 = 0;
-            $_etype692 = 0;
-            $xfer += $input->readListBegin($_etype692, $_size689);
-            for ($_i693 = 0; $_i693 < $_size689; ++$_i693)
+            $_size682 = 0;
+            $_etype685 = 0;
+            $xfer += $input->readListBegin($_etype685, $_size682);
+            for ($_i686 = 0; $_i686 < $_size682; ++$_i686)
             {
-              $elem694 = null;
-              $xfer += $input->readString($elem694);
-              $this->tbl_types []= $elem694;
+              $elem687 = null;
+              $xfer += $input->readString($elem687);
+              $this->tbl_types []= $elem687;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16439,9 +16215,9 @@ class ThriftHiveMetastore_get_table_meta_args {
       {
         $output->writeListBegin(TType::STRING, count($this->tbl_types));
         {
-          foreach ($this->tbl_types as $iter695)
+          foreach ($this->tbl_types as $iter688)
           {
-            $xfer += $output->writeString($iter695);
+            $xfer += $output->writeString($iter688);
           }
         }
         $output->writeListEnd();
@@ -16518,15 +16294,15 @@ class ThriftHiveMetastore_get_table_meta_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size696 = 0;
-            $_etype699 = 0;
-            $xfer += $input->readListBegin($_etype699, $_size696);
-            for ($_i700 = 0; $_i700 < $_size696; ++$_i700)
+            $_size689 = 0;
+            $_etype692 = 0;
+            $xfer += $input->readListBegin($_etype692, $_size689);
+            for ($_i693 = 0; $_i693 < $_size689; ++$_i693)
             {
-              $elem701 = null;
-              $elem701 = new \metastore\TableMeta();
-              $xfer += $elem701->read($input);
-              $this->success []= $elem701;
+              $elem694 = null;
+              $elem694 = new \metastore\TableMeta();
+              $xfer += $elem694->read($input);
+              $this->success []= $elem694;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16562,9 +16338,9 @@ class ThriftHiveMetastore_get_table_meta_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter702)
+          foreach ($this->success as $iter695)
           {
-            $xfer += $iter702->write($output);
+            $xfer += $iter695->write($output);
           }
         }
         $output->writeListEnd();
@@ -16720,14 +16496,14 @@ class ThriftHiveMetastore_get_all_tables_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size703 = 0;
-            $_etype706 = 0;
-            $xfer += $input->readListBegin($_etype706, $_size703);
-            for ($_i707 = 0; $_i707 < $_size703; ++$_i707)
+            $_size696 = 0;
+            $_etype699 = 0;
+            $xfer += $input->readListBegin($_etype699, $_size696);
+            for ($_i700 = 0; $_i700 < $_size696; ++$_i700)
             {
-              $elem708 = null;
-              $xfer += $input->readString($elem708);
-              $this->success []= $elem708;
+              $elem701 = null;
+              $xfer += $input->readString($elem701);
+              $this->success []= $elem701;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16763,9 +16539,9 @@ class ThriftHiveMetastore_get_all_tables_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter709)
+          foreach ($this->success as $iter702)
           {
-            $xfer += $output->writeString($iter709);
+            $xfer += $output->writeString($iter702);
           }
         }
         $output->writeListEnd();
@@ -17080,14 +16856,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args {
         case 2:
           if ($ftype == TType::LST) {
             $this->tbl_names = array();
-            $_size710 = 0;
-            $_etype713 = 0;
-            $xfer += $input->readListBegin($_etype713, $_size710);
-            for ($_i714 = 0; $_i714 < $_size710; ++$_i714)
+            $_size703 = 0;
+            $_etype706 = 0;
+            $xfer += $input->readListBegin($_etype706, $_size703);
+            for ($_i707 = 0; $_i707 < $_size703; ++$_i707)
             {
-              $elem715 = null;
-              $xfer += $input->readString($elem715);
-              $this->tbl_names []= $elem715;
+              $elem708 = null;
+              $xfer += $input->readString($elem708);
+              $this->tbl_names []= $elem708;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17120,9 +16896,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args {
       {
         $output->writeListBegin(TType::STRING, count($this->tbl_names));
         {
-          foreach ($this->tbl_names as $iter716)
+          foreach ($this->tbl_names as $iter709)
           {
-            $xfer += $output->writeString($iter716);
+            $xfer += $output->writeString($iter709);
           }
         }
         $output->writeListEnd();
@@ -17187,15 +16963,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size717 = 0;
-            $_etype720 = 0;
-            $xfer += $input->readListBegin($_etype720, $_size717);
-            for ($_i721 = 0; $_i721 < $_size717; ++$_i721)
+            $_size710 = 0;
+            $_etype713 = 0;
+            $xfer += $input->readListBegin($_etype713, $_size710);
+            for ($_i714 = 0; $_i714 < $_size710; ++$_i714)
             {
-              $elem722 = null;
-              $elem722 = new \metastore\Table();
-              $xfer += $elem722->read($input);
-              $this->success []= $elem722;
+              $elem715 = null;
+              $elem715 = new \metastore\Table();
+              $xfer += $elem715->read($input);
+              $this->success []= $elem715;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17223,9 +16999,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter723)
+          foreach ($this->success as $iter716)
           {
-            $xfer += $iter723->write($output);
+            $xfer += $iter716->write($output);
           }
         }
         $output->writeListEnd();
@@ -17891,14 +17667,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size724 = 0;
-            $_etype727 = 0;
-            $xfer += $input->readListBegin($_etype727, $_size724);
-            for ($_i728 = 0; $_i728 < $_size724; ++$_i728)
+            $_size717 = 0;
+            $_etype720 = 0;
+            $xfer += $input->readListBegin($_etype720, $_size717);
+            for ($_i721 = 0; $_i721 < $_size717; ++$_i721)
             {
-              $elem729 = null;
-              $xfer += $input->readString($elem729);
-              $this->success []= $elem729;
+              $elem722 = null;
+              $xfer += $input->readString($elem722);
+              $this->success []= $elem722;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17950,9 +17726,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter730)
+          foreach ($this->success as $iter723)
           {
-            $xfer += $output->writeString($iter730);
+            $xfer += $output->writeString($iter723);
           }
         }
         $output->writeListEnd();
@@ -19265,15 +19041,15 @@ class ThriftHiveMetastore_add_partitions_args {
         case 1:
           if ($ftype == TType::LST) {
             $this->new_parts = array();
-            $_size731 = 0;
-            $_etype734 = 0;
-            $xfer += $input->readListBegin($_etype734, $_size731);
-            for ($_i735 = 0; $_i735 < $_size731; ++$_i735)
+            $_size724 = 0;
+            $_etype727 = 0;
+            $xfer += $input->readListBegin($_etype727, $_size724);
+            for ($_i728 = 0; $_i728 < $_size724; ++$_i728)
             {
-              $elem736 = null;
-              $elem736 = new \metastore\Partition();
-              $xfer += $elem736->read($input);
-              $this->new_parts []= $elem736;
+              $elem729 = null;
+              $elem729 = new \metastore\Partition();
+              $xfer += $elem729->read($input);
+              $this->new_parts []= $elem729;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -19301,9 +19077,9 @@ class ThriftHiveMetastore_add_partitions_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->new_parts));
         {
-          foreach ($this->new_parts as $iter737)
+          foreach ($this->new_parts as $iter730)
           {
-            $xfer += $iter737->write($output);
+            $xfer += $iter730->write($output);
           }
         }
         $output->writeListEnd();
@@ -19518,15 +19294,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args {
         case 1:
           if ($ftype == TType::LST) {
             $this->new_parts = array();
-            $_size738 = 0;
-            $_etype741 = 0;
-            $xfer += $input->readListBegin($_etype741, $_size738);
-            for ($_i742 = 0; $_i742 < $_size738; ++$_i742)
+            $_size731 = 0;
+            $_etype734 = 0;
+            $xfer += $input->readListBegin($_etype734, $_size731);
+            for ($_i735 = 0; $_i735 < $_size731; ++$_i735)
             {
-              $elem743 = null;
-              $elem743 = new \metastore\PartitionSpec();
-              $xfer += $elem743->read($input);
-              $this->new_parts []= $elem743;
+              $elem736 = null;
+              $elem736 = new \metastore\PartitionSpec();
+              $xfer += $elem736->read($input);
+              $this->new_parts []= $elem736;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -19554,9 +19330,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->new_parts));
         {
-          foreach ($this->new_parts as $iter744)
+          foreach ($this->new_parts as $iter737)
           {
-            $xfer += $iter744->write($output);
+            $xfer += $iter737->write($output);
           }
         }
         $output->writeListEnd();
@@ -19806,14 +19582,14 @@ class ThriftHiveMetastore_append_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size745 = 0;
-            $_etype748 = 0;
-            $xfer += $input->readListBegin($_etype748, $_size745);
-            for ($_i749 = 0; $_i749 < $_size745; ++$_i749)
+            $_size738 = 0;
+            $_etype741 = 0;
+            $xfer += $input->readListBegin($_etype741, $_size738);
+            for ($_i742 = 0; $_i742 < $_size738; ++$_i742)
             {
-              $elem750 = null;
-              $xfer += $input->readString($elem750);
-              $this->part_vals []= $elem750;
+              $elem743 = null;
+              $xfer += $input->readString($elem743);
+              $this->part_vals []= $elem743;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -19851,9 +19627,9 @@ class ThriftHiveMetastore_append_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter751)
+          foreach ($this->part_vals as $iter744)
           {
-            $xfer += $output->writeString($iter751);
+            $xfer += $output->writeString($iter744);
           }
         }
         $output->writeListEnd();
@@ -20355,14 +20131,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size752 = 0;
-            $_etype755 = 0;
-            $xfer += $input->readListBegin($_etype755, $_size752);
-            for ($_i756 = 0; $_i756 < $_size752; ++$_i756)
+            $_size745 = 0;
+            $_etype748 = 0;
+            $xfer += $input->readListBegin($_etype748, $_size745);
+            for ($_i749 = 0; $_i749 < $_size745; ++$_i749)
             {
-              $elem757 = null;
-              $xfer += $input->readString($elem757);
-              $this->part_vals []= $elem757;
+              $elem750 = null;
+              $xfer += $input->readString($elem750);
+              $this->part_vals []= $elem750;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -20408,9 +20184,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter758)
+          foreach ($this->part_vals as $iter751)
           {
-            $xfer += $output->writeString($iter758);
+            $xfer += $output->writeString($iter751);
           }
         }
         $output->writeListEnd();
@@ -21264,14 +21040,14 @@ class ThriftHiveMetastore_drop_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size759 = 0;
-            $_etype762 = 0;
-            $xfer += $input->readListBegin($_etype762, $_size759);
-            for ($_i763 = 0; $_i763 < $_size759; ++$_i763)
+            $_size752 = 0;
+            $_etype755 = 0;
+            $xfer += $input->readListBegin($_etype755, $_size752);
+            for ($_i756 = 0; $_i756 < $_size752; ++$_i756)
             {
-              $elem764 = null;
-              $xfer += $input->readString($elem764);
-              $this->part_vals []= $elem764;
+              $elem757 = null;
+              $xfer += $input->readString($elem757);
+              $this->part_vals []= $elem757;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21316,9 +21092,9 @@ class ThriftHiveMetastore_drop_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter765)
+          foreach ($this->part_vals as $iter758)
           {
-            $xfer += $output->writeString($iter765);
+            $xfer += $output->writeString($iter758);
           }
         }
         $output->writeListEnd();
@@ -21571,14 +21347,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size766 = 0;
-            $_etype769 = 0;
-            $xfer += $input->readListBegin($_etype769, $_size766);
-            for ($_i770 = 0; $_i770 < $_size766; ++$_i770)
+            $_size759 = 0;
+            $_etype762 = 0;
+            $xfer += $input->readListBegin($_etype762, $_size759);
+            for ($_i763 = 0; $_i763 < $_size759; ++$_i763)
             {
-              $elem771 = null;
-              $xfer += $input->readString($elem771);
-              $this->part_vals []= $elem771;
+              $elem764 = null;
+              $xfer += $input->readString($elem764);
+              $this->part_vals []= $elem764;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21631,9 +21407,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter772)
+          foreach ($this->part_vals as $iter765)
           {
-            $xfer += $output->writeString($iter772);
+            $xfer += $output->writeString($iter765);
           }
         }
         $output->writeListEnd();
@@ -22647,14 +22423,14 @@ class ThriftHiveMetastore_get_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size773 = 0;
-            $_etype776 = 0;
-            $xfer += $input->readListBegin($_etype776, $_size773);
-            for ($_i777 = 0; $_i777 < $_size773; ++$_i777)
+            $_size766 = 0;
+            $_etype769 = 0;
+            $xfer += $input->readListBegin($_etype769, $_size766);
+            for ($_i770 = 0; $_i770 < $_size766; ++$_i770)
             {
-              $elem778 = null;
-              $xfer += $input->readString($elem778);
-              $this->part_vals []= $elem778;
+              $elem771 = null;
+              $xfer += $input->readString($elem771);
+              $this->part_vals []= $elem771;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22692,9 +22468,9 @@ class ThriftHiveMetastore_get_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter779)
+          foreach ($this->part_vals as $iter772)
           {
-            $xfer += $output->writeString($iter779);
+            $xfer += $output->writeString($iter772);
           }
         }
         $output->writeListEnd();
@@ -22936,17 +22712,17 @@ class ThriftHiveMetastore_exchange_partition_args {
         case 1:
           if ($ftype == TType::MAP) {
             $this->partitionSpecs = array();
-            $_size780 = 0;
-            $_ktype781 = 0;
-            $_vtype782 = 0;
-            $xfer += $input->readMapBegin($_ktype781, $_vtype782, $_size780);
-            for ($_i784 = 0; $_i784 < $_size780; ++$_i784)
+            $_size773 = 0;
+            $_ktype774 = 0;
+            $_vtype775 = 0;
+            $xfer += $input->readMapBegin($_ktype774, $_vtype775, $_size773);
+            for ($_i777 = 0; $_i777 < $_size773; ++$_i777)
             {
-              $key785 = '';
-              $val786 = '';
-              $xfer += $input->readString($key785);
-              $xfer += $input->readString($val786);
-              $this->partitionSpecs[$key785] = $val786;
+              $key778 = '';
+              $val779 = '';
+              $xfer += $input->readString($key778);
+              $xfer += $input->readString($val779);
+              $this->partitionSpecs[$key778] = $val779;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -23002,10 +22778,10 @@ class ThriftHiveMetastore_exchange_partition_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
         {
-          foreach ($this->partitionSpecs as $kiter787 => $viter788)
+          foreach ($this->partitionSpecs as $kiter780 => $viter781)
           {
-            $xfer += $output->writeString($kiter787);
-            $xfer += $output->writeString($viter788);
+            $xfer += $output->writeString($kiter780);
+            $xfer += $output->writeString($viter781);
           }
         }
         $output->writeMapEnd();
@@ -23317,17 +23093,17 @@ class ThriftHiveMetastore_exchange_partitions_args {
         case 1:
           if ($ftype == TType::MAP) {
             $this->partitionSpecs = array();
-            $_size789 = 0;
-            $_ktype790 = 0;
-            $_vtype791 = 0;
-            $xfer += $input->readMapBegin($_ktype790, $_vtype791, $_size789);
-            for ($_i793 = 0; $_i793 < $_size789; ++$_i793)
+            $_size782 = 0;
+            $_ktype783 = 0;
+            $_vtype784 = 0;
+            $xfer += $input->readMapBegin($_ktype783, $_vtype784, $_size782);
+            for ($_i786 = 0; $_i786 < $_size782; ++$_i786)
             {
-              $key794 = '';
-              $val795 = '';
-              $xfer += $input->readString($key794);
-              $xfer += $input->readString($val795);
-              $this->partitionSpecs[$key794] = $val795;
+              $key787 = '';
+              $val788 = '';
+              $xfer += $input->readString($key787);
+              $xfer += $input->readString($val788);
+              $this->partitionSpecs[$key787] = $val788;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -23383,10 +23159,10 @@ class ThriftHiveMetastore_exchange_partitions_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
         {
-          foreach ($this->partitionSpecs as $kiter796 => $viter797)
+          foreach ($this->partitionSpecs as $kiter789 => $viter790)
           {
-            $xfer += $output->writeString($kiter796);
-            $xfer += $output->writeString($viter797);
+            $xfer += $output->writeString($kiter789);
+            $xfer += $output->writeString($viter790);
           }
         }
         $output->writeMapEnd();
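The partitionSpecs hunks in the exchange_partition/exchange_partitions args follow the analogous generated pattern for a Thrift map<string,string>; again a suffix-free sketch, assuming the same generated-class context with $input as a TProtocol instance:

    // Sketch only: deserializing a map<string,string> into
    // $this->partitionSpecs; key and value are both read as strings.
    $_size = 0;
    $_ktype = 0;
    $_vtype = 0;
    $xfer += $input->readMapBegin($_ktype, $_vtype, $_size);
    for ($_i = 0; $_i < $_size; ++$_i)
    {
      $key = '';
      $val = '';
      $xfer += $input->readString($key);
      $xfer += $input->readString($val);
      $this->partitionSpecs[$key] = $val;
    }
    $xfer += $input->readMapEnd();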
@@ -23519,15 +23295,15 @@ class ThriftHiveMetastore_exchange_partitions_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size798 = 0;
-            $_etype801 = 0;
-            $xfer += $input->readListBegin($_etype801, $_size798);
-            for ($_i802 = 0; $_i802 < $_size798; ++$_i802)
+            $_size791 = 0;
+            $_etype794 = 0;
+            $xfer += $input->readListBegin($_etype794, $_size791);
+            for ($_i795 = 0; $_i795 < $_size791; ++$_i795)
             {
-              $elem803 = null;
-              $elem803 = new \metastore\Partition();
-              $xfer += $elem803->read($input);
-              $this->success []= $elem803;
+              $elem796 = null;
+              $elem796 = new \metastore\Partition();
+              $xfer += $elem796->read($input);
+              $this->success []= $elem796;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -23587,9 +23363,9 @@ class ThriftHiveMetastore_exchange_partitions_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter804)
+          foreach ($this->success as $iter797)
           {
-            $xfer += $iter804->write($output);
+            $xfer += $iter797->write($output);
           }
         }
         $output->writeListEnd();
@@ -23735,14 +23511,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size805 = 0;
-            $_etype808 = 0;
-            $xfer += $input->readListBegin($_etype808, $_size805);
-            for ($_i809 = 0; $_i809 < $_size805; ++$_i809)
+            $_size798 = 0;
+            $_etype801 = 0;
+            $xfer += $input->readListBegin($_etype801, $_size798);
+            for ($_i802 = 0; $_i802 < $_size798; ++$_i802)
             {
-              $elem810 = null;
-              $xfer += $input->readString($elem810);
-              $this->part_vals []= $elem810;
+              $elem803 = null;
+              $xfer += $input->readString($elem803);
+              $this->part_vals []= $elem803;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -23759,14 +23535,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
         case 5:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size811 = 0;
-            $_etype814 = 0;
-            $xfer += $input->readListBegin($_etype814, $_size811);
-            for ($_i815 = 0; $_i815 < $_size811; ++$_i815)
+            $_size804 = 0;
+            $_etype807 = 0;
+            $xfer += $input->readListBegin($_etype807, $_size804);
+            for ($_i808 = 0; $_i808 < $_size804; ++$_i808)
             {
-              $elem816 = null;
-              $xfer += $input->readString($elem816);
-              $this->group_names []= $elem816;
+              $elem809 = null;
+              $xfer += $input->readString($elem809);
+              $this->group_names []= $elem809;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -23804,9 +23580,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter817)
+          foreach ($this->part_vals as $iter810)
           {
-            $xfer += $output->writeString($iter817);
+            $xfer += $output->writeString($iter810);
           }
         }
         $output->writeListEnd();
@@ -23826,9 +23602,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter818)
+          foreach ($this->group_names as $iter811)
           {
-            $xfer += $output->writeString($iter818);
+            $xfer += $output->writeString($iter811);
           }
         }
         $output->writeListEnd();
@@ -24419,15 +24195,15 @@ class ThriftHiveMetastore_get_partitions_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size819 = 0;
-            $_etype822 = 0;
-            $xfer += $input->readListBegin($_etype822, $_size819);
-            for ($_i823 = 0; $_i823 < $_size819; ++$_i823)
+            $_size812 = 0;
+            $_etype815 = 0;
+            $xfer += $input->readListBegin($_etype815, $_size812);
+            for ($_i816 = 0; $_i816 < $_size812; ++$_i816)
             {
-              $elem824 = null;
-              $elem824 = new \metastore\Partition();
-              $xfer += $elem824->read($input);
-              $this->success []= $elem824;
+              $elem817 = null;
+              $elem817 = new \metastore\Partition();
+              $xfer += $elem817->read($input);
+              $this->success []= $elem817;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -24471,9 +24247,9 @@ class ThriftHiveMetastore_get_partitions_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter825)
+          foreach ($this->success as $iter818)
           {
-            $xfer += $iter825->write($output);
+            $xfer += $iter818->write($output);
           }
         }
         $output->writeListEnd();
@@ -24619,14 +24395,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
         case 5:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size826 = 0;
-            $_etype829 = 0;
-            $xfer += $input->readListBegin($_etype829, $_size826);
-            for ($_i830 = 0; $_i830 < $_size826; ++$_i830)
+            $_size819 = 0;
+            $_etype822 = 0;
+            $xfer += $input->readListBegin($_etype822, $_size819);
+            for ($_i823 = 0; $_i823 < $_size819; ++$_i823)
             {
-              $elem831 = null;
-              $xfer += $input->readString($elem831);
-              $this->group_names []= $elem831;
+              $elem824 = null;
+              $xfer += $input->readString($elem824);
+              $this->group_names []= $elem824;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -24674,9 +24450,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter832)
+          foreach ($this->group_names as $iter825)
           {
-            $xfer += $output->writeString($iter832);
+            $xfer += $output->writeString($iter825);
           }
         }
         $output->writeListEnd();
@@ -24765,15 +24541,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size833 = 0;
-            $_etype836 = 0;
-            $xfer += $input->readListBegin($_etype836, $_size833);
-            for ($_i837 = 0; $_i837 < $_size833; ++$_i837)
+            $_size826 = 0;
+            $_etype829 = 0;
+            $xfer += $input->readListBegin($_etype829, $_size826);
+            for ($_i830 = 0; $_i830 < $_size826; ++$_i830)
             {
-              $elem838 = null;
-              $elem838 = new \metastore\Partition();
-              $xfer += $elem838->read($input);
-              $this->success []= $elem838;
+              $elem831 = null;
+              $elem831 = new \metastore\Partition();
+              $xfer += $elem831->read($input);
+              $this->success []= $elem831;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -24817,9 +24593,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter839)
+          foreach ($this->success as $iter832)
           {
-            $xfer += $iter839->write($output);
+            $xfer += $iter832->write($output);
           }
         }
         $output->writeListEnd();
@@ -25039,15 +24815,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size840 = 0;
-            $_etype843 = 0;
-            $xfer += $input->readListBegin($_etype843, $_size840);
-            for ($_i844 = 0; $_i844 < $_size840; ++$_i844)
+            $_size833 = 0;
+            $_etype836 = 0;
+            $xfer += $input->readListBegin($_etype836, $_size833);
+            for ($_i837 = 0; $_i837 < $_size833; ++$_i837)
             {
-              $elem845 = null;
-              $elem845 = new \metastore\PartitionSpec();
-              $xfer += $elem845->read($input);
-              $this->success []= $elem845;
+              $elem838 = null;
+              $elem838 = new \metastore\PartitionSpec();
+              $xfer += $elem838->read($input);
+              $this->success []= $elem838;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -25091,9 +24867,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter846)
+          foreach ($this->success as $iter839)
           {
-            $xfer += $iter846->write($output);
+            $xfer += $iter839->write($output);
           }
         }
         $output->writeListEnd();
@@ -25300,14 +25076,14 @@ class ThriftHiveMetastore_get_partition_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size847 = 0;
-            $_etype850 = 0;
-            $xfer += $input->readListBegin($_etype850, $_size847);
-            for ($_i851 = 0; $_i851 < $_size847; ++$_i851)
+            $_size840 = 0;
+            $_etype843 = 0;
+            $xfer += $input->readListBegin($_etype843, $_size840);
+            for ($_i844 = 0; $_i844 < $_size840; ++$_i844)
             {
-              $elem852 = null;
-              $xfer += $input->readString($elem852);
-              $this->success []= $elem852;
+              $elem845 = null;
+              $xfer += $input->readString($elem845);
+              $this->success []= $elem845;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -25343,9 +25119,9 @@ class ThriftHiveMetastore_get_partition_names_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter853)
+          foreach ($this->success as $iter846)
           {
-            $xfer += $output->writeString($iter853);
+            $xfer += $output->writeString($iter846);
           }
         }
         $output->writeListEnd();
@@ -25461,14 +25237,14 @@ class ThriftHiveMetastore_get_partitions_ps_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size854 = 0;
-            $_etype857 = 0;
-            $xfer += $input->readListBegin($_etype857, $_size854);
-            for ($_i858 = 0; $_i858 < $_size854; ++$_i858)
+            $_size847 = 0;
+            $_etype850 = 0;
+            $xfer += $input->readListBegin($_etype850, $_size847);
+            for ($_i851 = 0; $_i851 < $_size847; ++$_i851)
             {
-              $elem859 = null;
-              $xfer += $input->readString($elem859);
-              $this->part_vals []= $elem859;
+              $elem852 = null;
+              $xfer += $input->readString($elem852);
+              $this->part_vals []= $elem852;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -25513,9 +25289,9 @@ class ThriftHiveMetastore_get_partitions_ps_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter860)
+          foreach ($this->part_vals as $iter853)
           {
-            $xfer += $output->writeString($iter860);
+            $xfer += $output->writeString($iter853);
           }
         }
         $output->writeListEnd();
@@ -25609,15 +25385,15 @@ class ThriftHiveMetastore_get_partitions_ps_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size861 = 0;
-            $_etype864 = 0;
-            $xfer += $input->readListBegin($_etype864, $_size861);
-            for ($_i865 = 0; $_i865 < $_size861; ++$_i865)
+            $_size854 = 0;
+            $_etype857 = 0;
+            $xfer += $input->readListBegin($_etype857, $_size854);
+            for ($_i858 = 0; $_i858 < $_size854; ++$_i858)
             {
-              $elem866 = null;
-              $elem866 = new \metastore\Partition();
-              $xfer += $elem866->read($input);
-              $this->success []= $elem866;
+              $elem859 = null;
+              $elem859 = new \metastore\Partition();
+              $xfer += $elem859->read($input);
+              $this->success []= $elem859;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -25661,9 +25437,9 @@ class ThriftHiveMetastore_get_partitions_ps_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter867)
+          foreach ($this->success as $iter860)
           {
-            $xfer += $iter867->write($output);
+            $xfer += $iter860->write($output);
           }
         }
         $output->writeListEnd();
@@ -25810,14 +25586,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size868 = 0;
-            $_etype871 = 0;
-            $xfer += $input->readListBegin($_etype871, $_size868);
-            for ($_i872 = 0; $_i872 < $_size868; ++$_i872)
+            $_size861 = 0;
+            $_etype864 = 0;
+            $xfer += $input->readListBegin($_etype864, $_size861);
+            for ($_i865 = 0; $_i865 < $_size861; ++$_i865)
             {
-              $elem873 = null;
-              $xfer += $input->readString($elem873);
-              $this->part_vals []= $elem873;
+              $elem866 = null;
+              $xfer += $input->readString($elem866);
+              $this->part_vals []= $elem866;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -25841,14 +25617,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
         case 6:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size874 = 0;
-            $_etype877 = 0;
-            $xfer += $input->readListBegin($_etype877, $_size874);
-            for ($_i878 = 0; $_i878 < $_size874; ++$_i878)
+            $_size867 = 0;
+            $_etype870 = 0;
+            $xfer += $input->readListBegin($_etype870, $_size867);
+            for ($_i871 = 0; $_i871 < $_size867; ++$_i871)
             {
-              $elem879 = null;
-              $xfer += $input->readString($elem879);
-              $this->group_names []= $elem879;
+              $elem872 = null;
+              $xfer += $input->readString($elem872);
+              $this->group_names []= $elem872;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -25886,9 +25662,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter880)
+          foreach ($this->part_vals as $iter873)
           {
-            $xfer += $output->writeString($iter880);
+            $xfer += $output->writeString($iter873);
           }
         }
         $output->writeListEnd();
@@ -25913,9 +25689,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter881)
+          foreach ($this->group_names as $iter874)
           {
-            $xfer += $output->writeString($iter881);
+            $xfer += $output->writeString($iter874);
           }
         }
         $output->writeListEnd();
@@ -26004,15 +25780,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size882 = 0;
-            $_etype885 = 0;
-            $xfer += $input->readListBegin($_etype885, $_size882);
-            for ($_i886 = 0; $_i886 < $_size882; ++$_i886)
+            $_size875 = 0;
+            $_etype878 = 0;
+            $xfer += $input->readListBegin($_etype878, $_size875);
+            for ($_i879 = 0; $_i879 < $_size875; ++$_i879)
             {
-              $elem887 = null;
-              $elem887 = new \metastore\Partition();
-              $xfer += $elem887->read($input);
-              $this->success []= $elem887;
+              $elem880 = null;
+              $elem880 = new \metastore\Partition();
+              $xfer += $elem880->read($input);
+              $this->success []= $elem880;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -26056,9 +25832,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter888)
+          foreach ($this->success as $iter881)
           {
-            $xfer += $iter888->write($output);
+            $xfer += $iter881->write($output);
           }
         }
         $output->writeListEnd();
@@ -26179,14 +25955,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size889 = 0;
-            $_etype892 = 0;
-            $xfer += $input->readListBegin($_etype892, $_size889);
-            for ($_i893 = 0; $_i893 < $_size889; ++$_i893)
+            $_size882 = 0;
+            $_etype885 = 0;
+            $xfer += $input->readListBegin($_etype885, $_size882);
+            for ($_i886 = 0; $_i886 < $_size882; ++$_i886)
             {
-              $elem894 = null;
-              $xfer += $input->readString($elem894);
-              $this->part_vals []= $elem894;
+              $elem887 = null;
+              $xfer += $input->readString($elem887);
+              $this->part_vals []= $elem887;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -26231,9 +26007,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter895)
+          foreach ($this->part_vals as $iter888)
           {
-            $xfer += $output->writeString($iter895);
+            $xfer += $output->writeString($iter888);
           }
         }
         $output->writeListEnd();
@@ -26326,14 +26102,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size896 = 0;
-            $_etype899 = 0;
-            $xfer += $input->readListBegin($_etype899, $_size896);
-            for ($_i900 = 0; $_i900 < $_size896; ++$_i900)
+            $_size889 = 0;
+            $_etype892 = 0;
+            $xfer += $input->readListBegin($_etype892, $_size889);
+            for ($_i893 = 0; $_i893 < $_size889; ++$_i893)
             {
-              $elem901 = null;
-              $xfer += $input->readString($elem901);
-              $this->success []= $elem901;
+              $elem894 = null;
+              $xfer += $input->readString($elem894);
+              $this->success []= $elem894;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -26377,9 +26153,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter902)
+          foreach ($this->success as $iter895)
           {
-            $xfer += $output->writeString($iter902);
+            $xfer += $output->writeString($iter895);
           }
         }
         $output->writeListEnd();
@@ -26622,15 +26398,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size903 = 0;
-            $_etype906 = 0;
-            $xfer += $input->readListBegin($_etype906, $_size903);
-            for ($_i907 = 0; $_i907 < $_size903; ++$_i907)
+            $_size896 = 0;
+            $_etype899 = 0;
+            $xfer += $input->readListBegin($_etype899, $_size896);
+            for ($_i900 = 0; $_i900 < $_size896; ++$_i900)
             {
-              $elem908 = null;
-              $elem908 = new \metastore\Partition();
-              $xfer += $elem908->read($input);
-              $this->success []= $elem908;
+              $elem901 = null;
+              $elem901 = new \metastore\Partition();
+              $xfer += $elem901->read($input);
+              $this->success []= $elem901;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -26674,9 +26450,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter909)
+          foreach ($this->success as $iter902)
           {
-            $xfer += $iter909->write($output);
+            $xfer += $iter902->write($output);
           }
         }
         $output->writeListEnd();
@@ -26919,15 +26695,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size910 = 0;
-            $_etype913 = 0;
-            $xfer += $input->readListBegin($_etype913, $_size910);
-            for ($_i914 = 0; $_i914 < $_size910; ++$_i914)
+            $_size903 = 0;
+            $_etype906 = 0;
+            $xfer += $input->readListBegin($_etype906, $_size903);
+            for ($_i907 = 0; $_i907 < $_size903; ++$_i907)
             {
-              $elem915 = null;
-              $elem915 = new \metastore\PartitionSpec();
-              $xfer += $elem915->read($input);
-              $this->success []= $elem915;
+              $elem908 = null;
+              $elem908 = new \metastore\PartitionSpec();
+              $xfer += $elem908->read($input);
+              $this->success []= $elem908;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -26971,9 +26747,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter916)
+          foreach ($this->success as $iter909)
           {
-            $xfer += $iter916->write($output);
+            $xfer += $iter909->write($output);
           }
         }
         $output->writeListEnd();
@@ -27539,14 +27315,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->names = array();
-            $_size917 = 0;
-            $_etype920 = 0;
-            $xfer += $input->readListBegin($_etype920, $_size917);
-            for ($_i921 = 0; $_i921 < $_size917; ++$_i921)
+            $_size910 = 0;
+            $_etype913 = 0;
+            $xfer += $input->readListBegin($_etype913, $_size910);
+            for ($_i914 = 0; $_i914 < $_size910; ++$_i914)
             {
-              $elem922 = null;
-              $xfer += $input->readString($elem922);
-              $this->names []= $elem922;
+              $elem915 = null;
+              $xfer += $input->readString($elem915);
+              $this->names []= $elem915;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -27584,9 +27360,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args {
       {
         $output->writeListBegin(TType::STRING, count($this->names));
         {
-          foreach ($this->names as $iter923)
+          foreach ($this->names as $iter916)
           {
-            $xfer += $output->writeString($iter923);
+            $xfer += $output->writeString($iter916);
           }
         }
         $output->writeListEnd();
@@ -27675,15 +27451,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size924 = 0;
-            $_etype927 = 0;
-            $xfer += $input->readListBegin($_etype927, $_size924);
-            for ($_i928 = 0; $_i928 < $_size924; ++$_i928)
+            $_size917 = 0;
+            $_etype920 = 0;
+            $xfer += $input->readListBegin($_etype920, $_size917);
+            for ($_i921 = 0; $_i921 < $_size917; ++$_i921)
             {
-              $elem929 = null;
-              $elem929 = new \metastore\Partition();
-              $xfer += $elem929->read($input);
-              $this->success []= $elem929;
+              $elem922 = null;
+              $elem922 = new \metastore\Partition();
+              $xfer += $elem922->read($input);
+              $this->success []= $elem922;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -27727,9 +27503,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter930)
+          foreach ($this->success as $iter923)
           {
-            $xfer += $iter930->write($output);
+            $xfer += $iter923->write($output);
           }
         }
         $output->writeListEnd();
@@ -28068,15 +27844,15 @@ class ThriftHiveMetastore_alter_partitions_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->new_parts = array();
-            $_size931 = 0;
-            $_etype934 = 0;
-            $xfer += $input->readListBegin($_etype934, $_size931);
-            for ($_i935 = 0; $_i935 < $_size931; ++$_i935)
+            $_size924 = 0;
+            $_etype927 = 0;
+            $xfer += $input->readListBegin($_etype927, $_size924);
+            for ($_i928 = 0; $_i928 < $_size924; ++$_i928)
             {
-              $elem936 = null;
-              $elem936 = new \metastore\Partition();
-              $xfer += $elem936->read($input);
-              $this->new_parts []= $elem936;
+              $elem929 = null;
+              $elem929 = new \metastore\Partition();
+              $xfer += $elem929->read($input);
+              $this->new_parts []= $elem929;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -28114,9 +27890,9 @@ class ThriftHiveMetastore_alter_partitions_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->new_parts));
         {
-          foreach ($this->new_parts as $iter937)
+          foreach ($this->new_parts as $iter930)
           {
-            $xfer += $iter937->write($output);
+            $xfer += $iter930->write($output);
           }
         }
         $output->writeListEnd();
@@ -28331,15 +28107,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->new_parts = array();
-            $_size938 = 0;
-            $_etype941 = 0;
-            $xfer += $input->readListBegin($_etype941, $_size938);
-            for ($_i942 = 0; $_i942 < $_size938; ++$_i942)
+            $_size931 = 0;
+            $_etype934 = 0;
+            $xfer += $input->readListBegin($_etype934, $_size931);
+            for ($_i935 = 0; $_i935 < $_size931; ++$_i935)
             {
-              $elem943 = null;
-              $elem943 = new \metastore\Partition();
-              $xfer += $elem943->read($input);
-              $this->new_parts []= $elem943;
+              $elem936 = null;
+              $elem936 = new \metastore\Partition();
+              $xfer += $elem936->read($input);
+              $this->new_parts []= $elem936;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -28385,9 +28161,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->new_parts));
         {
-          foreach ($this->new_parts as $iter944)
+          foreach ($this->new_parts as $iter937)
           {
-            $xfer += $iter944->write($output);
+            $xfer += $iter937->write($output);
           }
         }
         $output->writeListEnd();
@@ -28865,14 +28641,14 @@ class ThriftHiveMetastore_rename_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size945 = 0;
-            $_etype948 = 0;
-            $xfer += $input->readListBegin($_etype948, $_size945);
-            for ($_i949 = 0; $_i949 < $_size945; ++$_i949)
+            $_size938 = 0;
+            $_etype941 = 0;
+            $xfer += $input->readListBegin($_etype941, $_size938);
+            for ($_i942 = 0; $_i942 < $_size938; ++$_i942)
             {
-              $elem950 = null;
-              $xfer += $input->readString($elem950);
-              $this->part_vals []= $elem950;
+              $elem943 = null;
+              $xfer += $input->readString($elem943);
+              $this->part_vals []= $elem943;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -28918,9 +28694,9 @@ class ThriftHiveMetastore_rename_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter951)
+          foreach ($this->part_vals as $iter944)
           {
-            $xfer += $output->writeString($iter951);
+            $xfer += $output->writeString($iter944);
           }
         }
         $output->writeListEnd();
@@ -29105,14 +28881,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args {
         case 1:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size952 = 0;
-            $_etype955 = 0;
-            $xfer += $input->readListBegin($_etype955, $_size952);
-            for ($_i956 = 0; $_i956 < $_size952; ++$_i956)
+            $_size945 = 0;
+            $_etype948 = 0;
+            $xfer += $input->readListBegin($_etype948, $_size945);
+            for ($_i949 = 0; $_i949 < $_size945; ++$_i949)
             {
-              $elem957 = null;
-              $xfer += $input->readString($elem957);
-              $this->part_vals []= $elem957;
+              $elem950 = null;
+              $xfer += $input->readString($elem950);
+              $this->part_vals []= $elem950;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -29147,9 +28923,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter958)
+          foreach ($this->part_vals as $iter951)
           {
-            $xfer += $output->writeString($iter958);
+            $xfer += $output->writeString($iter951);
           }
         }
         $output->writeListEnd();
@@ -29603,14 +29379,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size959 = 0;
-            $_etype962 = 0;
-            $xfer += $input->readListBegin($_etype962, $_size959);
-            for ($_i963 = 0; $_i963 < $_size959; ++$_i963)
+            $_size952 = 0;
+            $_etype955 = 0;
+            $xfer += $input->readListBegin($_etype955, $_size952);
+            for ($_i956 = 0; $_i956 < $_size952; ++$_i956)
             {
-              $elem964 = null;
-              $xfer += $input->readString($elem964);
-              $this->success []= $elem964;
+              $elem957 = null;
+              $xfer += $input->readString($elem957);
+              $this->success []= $elem957;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -29646,9 +29422,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter965)
+          foreach ($this->success as $iter958)
           {
-            $xfer += $output->writeString($iter965);
+            $xfer += $output->writeString($iter958);
           }
         }
         $output->writeListEnd();
@@ -29808,17 +29584,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result {
         case 0:
           if ($ftype == TType::MAP) {
             $this->success = array();
-            $_size966 = 0;
-            $_ktype967 = 0;
-            $_vtype968 = 0;
-            $xfer += $input->readMapBegin($_ktype967, $_vtype968, $_size966);
-            for ($_i970 = 0; $_i970 < $_size966; ++$_i970)
+            $_size959 = 0;
+            $_ktype960 = 0;
+            $_vtype961 = 0;
+            $xfer += $input->readMapBegin($_ktype960, $_vtype961, $_size959);
+            for ($_i963 = 0; $_i963 < $_size959; ++$_i963)
             {
-              $key971 = '';
-              $val972 = '';
-              $xfer += $input->readString($key971);
-              $xfer += $input->readString($val972);
-              $this->success[$key971] = $val972;
+              $key964 = '';
+              $val965 = '';
+              $xfer += $input->readString($key964);
+              $xfer += $input->readString($val965);
+              $this->success[$key964] = $val965;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -29854,10 +29630,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success));
         {
-          foreach ($this->success as $kiter973 => $viter974)
+          foreach ($this->success as $kiter966 => $viter967)
           {
-            $xfer += $output->writeString($kiter973);
-            $xfer += $output->writeString($viter974);
+            $xfer += $output->writeString($kiter966);
+            $xfer += $output->writeString($viter967);
           }
         }
         $output->writeMapEnd();
@@ -29977,17 +29753,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args {
         case 3:
           if ($ftype == TType::MAP) {
             $this->part_vals = array();
-            $_size975 = 0;
-            $_ktype976 = 0;
-            $_vtype977 = 0;
-            $xfer += $input->readMapBegin($_ktype976, $_vtype977, $_size975);
-            for ($_i979 = 0; $_i979 < $_size975; ++$_i979)
+            $_size968 = 0;
+            $_ktype969 = 0;
+            $_vtype970 = 0;
+            $xfer += $input->readMapBegin($_ktype969, $_vtype970, $_size968);
+            for ($_i972 = 0; $_i972 < $_size968; ++$_i972)
             {
-              $key980 = '';
-              $val981 = '';
-              $xfer += $input->readString($key980);
-              $xfer += $input->readString($val981);
-              $this->part_vals[$key980] = $val981;
+              $key973 = '';
+              $val974 = '';
+              $xfer += $input->readString($key973);
+              $xfer += $input->readString($val974);
+              $this->part_vals[$key973] = $val974;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -30032,10 +29808,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $kiter982 => $viter983)
+          foreach ($this->part_vals as $kiter975 => $viter976)
           {
-            $xfer += $output->writeString($kiter982);
-            $xfer += $output->writeString($viter983);
+            $xfer += $output->writeString($kiter975);
+            $xfer += $output->writeString($viter976);
           }
         }
         $output->writeMapEnd();
@@ -30357,17 +30133,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args {
         case 3:
           if ($ftype == TType::MAP) {
             $this->part_vals = array();
-            $_size984 = 0;
-            $_ktype985 = 0;
-            $_vtype986 = 0;
-            $xfer += $input->readMapBegin($_ktype985, $_vtype986, $_size984);
-            for ($_i988 = 0; $_i988 < $_size984; ++$_i988)
+            $_size977 = 0;
+            $_ktype978 = 0;
+            $_vtype979 = 0;
+            $xfer += $input->readMapBegin($_ktype978, $_vtype979, $_size977);
+            for ($_i981 = 0; $_i981 < $_size977; ++$_i981)
             {
-              $key989 = '';
-              $val990 = '';
-              $xfer += $input->readString($key989);
-              $xfer += $input->readString($val990);
-              $this->part_vals[$key989] = $val990;
+              $key982 = '';
+              $val983 = '';
+              $xfer += $input->readString($key982);
+              $xfer += $input->readString($val983);
+              $this->part_vals[$key982] = $val983;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -30412,10 +30188,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $kiter991 => $viter992)
+          foreach ($this->part_vals as $kiter984 => $viter985)
           {
-            $xfer += $output->writeString($kiter991);
-            $xfer += $output->writeString($viter992);
+            $xfer += $output->writeString($kiter984);
+            $xfer += $output->writeString($viter985);
           }
         }
         $output->writeMapEnd();
@@ -31889,15 +31665,15 @@ class ThriftHiveMetastore_get_indexes_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size993 = 0;
-            $_etype996 = 0;
-            $xfer += $input->readListBegin($_etype996, $_size993);
-            for ($_i997 = 0; $_i997 < $_size993; ++$_i997)
+            $_size986 = 0;
+            $_etype989 = 0;
+            $xfer += $input->readListBegin($_etype989, $_size986);
+            for ($_i990 = 0; $_i990 < $_size986; ++$_i990)
             {
-              $elem998 = null;
-              $elem998 = new \metastore\Index();
-              $xfer += $elem998->read($input);
-              $this->success []= $elem998;
+              $elem991 = null;
+              $elem991 = new \metastore\Index();
+              $xfer += $elem991->read($input);
+              $this->success []= $elem991;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -31941,9 +31717,9 @@ class ThriftHiveMetastore_get_indexes_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter999)
+          foreach ($this->success as $iter992)
           {
-            $xfer += $iter999->write($output);
+            $xfer += $iter992->write($output);
           }
         }
         $output->writeListEnd();
@@ -32150,14 +31926,14 @@ class ThriftHiveMetastore_get_index_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1000 = 0;
-            $_etype1003 = 0;
-            $xfer += $input->readListBegin($_etype1003, $_size1000);
-            for ($_i1004 = 0; $_i1004 < $_size1000; ++$_i1004)
+            $_size993 = 0;
+            $_etype996 = 0;
+            $xfer += $input->readListBegin($_etype996, $_size993);
+            for ($_i997 = 0; $_i997 < $_size993; ++$_i997)
             {
-              $elem1005 = null;
-              $xfer += $input->readString($elem1005);
-              $this->success []= $elem1005;
+              $elem998 = null;
+              $xfer += $input->readString($elem998);
+              $this->success []= $elem998;
             }
 

<TRUNCATED>
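
A note on the PHP hunks above: they are pure regeneration churn. The Thrift compiler numbers its temporary variables ($_size, $_etype, $_i, $elem, $iter, $key, $val) with a single file-wide counter, so deleting the write-id structs earlier in the same generated file shifts every later suffix down by seven (e.g. $iter916 becomes $iter909, $_size924 becomes $_size917) while the read/write logic stays byte-for-byte identical. Below is a minimal C++ analogue of the generated list-read pattern, with a plain stream standing in for TProtocol; all names in this sketch are illustrative, not part of the generated API.

    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    // Toy analogue of the generated list-read loop: a count followed by that
    // many elements, with numbered temporaries like the bindings emit.
    static uint32_t readStringList(std::istream& in, std::vector<std::string>& out) {
      uint32_t xfer = 0;
      uint32_t _size0 = 0;               // readListBegin(_etype, _size)
      in >> _size0;
      ++xfer;
      for (uint32_t _i1 = 0; _i1 < _size0; ++_i1) {
        std::string _elem2;              // one numbered temporary per element
        in >> _elem2;                    // readString(_elem)
        ++xfer;
        out.push_back(_elem2);           // $this->names []= $elem...
      }
      return xfer;                       // readListEnd() elided in this toy
    }

    int main() {
      std::istringstream wire("3 p1 p2 p3");
      std::vector<std::string> names;
      readStringList(wire, names);
      for (size_t i = 0; i < names.size(); ++i) std::cout << names[i] << '\n';
    }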

[16/18] hive git commit: HIVE-14879 : integrate MM tables into ACID: replace MM metastore calls and structures with ACID ones (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index 843f4b3..ca71711 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -176,10 +176,6 @@ class ThriftHiveMetastoreIf : virtual public  ::facebook::fb303::FacebookService
   virtual void put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) = 0;
   virtual void clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) = 0;
   virtual void cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req) = 0;
-  virtual void get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req) = 0;
-  virtual void finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req) = 0;
-  virtual void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req) = 0;
-  virtual void get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req) = 0;
 };
 
 class ThriftHiveMetastoreIfFactory : virtual public  ::facebook::fb303::FacebookServiceIfFactory {
@@ -699,18 +695,6 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p
   void cache_file_metadata(CacheFileMetadataResult& /* _return */, const CacheFileMetadataRequest& /* req */) {
     return;
   }
-  void get_next_write_id(GetNextWriteIdResult& /* _return */, const GetNextWriteIdRequest& /* req */) {
-    return;
-  }
-  void finalize_write_id(FinalizeWriteIdResult& /* _return */, const FinalizeWriteIdRequest& /* req */) {
-    return;
-  }
-  void heartbeat_write_id(HeartbeatWriteIdResult& /* _return */, const HeartbeatWriteIdRequest& /* req */) {
-    return;
-  }
-  void get_valid_write_ids(GetValidWriteIdsResult& /* _return */, const GetValidWriteIdsRequest& /* req */) {
-    return;
-  }
 };
 
 typedef struct _ThriftHiveMetastore_getMetaConf_args__isset {
@@ -19739,422 +19723,6 @@ class ThriftHiveMetastore_cache_file_metadata_presult {
 
 };
 
-typedef struct _ThriftHiveMetastore_get_next_write_id_args__isset {
-  _ThriftHiveMetastore_get_next_write_id_args__isset() : req(false) {}
-  bool req :1;
-} _ThriftHiveMetastore_get_next_write_id_args__isset;
-
-class ThriftHiveMetastore_get_next_write_id_args {
- public:
-
-  ThriftHiveMetastore_get_next_write_id_args(const ThriftHiveMetastore_get_next_write_id_args&);
-  ThriftHiveMetastore_get_next_write_id_args& operator=(const ThriftHiveMetastore_get_next_write_id_args&);
-  ThriftHiveMetastore_get_next_write_id_args() {
-  }
-
-  virtual ~ThriftHiveMetastore_get_next_write_id_args() throw();
-  GetNextWriteIdRequest req;
-
-  _ThriftHiveMetastore_get_next_write_id_args__isset __isset;
-
-  void __set_req(const GetNextWriteIdRequest& val);
-
-  bool operator == (const ThriftHiveMetastore_get_next_write_id_args & rhs) const
-  {
-    if (!(req == rhs.req))
-      return false;
-    return true;
-  }
-  bool operator != (const ThriftHiveMetastore_get_next_write_id_args &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const ThriftHiveMetastore_get_next_write_id_args & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-};
-
-
-class ThriftHiveMetastore_get_next_write_id_pargs {
- public:
-
-
-  virtual ~ThriftHiveMetastore_get_next_write_id_pargs() throw();
-  const GetNextWriteIdRequest* req;
-
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-};
-
-typedef struct _ThriftHiveMetastore_get_next_write_id_result__isset {
-  _ThriftHiveMetastore_get_next_write_id_result__isset() : success(false) {}
-  bool success :1;
-} _ThriftHiveMetastore_get_next_write_id_result__isset;
-
-class ThriftHiveMetastore_get_next_write_id_result {
- public:
-
-  ThriftHiveMetastore_get_next_write_id_result(const ThriftHiveMetastore_get_next_write_id_result&);
-  ThriftHiveMetastore_get_next_write_id_result& operator=(const ThriftHiveMetastore_get_next_write_id_result&);
-  ThriftHiveMetastore_get_next_write_id_result() {
-  }
-
-  virtual ~ThriftHiveMetastore_get_next_write_id_result() throw();
-  GetNextWriteIdResult success;
-
-  _ThriftHiveMetastore_get_next_write_id_result__isset __isset;
-
-  void __set_success(const GetNextWriteIdResult& val);
-
-  bool operator == (const ThriftHiveMetastore_get_next_write_id_result & rhs) const
-  {
-    if (!(success == rhs.success))
-      return false;
-    return true;
-  }
-  bool operator != (const ThriftHiveMetastore_get_next_write_id_result &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const ThriftHiveMetastore_get_next_write_id_result & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-};
-
-typedef struct _ThriftHiveMetastore_get_next_write_id_presult__isset {
-  _ThriftHiveMetastore_get_next_write_id_presult__isset() : success(false) {}
-  bool success :1;
-} _ThriftHiveMetastore_get_next_write_id_presult__isset;
-
-class ThriftHiveMetastore_get_next_write_id_presult {
- public:
-
-
-  virtual ~ThriftHiveMetastore_get_next_write_id_presult() throw();
-  GetNextWriteIdResult* success;
-
-  _ThriftHiveMetastore_get_next_write_id_presult__isset __isset;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-
-};
-
-typedef struct _ThriftHiveMetastore_finalize_write_id_args__isset {
-  _ThriftHiveMetastore_finalize_write_id_args__isset() : req(false) {}
-  bool req :1;
-} _ThriftHiveMetastore_finalize_write_id_args__isset;
-
-class ThriftHiveMetastore_finalize_write_id_args {
- public:
-
-  ThriftHiveMetastore_finalize_write_id_args(const ThriftHiveMetastore_finalize_write_id_args&);
-  ThriftHiveMetastore_finalize_write_id_args& operator=(const ThriftHiveMetastore_finalize_write_id_args&);
-  ThriftHiveMetastore_finalize_write_id_args() {
-  }
-
-  virtual ~ThriftHiveMetastore_finalize_write_id_args() throw();
-  FinalizeWriteIdRequest req;
-
-  _ThriftHiveMetastore_finalize_write_id_args__isset __isset;
-
-  void __set_req(const FinalizeWriteIdRequest& val);
-
-  bool operator == (const ThriftHiveMetastore_finalize_write_id_args & rhs) const
-  {
-    if (!(req == rhs.req))
-      return false;
-    return true;
-  }
-  bool operator != (const ThriftHiveMetastore_finalize_write_id_args &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const ThriftHiveMetastore_finalize_write_id_args & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-};
-
-
-class ThriftHiveMetastore_finalize_write_id_pargs {
- public:
-
-
-  virtual ~ThriftHiveMetastore_finalize_write_id_pargs() throw();
-  const FinalizeWriteIdRequest* req;
-
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-};
-
-typedef struct _ThriftHiveMetastore_finalize_write_id_result__isset {
-  _ThriftHiveMetastore_finalize_write_id_result__isset() : success(false) {}
-  bool success :1;
-} _ThriftHiveMetastore_finalize_write_id_result__isset;
-
-class ThriftHiveMetastore_finalize_write_id_result {
- public:
-
-  ThriftHiveMetastore_finalize_write_id_result(const ThriftHiveMetastore_finalize_write_id_result&);
-  ThriftHiveMetastore_finalize_write_id_result& operator=(const ThriftHiveMetastore_finalize_write_id_result&);
-  ThriftHiveMetastore_finalize_write_id_result() {
-  }
-
-  virtual ~ThriftHiveMetastore_finalize_write_id_result() throw();
-  FinalizeWriteIdResult success;
-
-  _ThriftHiveMetastore_finalize_write_id_result__isset __isset;
-
-  void __set_success(const FinalizeWriteIdResult& val);
-
-  bool operator == (const ThriftHiveMetastore_finalize_write_id_result & rhs) const
-  {
-    if (!(success == rhs.success))
-      return false;
-    return true;
-  }
-  bool operator != (const ThriftHiveMetastore_finalize_write_id_result &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const ThriftHiveMetastore_finalize_write_id_result & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-};
-
-typedef struct _ThriftHiveMetastore_finalize_write_id_presult__isset {
-  _ThriftHiveMetastore_finalize_write_id_presult__isset() : success(false) {}
-  bool success :1;
-} _ThriftHiveMetastore_finalize_write_id_presult__isset;
-
-class ThriftHiveMetastore_finalize_write_id_presult {
- public:
-
-
-  virtual ~ThriftHiveMetastore_finalize_write_id_presult() throw();
-  FinalizeWriteIdResult* success;
-
-  _ThriftHiveMetastore_finalize_write_id_presult__isset __isset;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-
-};
-
-typedef struct _ThriftHiveMetastore_heartbeat_write_id_args__isset {
-  _ThriftHiveMetastore_heartbeat_write_id_args__isset() : req(false) {}
-  bool req :1;
-} _ThriftHiveMetastore_heartbeat_write_id_args__isset;
-
-class ThriftHiveMetastore_heartbeat_write_id_args {
- public:
-
-  ThriftHiveMetastore_heartbeat_write_id_args(const ThriftHiveMetastore_heartbeat_write_id_args&);
-  ThriftHiveMetastore_heartbeat_write_id_args& operator=(const ThriftHiveMetastore_heartbeat_write_id_args&);
-  ThriftHiveMetastore_heartbeat_write_id_args() {
-  }
-
-  virtual ~ThriftHiveMetastore_heartbeat_write_id_args() throw();
-  HeartbeatWriteIdRequest req;
-
-  _ThriftHiveMetastore_heartbeat_write_id_args__isset __isset;
-
-  void __set_req(const HeartbeatWriteIdRequest& val);
-
-  bool operator == (const ThriftHiveMetastore_heartbeat_write_id_args & rhs) const
-  {
-    if (!(req == rhs.req))
-      return false;
-    return true;
-  }
-  bool operator != (const ThriftHiveMetastore_heartbeat_write_id_args &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const ThriftHiveMetastore_heartbeat_write_id_args & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-};
-
-
-class ThriftHiveMetastore_heartbeat_write_id_pargs {
- public:
-
-
-  virtual ~ThriftHiveMetastore_heartbeat_write_id_pargs() throw();
-  const HeartbeatWriteIdRequest* req;
-
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-};
-
-typedef struct _ThriftHiveMetastore_heartbeat_write_id_result__isset {
-  _ThriftHiveMetastore_heartbeat_write_id_result__isset() : success(false) {}
-  bool success :1;
-} _ThriftHiveMetastore_heartbeat_write_id_result__isset;
-
-class ThriftHiveMetastore_heartbeat_write_id_result {
- public:
-
-  ThriftHiveMetastore_heartbeat_write_id_result(const ThriftHiveMetastore_heartbeat_write_id_result&);
-  ThriftHiveMetastore_heartbeat_write_id_result& operator=(const ThriftHiveMetastore_heartbeat_write_id_result&);
-  ThriftHiveMetastore_heartbeat_write_id_result() {
-  }
-
-  virtual ~ThriftHiveMetastore_heartbeat_write_id_result() throw();
-  HeartbeatWriteIdResult success;
-
-  _ThriftHiveMetastore_heartbeat_write_id_result__isset __isset;
-
-  void __set_success(const HeartbeatWriteIdResult& val);
-
-  bool operator == (const ThriftHiveMetastore_heartbeat_write_id_result & rhs) const
-  {
-    if (!(success == rhs.success))
-      return false;
-    return true;
-  }
-  bool operator != (const ThriftHiveMetastore_heartbeat_write_id_result &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const ThriftHiveMetastore_heartbeat_write_id_result & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-};
-
-typedef struct _ThriftHiveMetastore_heartbeat_write_id_presult__isset {
-  _ThriftHiveMetastore_heartbeat_write_id_presult__isset() : success(false) {}
-  bool success :1;
-} _ThriftHiveMetastore_heartbeat_write_id_presult__isset;
-
-class ThriftHiveMetastore_heartbeat_write_id_presult {
- public:
-
-
-  virtual ~ThriftHiveMetastore_heartbeat_write_id_presult() throw();
-  HeartbeatWriteIdResult* success;
-
-  _ThriftHiveMetastore_heartbeat_write_id_presult__isset __isset;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-
-};
-
-typedef struct _ThriftHiveMetastore_get_valid_write_ids_args__isset {
-  _ThriftHiveMetastore_get_valid_write_ids_args__isset() : req(false) {}
-  bool req :1;
-} _ThriftHiveMetastore_get_valid_write_ids_args__isset;
-
-class ThriftHiveMetastore_get_valid_write_ids_args {
- public:
-
-  ThriftHiveMetastore_get_valid_write_ids_args(const ThriftHiveMetastore_get_valid_write_ids_args&);
-  ThriftHiveMetastore_get_valid_write_ids_args& operator=(const ThriftHiveMetastore_get_valid_write_ids_args&);
-  ThriftHiveMetastore_get_valid_write_ids_args() {
-  }
-
-  virtual ~ThriftHiveMetastore_get_valid_write_ids_args() throw();
-  GetValidWriteIdsRequest req;
-
-  _ThriftHiveMetastore_get_valid_write_ids_args__isset __isset;
-
-  void __set_req(const GetValidWriteIdsRequest& val);
-
-  bool operator == (const ThriftHiveMetastore_get_valid_write_ids_args & rhs) const
-  {
-    if (!(req == rhs.req))
-      return false;
-    return true;
-  }
-  bool operator != (const ThriftHiveMetastore_get_valid_write_ids_args &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const ThriftHiveMetastore_get_valid_write_ids_args & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-};
-
-
-class ThriftHiveMetastore_get_valid_write_ids_pargs {
- public:
-
-
-  virtual ~ThriftHiveMetastore_get_valid_write_ids_pargs() throw();
-  const GetValidWriteIdsRequest* req;
-
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-};
-
-typedef struct _ThriftHiveMetastore_get_valid_write_ids_result__isset {
-  _ThriftHiveMetastore_get_valid_write_ids_result__isset() : success(false) {}
-  bool success :1;
-} _ThriftHiveMetastore_get_valid_write_ids_result__isset;
-
-class ThriftHiveMetastore_get_valid_write_ids_result {
- public:
-
-  ThriftHiveMetastore_get_valid_write_ids_result(const ThriftHiveMetastore_get_valid_write_ids_result&);
-  ThriftHiveMetastore_get_valid_write_ids_result& operator=(const ThriftHiveMetastore_get_valid_write_ids_result&);
-  ThriftHiveMetastore_get_valid_write_ids_result() {
-  }
-
-  virtual ~ThriftHiveMetastore_get_valid_write_ids_result() throw();
-  GetValidWriteIdsResult success;
-
-  _ThriftHiveMetastore_get_valid_write_ids_result__isset __isset;
-
-  void __set_success(const GetValidWriteIdsResult& val);
-
-  bool operator == (const ThriftHiveMetastore_get_valid_write_ids_result & rhs) const
-  {
-    if (!(success == rhs.success))
-      return false;
-    return true;
-  }
-  bool operator != (const ThriftHiveMetastore_get_valid_write_ids_result &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const ThriftHiveMetastore_get_valid_write_ids_result & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-};
-
-typedef struct _ThriftHiveMetastore_get_valid_write_ids_presult__isset {
-  _ThriftHiveMetastore_get_valid_write_ids_presult__isset() : success(false) {}
-  bool success :1;
-} _ThriftHiveMetastore_get_valid_write_ids_presult__isset;
-
-class ThriftHiveMetastore_get_valid_write_ids_presult {
- public:
-
-
-  virtual ~ThriftHiveMetastore_get_valid_write_ids_presult() throw();
-  GetValidWriteIdsResult* success;
-
-  _ThriftHiveMetastore_get_valid_write_ids_presult__isset __isset;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-
-};
-
 class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public  ::facebook::fb303::FacebookServiceClient {
  public:
   ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) :
@@ -20628,18 +20196,6 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public
   void cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req);
   void send_cache_file_metadata(const CacheFileMetadataRequest& req);
   void recv_cache_file_metadata(CacheFileMetadataResult& _return);
-  void get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req);
-  void send_get_next_write_id(const GetNextWriteIdRequest& req);
-  void recv_get_next_write_id(GetNextWriteIdResult& _return);
-  void finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req);
-  void send_finalize_write_id(const FinalizeWriteIdRequest& req);
-  void recv_finalize_write_id(FinalizeWriteIdResult& _return);
-  void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req);
-  void send_heartbeat_write_id(const HeartbeatWriteIdRequest& req);
-  void recv_heartbeat_write_id(HeartbeatWriteIdResult& _return);
-  void get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req);
-  void send_get_valid_write_ids(const GetValidWriteIdsRequest& req);
-  void recv_get_valid_write_ids(GetValidWriteIdsResult& _return);
 };
 
 class ThriftHiveMetastoreProcessor : public  ::facebook::fb303::FacebookServiceProcessor {
@@ -20804,10 +20360,6 @@ class ThriftHiveMetastoreProcessor : public  ::facebook::fb303::FacebookServiceP
   void process_put_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_clear_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_cache_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
-  void process_get_next_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
-  void process_finalize_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
-  void process_heartbeat_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
-  void process_get_valid_write_ids(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
  public:
   ThriftHiveMetastoreProcessor(boost::shared_ptr<ThriftHiveMetastoreIf> iface) :
      ::facebook::fb303::FacebookServiceProcessor(iface),
@@ -20966,10 +20518,6 @@ class ThriftHiveMetastoreProcessor : public  ::facebook::fb303::FacebookServiceP
     processMap_["put_file_metadata"] = &ThriftHiveMetastoreProcessor::process_put_file_metadata;
     processMap_["clear_file_metadata"] = &ThriftHiveMetastoreProcessor::process_clear_file_metadata;
     processMap_["cache_file_metadata"] = &ThriftHiveMetastoreProcessor::process_cache_file_metadata;
-    processMap_["get_next_write_id"] = &ThriftHiveMetastoreProcessor::process_get_next_write_id;
-    processMap_["finalize_write_id"] = &ThriftHiveMetastoreProcessor::process_finalize_write_id;
-    processMap_["heartbeat_write_id"] = &ThriftHiveMetastoreProcessor::process_heartbeat_write_id;
-    processMap_["get_valid_write_ids"] = &ThriftHiveMetastoreProcessor::process_get_valid_write_ids;
   }
 
   virtual ~ThriftHiveMetastoreProcessor() {}
@@ -22479,46 +22027,6 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
     return;
   }
 
-  void get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req) {
-    size_t sz = ifaces_.size();
-    size_t i = 0;
-    for (; i < (sz - 1); ++i) {
-      ifaces_[i]->get_next_write_id(_return, req);
-    }
-    ifaces_[i]->get_next_write_id(_return, req);
-    return;
-  }
-
-  void finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req) {
-    size_t sz = ifaces_.size();
-    size_t i = 0;
-    for (; i < (sz - 1); ++i) {
-      ifaces_[i]->finalize_write_id(_return, req);
-    }
-    ifaces_[i]->finalize_write_id(_return, req);
-    return;
-  }
-
-  void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req) {
-    size_t sz = ifaces_.size();
-    size_t i = 0;
-    for (; i < (sz - 1); ++i) {
-      ifaces_[i]->heartbeat_write_id(_return, req);
-    }
-    ifaces_[i]->heartbeat_write_id(_return, req);
-    return;
-  }
-
-  void get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req) {
-    size_t sz = ifaces_.size();
-    size_t i = 0;
-    for (; i < (sz - 1); ++i) {
-      ifaces_[i]->get_valid_write_ids(_return, req);
-    }
-    ifaces_[i]->get_valid_write_ids(_return, req);
-    return;
-  }
-
 };
 
 // The 'concurrent' client is a thread safe client that correctly handles
@@ -22997,18 +22505,6 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
   void cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req);
   int32_t send_cache_file_metadata(const CacheFileMetadataRequest& req);
   void recv_cache_file_metadata(CacheFileMetadataResult& _return, const int32_t seqid);
-  void get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req);
-  int32_t send_get_next_write_id(const GetNextWriteIdRequest& req);
-  void recv_get_next_write_id(GetNextWriteIdResult& _return, const int32_t seqid);
-  void finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req);
-  int32_t send_finalize_write_id(const FinalizeWriteIdRequest& req);
-  void recv_finalize_write_id(FinalizeWriteIdResult& _return, const int32_t seqid);
-  void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req);
-  int32_t send_heartbeat_write_id(const HeartbeatWriteIdRequest& req);
-  void recv_heartbeat_write_id(HeartbeatWriteIdResult& _return, const int32_t seqid);
-  void get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req);
-  int32_t send_get_valid_write_ids(const GetValidWriteIdsRequest& req);
-  void recv_get_valid_write_ids(GetValidWriteIdsResult& _return, const int32_t seqid);
 };
 
 #ifdef _WIN32
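
This header is where the surface area of the change is easiest to see: the generated code declares the same service several ways in parallel (the abstract *If interface, the no-op *Null stub, *Client, *Processor, *Multiface, and *ConcurrentClient), so retiring get_next_write_id, finalize_write_id, heartbeat_write_id and get_valid_write_ids deletes a matching block from each. Here is a self-contained toy model of the Null/Multiface pattern removed above, with one hypothetical method standing in for the real RPCs; like the generated fan-out, it assumes at least one registered iface and keeps the last call's _return.

    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    class EchoIf {                              // plays ThriftHiveMetastoreIf
     public:
      virtual ~EchoIf() {}
      virtual void echo(std::string& _return, const std::string& req) = 0;
    };

    class EchoNull : virtual public EchoIf {    // plays ThriftHiveMetastoreNull
     public:
      void echo(std::string& /* _return */, const std::string& /* req */) {
        return;                                 // deliberately does nothing
      }
    };

    class EchoMultiface : virtual public EchoIf {  // plays the *Multiface
     public:
      explicit EchoMultiface(const std::vector<std::shared_ptr<EchoIf> >& ifaces)
          : ifaces_(ifaces) {}
      void echo(std::string& _return, const std::string& req) {
        size_t sz = ifaces_.size();             // same shape as the removed fan-out
        size_t i = 0;
        for (; i < (sz - 1); ++i) {
          ifaces_[i]->echo(_return, req);       // earlier ifaces called for effect
        }
        ifaces_[i]->echo(_return, req);         // last iface's _return is kept
      }
     private:
      std::vector<std::shared_ptr<EchoIf> > ifaces_;
    };

    class EchoReal : virtual public EchoIf {    // concrete handler for the demo
     public:
      void echo(std::string& _return, const std::string& req) { _return = req + "!"; }
    };

    int main() {
      std::vector<std::shared_ptr<EchoIf> > ifaces;
      ifaces.push_back(std::make_shared<EchoNull>());
      ifaces.push_back(std::make_shared<EchoReal>());
      EchoMultiface mf(ifaces);
      std::string out;
      mf.echo(out, "ping");
      std::cout << out << '\n';                 // prints "ping!"
    }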

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
index 34c37e9..b4a2a92 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -792,26 +792,6 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
     printf("cache_file_metadata\n");
   }
 
-  void get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req) {
-    // Your implementation goes here
-    printf("get_next_write_id\n");
-  }
-
-  void finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req) {
-    // Your implementation goes here
-    printf("finalize_write_id\n");
-  }
-
-  void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req) {
-    // Your implementation goes here
-    printf("heartbeat_write_id\n");
-  }
-
-  void get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req) {
-    // Your implementation goes here
-    printf("get_valid_write_ids\n");
-  }
-
 };
 
 int main(int argc, char **argv) {
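
The skeleton is the last generated artifact that names the four RPCs: every service method gets an empty printf stub here, and (per the Processor declarations shown above) an entry in processMap_ keyed by method name. Dropping an RPC therefore removes both the stub and the map entry, so an old client that still sends get_next_write_id falls through to the processor's unknown-method path. A toy dispatch table in the same style follows; the payload type and the surviving method chosen for the demo are illustrative.

    #include <iostream>
    #include <map>
    #include <string>

    // Toy analogue of ThriftHiveMetastoreProcessor: the constructor fills a
    // name -> member-function-pointer map; dispatch consults it per request.
    class Processor {
      typedef void (Processor::*ProcessFn)(const std::string&);
      std::map<std::string, ProcessFn> processMap_;

      void process_cache_file_metadata(const std::string& req) {
        std::cout << "cache_file_metadata(" << req << ")\n";
      }

     public:
      Processor() {
        processMap_["cache_file_metadata"] = &Processor::process_cache_file_metadata;
        // get_next_write_id et al. are simply no longer registered here.
      }
      bool dispatch(const std::string& name, const std::string& req) {
        std::map<std::string, ProcessFn>::const_iterator it = processMap_.find(name);
        if (it == processMap_.end()) return false;   // unknown-method path
        (this->*(it->second))(req);
        return true;
      }
    };

    int main() {
      Processor p;
      p.dispatch("cache_file_metadata", "req1");
      if (!p.dispatch("get_next_write_id", "req2"))  // removed by this patch
        std::cout << "unknown method: get_next_write_id\n";
    }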


[15/18] hive git commit: HIVE-14879 : integrate MM tables into ACID: replace MM metastore calls and structures with ACID ones (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index cfa2e49..fd2bdde 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -4486,16 +4486,6 @@ void Table::__set_rewriteEnabled(const bool val) {
 __isset.rewriteEnabled = true;
 }
 
-void Table::__set_mmNextWriteId(const int64_t val) {
-  this->mmNextWriteId = val;
-__isset.mmNextWriteId = true;
-}
-
-void Table::__set_mmWatermarkWriteId(const int64_t val) {
-  this->mmWatermarkWriteId = val;
-__isset.mmWatermarkWriteId = true;
-}
-
 uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -4664,22 +4654,6 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
           xfer += iprot->skip(ftype);
         }
         break;
-      case 16:
-        if (ftype == ::apache::thrift::protocol::T_I64) {
-          xfer += iprot->readI64(this->mmNextWriteId);
-          this->__isset.mmNextWriteId = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 17:
-        if (ftype == ::apache::thrift::protocol::T_I64) {
-          xfer += iprot->readI64(this->mmWatermarkWriteId);
-          this->__isset.mmWatermarkWriteId = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -4777,16 +4751,6 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
     xfer += oprot->writeBool(this->rewriteEnabled);
     xfer += oprot->writeFieldEnd();
   }
-  if (this->__isset.mmNextWriteId) {
-    xfer += oprot->writeFieldBegin("mmNextWriteId", ::apache::thrift::protocol::T_I64, 16);
-    xfer += oprot->writeI64(this->mmNextWriteId);
-    xfer += oprot->writeFieldEnd();
-  }
-  if (this->__isset.mmWatermarkWriteId) {
-    xfer += oprot->writeFieldBegin("mmWatermarkWriteId", ::apache::thrift::protocol::T_I64, 17);
-    xfer += oprot->writeI64(this->mmWatermarkWriteId);
-    xfer += oprot->writeFieldEnd();
-  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -4809,8 +4773,6 @@ void swap(Table &a, Table &b) {
   swap(a.privileges, b.privileges);
   swap(a.temporary, b.temporary);
   swap(a.rewriteEnabled, b.rewriteEnabled);
-  swap(a.mmNextWriteId, b.mmNextWriteId);
-  swap(a.mmWatermarkWriteId, b.mmWatermarkWriteId);
   swap(a.__isset, b.__isset);
 }
 
@@ -4830,8 +4792,6 @@ Table::Table(const Table& other221) {
   privileges = other221.privileges;
   temporary = other221.temporary;
   rewriteEnabled = other221.rewriteEnabled;
-  mmNextWriteId = other221.mmNextWriteId;
-  mmWatermarkWriteId = other221.mmWatermarkWriteId;
   __isset = other221.__isset;
 }
 Table& Table::operator=(const Table& other222) {
@@ -4850,8 +4810,6 @@ Table& Table::operator=(const Table& other222) {
   privileges = other222.privileges;
   temporary = other222.temporary;
   rewriteEnabled = other222.rewriteEnabled;
-  mmNextWriteId = other222.mmNextWriteId;
-  mmWatermarkWriteId = other222.mmWatermarkWriteId;
   __isset = other222.__isset;
   return *this;
 }
@@ -4873,8 +4831,6 @@ void Table::printTo(std::ostream& out) const {
   out << ", " << "privileges="; (__isset.privileges ? (out << to_string(privileges)) : (out << "<null>"));
   out << ", " << "temporary="; (__isset.temporary ? (out << to_string(temporary)) : (out << "<null>"));
   out << ", " << "rewriteEnabled="; (__isset.rewriteEnabled ? (out << to_string(rewriteEnabled)) : (out << "<null>"));
-  out << ", " << "mmNextWriteId="; (__isset.mmNextWriteId ? (out << to_string(mmNextWriteId)) : (out << "<null>"));
-  out << ", " << "mmWatermarkWriteId="; (__isset.mmWatermarkWriteId ? (out << to_string(mmWatermarkWriteId)) : (out << "<null>"));
   out << ")";
 }
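
Worth noting on wire compatibility: mmNextWriteId and mmWatermarkWriteId were optional fields (ids 16 and 17, written only when __isset), and the Table::read loop above keeps its default branch calling iprot->skip(ftype) for any field id it does not recognize, so a Table serialized by an older writer still deserializes cleanly after this patch. A toy tag-based decoder showing that skip-unknown behavior; the tag numbers 16/17 come from the removed fields, while the surviving field's id and everything else in the sketch are assumptions for illustration.

    #include <cstdint>
    #include <iostream>
    #include <utility>
    #include <vector>

    // Toy tag/value decoder: known tags are stored, unknown tags are skipped,
    // mirroring the default: iprot->skip(ftype) branch in the generated read().
    struct TableLite {
      bool rewriteEnabled;

      void read(const std::vector<std::pair<int, int64_t> >& wire) {
        rewriteEnabled = false;
        for (size_t i = 0; i < wire.size(); ++i) {
          switch (wire[i].first) {
            case 15:                          // a field the reader still knows
              rewriteEnabled = (wire[i].second != 0);
              break;
            default:                          // ids 16/17 land here post-patch
              break;                          // skip(ftype)
          }
        }
      }
    };

    int main() {
      std::vector<std::pair<int, int64_t> > wire;
      wire.push_back(std::make_pair(15, 1));   // the known field
      wire.push_back(std::make_pair(16, 42));  // mmNextWriteId from an old writer
      wire.push_back(std::make_pair(17, 40));  // mmWatermarkWriteId, likewise
      TableLite t;
      t.read(wire);
      std::cout << "rewriteEnabled=" << t.rewriteEnabled << '\n';  // unknowns ignored
    }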
 
@@ -18017,19 +17973,16 @@ void CacheFileMetadataRequest::printTo(std::ostream& out) const {
 }
 
 
-GetNextWriteIdRequest::~GetNextWriteIdRequest() throw() {
+GetAllFunctionsResponse::~GetAllFunctionsResponse() throw() {
 }
 
 
-void GetNextWriteIdRequest::__set_dbName(const std::string& val) {
-  this->dbName = val;
-}
-
-void GetNextWriteIdRequest::__set_tblName(const std::string& val) {
-  this->tblName = val;
+void GetAllFunctionsResponse::__set_functions(const std::vector<Function> & val) {
+  this->functions = val;
+__isset.functions = true;
 }
 
-uint32_t GetNextWriteIdRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -18041,8 +17994,6 @@ uint32_t GetNextWriteIdRequest::read(::apache::thrift::protocol::TProtocol* ipro
 
   using ::apache::thrift::protocol::TProtocolException;
 
-  bool isset_dbName = false;
-  bool isset_tblName = false;
 
   while (true)
   {
@@ -18053,17 +18004,21 @@ uint32_t GetNextWriteIdRequest::read(::apache::thrift::protocol::TProtocol* ipro
     switch (fid)
     {
       case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readString(this->dbName);
-          isset_dbName = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
-        if (ftype == ::apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readString(this->tblName);
-          isset_tblName = true;
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->functions.clear();
+            uint32_t _size746;
+            ::apache::thrift::protocol::TType _etype749;
+            xfer += iprot->readListBegin(_etype749, _size746);
+            this->functions.resize(_size746);
+            uint32_t _i750;
+            for (_i750 = 0; _i750 < _size746; ++_i750)
+            {
+              xfer += this->functions[_i750].read(iprot);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.functions = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -18077,64 +18032,64 @@ uint32_t GetNextWriteIdRequest::read(::apache::thrift::protocol::TProtocol* ipro
 
   xfer += iprot->readStructEnd();
 
-  if (!isset_dbName)
-    throw TProtocolException(TProtocolException::INVALID_DATA);
-  if (!isset_tblName)
-    throw TProtocolException(TProtocolException::INVALID_DATA);
   return xfer;
 }
 
-uint32_t GetNextWriteIdRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t GetAllFunctionsResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("GetNextWriteIdRequest");
-
-  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
-  xfer += oprot->writeString(this->dbName);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2);
-  xfer += oprot->writeString(this->tblName);
-  xfer += oprot->writeFieldEnd();
+  xfer += oprot->writeStructBegin("GetAllFunctionsResponse");
 
+  if (this->__isset.functions) {
+    xfer += oprot->writeFieldBegin("functions", ::apache::thrift::protocol::T_LIST, 1);
+    {
+      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->functions.size()));
+      std::vector<Function> ::const_iterator _iter751;
+      for (_iter751 = this->functions.begin(); _iter751 != this->functions.end(); ++_iter751)
+      {
+        xfer += (*_iter751).write(oprot);
+      }
+      xfer += oprot->writeListEnd();
+    }
+    xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
 }
 
-void swap(GetNextWriteIdRequest &a, GetNextWriteIdRequest &b) {
+void swap(GetAllFunctionsResponse &a, GetAllFunctionsResponse &b) {
   using ::std::swap;
-  swap(a.dbName, b.dbName);
-  swap(a.tblName, b.tblName);
+  swap(a.functions, b.functions);
+  swap(a.__isset, b.__isset);
 }
 
-GetNextWriteIdRequest::GetNextWriteIdRequest(const GetNextWriteIdRequest& other746) {
-  dbName = other746.dbName;
-  tblName = other746.tblName;
+GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other752) {
+  functions = other752.functions;
+  __isset = other752.__isset;
 }
-GetNextWriteIdRequest& GetNextWriteIdRequest::operator=(const GetNextWriteIdRequest& other747) {
-  dbName = other747.dbName;
-  tblName = other747.tblName;
+GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other753) {
+  functions = other753.functions;
+  __isset = other753.__isset;
   return *this;
 }
-void GetNextWriteIdRequest::printTo(std::ostream& out) const {
+void GetAllFunctionsResponse::printTo(std::ostream& out) const {
   using ::apache::thrift::to_string;
-  out << "GetNextWriteIdRequest(";
-  out << "dbName=" << to_string(dbName);
-  out << ", " << "tblName=" << to_string(tblName);
+  out << "GetAllFunctionsResponse(";
+  out << "functions="; (__isset.functions ? (out << to_string(functions)) : (out << "<null>"));
   out << ")";
 }
 
 
-GetNextWriteIdResult::~GetNextWriteIdResult() throw() {
+ClientCapabilities::~ClientCapabilities() throw() {
 }
 
 
-void GetNextWriteIdResult::__set_writeId(const int64_t val) {
-  this->writeId = val;
+void ClientCapabilities::__set_values(const std::vector<ClientCapability::type> & val) {
+  this->values = val;
 }
 
-uint32_t GetNextWriteIdResult::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ClientCapabilities::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -18146,7 +18101,7 @@ uint32_t GetNextWriteIdResult::read(::apache::thrift::protocol::TProtocol* iprot
 
   using ::apache::thrift::protocol::TProtocolException;
 
-  bool isset_writeId = false;
+  bool isset_values = false;
 
   while (true)
   {
@@ -18157,9 +18112,23 @@ uint32_t GetNextWriteIdResult::read(::apache::thrift::protocol::TProtocol* iprot
     switch (fid)
     {
       case 1:
-        if (ftype == ::apache::thrift::protocol::T_I64) {
-          xfer += iprot->readI64(this->writeId);
-          isset_writeId = true;
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->values.clear();
+            uint32_t _size754;
+            ::apache::thrift::protocol::TType _etype757;
+            xfer += iprot->readListBegin(_etype757, _size754);
+            this->values.resize(_size754);
+            uint32_t _i758;
+            for (_i758 = 0; _i758 < _size754; ++_i758)
+            {
+              int32_t ecast759;
+              xfer += iprot->readI32(ecast759);
+              this->values[_i758] = (ClientCapability::type)ecast759;
+            }
+            xfer += iprot->readListEnd();
+          }
+          isset_values = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -18173,18 +18142,26 @@ uint32_t GetNextWriteIdResult::read(::apache::thrift::protocol::TProtocol* iprot
 
   xfer += iprot->readStructEnd();
 
-  if (!isset_writeId)
+  if (!isset_values)
     throw TProtocolException(TProtocolException::INVALID_DATA);
   return xfer;
 }
 
-uint32_t GetNextWriteIdResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ClientCapabilities::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("GetNextWriteIdResult");
+  xfer += oprot->writeStructBegin("ClientCapabilities");
 
-  xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 1);
-  xfer += oprot->writeI64(this->writeId);
+  xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
+  {
+    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I32, static_cast<uint32_t>(this->values.size()));
+    std::vector<ClientCapability::type> ::const_iterator _iter760;
+    for (_iter760 = this->values.begin(); _iter760 != this->values.end(); ++_iter760)
+    {
+      xfer += oprot->writeI32((int32_t)(*_iter760));
+    }
+    xfer += oprot->writeListEnd();
+  }
   xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
@@ -18192,47 +18169,44 @@ uint32_t GetNextWriteIdResult::write(::apache::thrift::protocol::TProtocol* opro
   return xfer;
 }
 
-void swap(GetNextWriteIdResult &a, GetNextWriteIdResult &b) {
+void swap(ClientCapabilities &a, ClientCapabilities &b) {
   using ::std::swap;
-  swap(a.writeId, b.writeId);
+  swap(a.values, b.values);
 }
 
-GetNextWriteIdResult::GetNextWriteIdResult(const GetNextWriteIdResult& other748) {
-  writeId = other748.writeId;
+ClientCapabilities::ClientCapabilities(const ClientCapabilities& other761) {
+  values = other761.values;
 }
-GetNextWriteIdResult& GetNextWriteIdResult::operator=(const GetNextWriteIdResult& other749) {
-  writeId = other749.writeId;
+ClientCapabilities& ClientCapabilities::operator=(const ClientCapabilities& other762) {
+  values = other762.values;
   return *this;
 }
-void GetNextWriteIdResult::printTo(std::ostream& out) const {
+void ClientCapabilities::printTo(std::ostream& out) const {
   using ::apache::thrift::to_string;
-  out << "GetNextWriteIdResult(";
-  out << "writeId=" << to_string(writeId);
+  out << "ClientCapabilities(";
+  out << "values=" << to_string(values);
   out << ")";
 }
 
 
-FinalizeWriteIdRequest::~FinalizeWriteIdRequest() throw() {
+GetTableRequest::~GetTableRequest() throw() {
 }
 
 
-void FinalizeWriteIdRequest::__set_dbName(const std::string& val) {
+void GetTableRequest::__set_dbName(const std::string& val) {
   this->dbName = val;
 }
 
-void FinalizeWriteIdRequest::__set_tblName(const std::string& val) {
+void GetTableRequest::__set_tblName(const std::string& val) {
   this->tblName = val;
 }
 
-void FinalizeWriteIdRequest::__set_writeId(const int64_t val) {
-  this->writeId = val;
-}
-
-void FinalizeWriteIdRequest::__set_commit(const bool val) {
-  this->commit = val;
+void GetTableRequest::__set_capabilities(const ClientCapabilities& val) {
+  this->capabilities = val;
+__isset.capabilities = true;
 }
 
-uint32_t FinalizeWriteIdRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t GetTableRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -18246,8 +18220,6 @@ uint32_t FinalizeWriteIdRequest::read(::apache::thrift::protocol::TProtocol* ipr
 
   bool isset_dbName = false;
   bool isset_tblName = false;
-  bool isset_writeId = false;
-  bool isset_commit = false;
 
   while (true)
   {
@@ -18274,17 +18246,9 @@ uint32_t FinalizeWriteIdRequest::read(::apache::thrift::protocol::TProtocol* ipr
         }
         break;
       case 3:
-        if (ftype == ::apache::thrift::protocol::T_I64) {
-          xfer += iprot->readI64(this->writeId);
-          isset_writeId = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 4:
-        if (ftype == ::apache::thrift::protocol::T_BOOL) {
-          xfer += iprot->readBool(this->commit);
-          isset_commit = true;
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->capabilities.read(iprot);
+          this->__isset.capabilities = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -18302,17 +18266,13 @@ uint32_t FinalizeWriteIdRequest::read(::apache::thrift::protocol::TProtocol* ipr
     throw TProtocolException(TProtocolException::INVALID_DATA);
   if (!isset_tblName)
     throw TProtocolException(TProtocolException::INVALID_DATA);
-  if (!isset_writeId)
-    throw TProtocolException(TProtocolException::INVALID_DATA);
-  if (!isset_commit)
-    throw TProtocolException(TProtocolException::INVALID_DATA);
   return xfer;
 }
 
-uint32_t FinalizeWriteIdRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t GetTableRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("FinalizeWriteIdRequest");
+  xfer += oprot->writeStructBegin("GetTableRequest");
 
   xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->dbName);
@@ -18322,56 +18282,56 @@ uint32_t FinalizeWriteIdRequest::write(::apache::thrift::protocol::TProtocol* op
   xfer += oprot->writeString(this->tblName);
   xfer += oprot->writeFieldEnd();
 
-  xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 3);
-  xfer += oprot->writeI64(this->writeId);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("commit", ::apache::thrift::protocol::T_BOOL, 4);
-  xfer += oprot->writeBool(this->commit);
-  xfer += oprot->writeFieldEnd();
-
+  if (this->__isset.capabilities) {
+    xfer += oprot->writeFieldBegin("capabilities", ::apache::thrift::protocol::T_STRUCT, 3);
+    xfer += this->capabilities.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
 }
 
-void swap(FinalizeWriteIdRequest &a, FinalizeWriteIdRequest &b) {
+void swap(GetTableRequest &a, GetTableRequest &b) {
   using ::std::swap;
   swap(a.dbName, b.dbName);
   swap(a.tblName, b.tblName);
-  swap(a.writeId, b.writeId);
-  swap(a.commit, b.commit);
+  swap(a.capabilities, b.capabilities);
+  swap(a.__isset, b.__isset);
 }
 
-FinalizeWriteIdRequest::FinalizeWriteIdRequest(const FinalizeWriteIdRequest& other750) {
-  dbName = other750.dbName;
-  tblName = other750.tblName;
-  writeId = other750.writeId;
-  commit = other750.commit;
+GetTableRequest::GetTableRequest(const GetTableRequest& other763) {
+  dbName = other763.dbName;
+  tblName = other763.tblName;
+  capabilities = other763.capabilities;
+  __isset = other763.__isset;
 }
-FinalizeWriteIdRequest& FinalizeWriteIdRequest::operator=(const FinalizeWriteIdRequest& other751) {
-  dbName = other751.dbName;
-  tblName = other751.tblName;
-  writeId = other751.writeId;
-  commit = other751.commit;
+GetTableRequest& GetTableRequest::operator=(const GetTableRequest& other764) {
+  dbName = other764.dbName;
+  tblName = other764.tblName;
+  capabilities = other764.capabilities;
+  __isset = other764.__isset;
   return *this;
 }
-void FinalizeWriteIdRequest::printTo(std::ostream& out) const {
+void GetTableRequest::printTo(std::ostream& out) const {
   using ::apache::thrift::to_string;
-  out << "FinalizeWriteIdRequest(";
+  out << "GetTableRequest(";
   out << "dbName=" << to_string(dbName);
   out << ", " << "tblName=" << to_string(tblName);
-  out << ", " << "writeId=" << to_string(writeId);
-  out << ", " << "commit=" << to_string(commit);
+  out << ", " << "capabilities="; (__isset.capabilities ? (out << to_string(capabilities)) : (out << "<null>"));
   out << ")";
 }
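
// [Editorial sketch -- illustrative only, not generated code from this patch.]
// GetTableRequest uses Thrift's optional-field idiom: __set_capabilities()
// flips __isset.capabilities, and write() above emits field 3 only when that
// flag is set, so a request that never sets capabilities stays wire-compatible
// with servers that predate the field. A minimal caller, given a TProtocol*
// built elsewhere, might look like:
static void exampleBuildGetTableRequest(::apache::thrift::protocol::TProtocol* oprot) {
  GetTableRequest req;
  req.__set_dbName("default");
  req.__set_tblName("src");

  ClientCapabilities caps;
  caps.__set_values(std::vector<ClientCapability::type>());  // empty list avoids naming specific enum values
  req.__set_capabilities(caps);  // marks __isset.capabilities; field 3 will be written

  req.write(oprot);  // dbName, tblName, then the guarded capabilities struct
}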
 
 
-FinalizeWriteIdResult::~FinalizeWriteIdResult() throw() {
+GetTableResult::~GetTableResult() throw() {
 }
 
 
-uint32_t FinalizeWriteIdResult::read(::apache::thrift::protocol::TProtocol* iprot) {
+void GetTableResult::__set_table(const Table& val) {
+  this->table = val;
+}
+
+uint32_t GetTableResult::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -18383,6 +18343,7 @@ uint32_t FinalizeWriteIdResult::read(::apache::thrift::protocol::TProtocol* ipro
 
   using ::apache::thrift::protocol::TProtocolException;
 
+  bool isset_table = false;
 
   while (true)
   {
@@ -18390,62 +18351,83 @@ uint32_t FinalizeWriteIdResult::read(::apache::thrift::protocol::TProtocol* ipro
     if (ftype == ::apache::thrift::protocol::T_STOP) {
       break;
     }
-    xfer += iprot->skip(ftype);
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->table.read(iprot);
+          isset_table = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
     xfer += iprot->readFieldEnd();
   }
 
   xfer += iprot->readStructEnd();
 
+  if (!isset_table)
+    throw TProtocolException(TProtocolException::INVALID_DATA);
   return xfer;
 }
 
-uint32_t FinalizeWriteIdResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t GetTableResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("FinalizeWriteIdResult");
+  xfer += oprot->writeStructBegin("GetTableResult");
+
+  xfer += oprot->writeFieldBegin("table", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += this->table.write(oprot);
+  xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
 }
 
-void swap(FinalizeWriteIdResult &a, FinalizeWriteIdResult &b) {
+void swap(GetTableResult &a, GetTableResult &b) {
   using ::std::swap;
-  (void) a;
-  (void) b;
+  swap(a.table, b.table);
 }
 
-FinalizeWriteIdResult::FinalizeWriteIdResult(const FinalizeWriteIdResult& other752) {
-  (void) other752;
+GetTableResult::GetTableResult(const GetTableResult& other765) {
+  table = other765.table;
 }
-FinalizeWriteIdResult& FinalizeWriteIdResult::operator=(const FinalizeWriteIdResult& other753) {
-  (void) other753;
+GetTableResult& GetTableResult::operator=(const GetTableResult& other766) {
+  table = other766.table;
   return *this;
 }
-void FinalizeWriteIdResult::printTo(std::ostream& out) const {
+void GetTableResult::printTo(std::ostream& out) const {
   using ::apache::thrift::to_string;
-  out << "FinalizeWriteIdResult(";
+  out << "GetTableResult(";
+  out << "table=" << to_string(table);
   out << ")";
 }
 
 
-HeartbeatWriteIdRequest::~HeartbeatWriteIdRequest() throw() {
+GetTablesRequest::~GetTablesRequest() throw() {
 }
 
 
-void HeartbeatWriteIdRequest::__set_dbName(const std::string& val) {
+void GetTablesRequest::__set_dbName(const std::string& val) {
   this->dbName = val;
 }
 
-void HeartbeatWriteIdRequest::__set_tblName(const std::string& val) {
-  this->tblName = val;
+void GetTablesRequest::__set_tblNames(const std::vector<std::string> & val) {
+  this->tblNames = val;
+__isset.tblNames = true;
 }
 
-void HeartbeatWriteIdRequest::__set_writeId(const int64_t val) {
-  this->writeId = val;
+void GetTablesRequest::__set_capabilities(const ClientCapabilities& val) {
+  this->capabilities = val;
+__isset.capabilities = true;
 }
 
-uint32_t HeartbeatWriteIdRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t GetTablesRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -18458,8 +18440,6 @@ uint32_t HeartbeatWriteIdRequest::read(::apache::thrift::protocol::TProtocol* ip
   using ::apache::thrift::protocol::TProtocolException;
 
   bool isset_dbName = false;
-  bool isset_tblName = false;
-  bool isset_writeId = false;
 
   while (true)
   {
@@ -18478,17 +18458,29 @@ uint32_t HeartbeatWriteIdRequest::read(::apache::thrift::protocol::TProtocol* ip
         }
         break;
       case 2:
-        if (ftype == ::apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readString(this->tblName);
-          isset_tblName = true;
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->tblNames.clear();
+            uint32_t _size767;
+            ::apache::thrift::protocol::TType _etype770;
+            xfer += iprot->readListBegin(_etype770, _size767);
+            this->tblNames.resize(_size767);
+            uint32_t _i771;
+            for (_i771 = 0; _i771 < _size767; ++_i771)
+            {
+              xfer += iprot->readString(this->tblNames[_i771]);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.tblNames = true;
         } else {
           xfer += iprot->skip(ftype);
         }
         break;
       case 3:
-        if (ftype == ::apache::thrift::protocol::T_I64) {
-          xfer += iprot->readI64(this->writeId);
-          isset_writeId = true;
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->capabilities.read(iprot);
+          this->__isset.capabilities = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -18504,1008 +18496,81 @@ uint32_t HeartbeatWriteIdRequest::read(::apache::thrift::protocol::TProtocol* ip
 
   if (!isset_dbName)
     throw TProtocolException(TProtocolException::INVALID_DATA);
-  if (!isset_tblName)
-    throw TProtocolException(TProtocolException::INVALID_DATA);
-  if (!isset_writeId)
-    throw TProtocolException(TProtocolException::INVALID_DATA);
   return xfer;
 }
 
-uint32_t HeartbeatWriteIdRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t GetTablesRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("HeartbeatWriteIdRequest");
+  xfer += oprot->writeStructBegin("GetTablesRequest");
 
   xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->dbName);
   xfer += oprot->writeFieldEnd();
 
-  xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2);
-  xfer += oprot->writeString(this->tblName);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 3);
-  xfer += oprot->writeI64(this->writeId);
-  xfer += oprot->writeFieldEnd();
-
+  if (this->__isset.tblNames) {
+    xfer += oprot->writeFieldBegin("tblNames", ::apache::thrift::protocol::T_LIST, 2);
+    {
+      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tblNames.size()));
+      std::vector<std::string> ::const_iterator _iter772;
+      for (_iter772 = this->tblNames.begin(); _iter772 != this->tblNames.end(); ++_iter772)
+      {
+        xfer += oprot->writeString((*_iter772));
+      }
+      xfer += oprot->writeListEnd();
+    }
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.capabilities) {
+    xfer += oprot->writeFieldBegin("capabilities", ::apache::thrift::protocol::T_STRUCT, 3);
+    xfer += this->capabilities.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
 }
 
-void swap(HeartbeatWriteIdRequest &a, HeartbeatWriteIdRequest &b) {
+void swap(GetTablesRequest &a, GetTablesRequest &b) {
   using ::std::swap;
   swap(a.dbName, b.dbName);
-  swap(a.tblName, b.tblName);
-  swap(a.writeId, b.writeId);
+  swap(a.tblNames, b.tblNames);
+  swap(a.capabilities, b.capabilities);
+  swap(a.__isset, b.__isset);
 }
 
-HeartbeatWriteIdRequest::HeartbeatWriteIdRequest(const HeartbeatWriteIdRequest& other754) {
-  dbName = other754.dbName;
-  tblName = other754.tblName;
-  writeId = other754.writeId;
+GetTablesRequest::GetTablesRequest(const GetTablesRequest& other773) {
+  dbName = other773.dbName;
+  tblNames = other773.tblNames;
+  capabilities = other773.capabilities;
+  __isset = other773.__isset;
 }
-HeartbeatWriteIdRequest& HeartbeatWriteIdRequest::operator=(const HeartbeatWriteIdRequest& other755) {
-  dbName = other755.dbName;
-  tblName = other755.tblName;
-  writeId = other755.writeId;
+GetTablesRequest& GetTablesRequest::operator=(const GetTablesRequest& other774) {
+  dbName = other774.dbName;
+  tblNames = other774.tblNames;
+  capabilities = other774.capabilities;
+  __isset = other774.__isset;
   return *this;
 }
-void HeartbeatWriteIdRequest::printTo(std::ostream& out) const {
+void GetTablesRequest::printTo(std::ostream& out) const {
   using ::apache::thrift::to_string;
-  out << "HeartbeatWriteIdRequest(";
+  out << "GetTablesRequest(";
   out << "dbName=" << to_string(dbName);
-  out << ", " << "tblName=" << to_string(tblName);
-  out << ", " << "writeId=" << to_string(writeId);
+  out << ", " << "tblNames="; (__isset.tblNames ? (out << to_string(tblNames)) : (out << "<null>"));
+  out << ", " << "capabilities="; (__isset.capabilities ? (out << to_string(capabilities)) : (out << "<null>"));
   out << ")";
 }
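
// [Editorial sketch -- illustrative only, not generated code from this patch.]
// A round trip through a TMemoryBuffer shows the required/optional split that
// read() above enforces: a missing dbName raises
// TProtocolException(INVALID_DATA), while tblNames and capabilities may be
// absent. Assumes the boost::shared_ptr-based Thrift C++ runtime of this era:
//
//   #include <thrift/protocol/TBinaryProtocol.h>
//   #include <thrift/transport/TBufferTransports.h>
//
//   boost::shared_ptr< ::apache::thrift::transport::TMemoryBuffer> buf(
//       new ::apache::thrift::transport::TMemoryBuffer());
//   ::apache::thrift::protocol::TBinaryProtocol proto(buf);
//
//   GetTablesRequest out;
//   out.__set_dbName("default");    // required: always written
//   std::vector<std::string> names;
//   names.push_back("src");
//   names.push_back("srcpart");
//   out.__set_tblNames(names);      // optional list, field 2
//   out.write(&proto);              // capabilities unset, so field 3 is skipped
//
//   GetTablesRequest in;
//   in.read(&proto);                // would throw only if dbName were missing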
 
 
-HeartbeatWriteIdResult::~HeartbeatWriteIdResult() throw() {
+GetTablesResult::~GetTablesResult() throw() {
 }
 
 
-uint32_t HeartbeatWriteIdResult::read(::apache::thrift::protocol::TProtocol* iprot) {
-
-  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
-  uint32_t xfer = 0;
-  std::string fname;
-  ::apache::thrift::protocol::TType ftype;
-  int16_t fid;
-
-  xfer += iprot->readStructBegin(fname);
-
-  using ::apache::thrift::protocol::TProtocolException;
-
-
-  while (true)
-  {
-    xfer += iprot->readFieldBegin(fname, ftype, fid);
-    if (ftype == ::apache::thrift::protocol::T_STOP) {
-      break;
-    }
-    xfer += iprot->skip(ftype);
-    xfer += iprot->readFieldEnd();
-  }
-
-  xfer += iprot->readStructEnd();
-
-  return xfer;
-}
-
-uint32_t HeartbeatWriteIdResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
-  uint32_t xfer = 0;
-  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("HeartbeatWriteIdResult");
-
-  xfer += oprot->writeFieldStop();
-  xfer += oprot->writeStructEnd();
-  return xfer;
-}
-
-void swap(HeartbeatWriteIdResult &a, HeartbeatWriteIdResult &b) {
-  using ::std::swap;
-  (void) a;
-  (void) b;
-}
-
-HeartbeatWriteIdResult::HeartbeatWriteIdResult(const HeartbeatWriteIdResult& other756) {
-  (void) other756;
-}
-HeartbeatWriteIdResult& HeartbeatWriteIdResult::operator=(const HeartbeatWriteIdResult& other757) {
-  (void) other757;
-  return *this;
-}
-void HeartbeatWriteIdResult::printTo(std::ostream& out) const {
-  using ::apache::thrift::to_string;
-  out << "HeartbeatWriteIdResult(";
-  out << ")";
-}
-
-
-GetValidWriteIdsRequest::~GetValidWriteIdsRequest() throw() {
-}
-
-
-void GetValidWriteIdsRequest::__set_dbName(const std::string& val) {
-  this->dbName = val;
-}
-
-void GetValidWriteIdsRequest::__set_tblName(const std::string& val) {
-  this->tblName = val;
-}
-
-uint32_t GetValidWriteIdsRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
-
-  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
-  uint32_t xfer = 0;
-  std::string fname;
-  ::apache::thrift::protocol::TType ftype;
-  int16_t fid;
-
-  xfer += iprot->readStructBegin(fname);
-
-  using ::apache::thrift::protocol::TProtocolException;
-
-  bool isset_dbName = false;
-  bool isset_tblName = false;
-
-  while (true)
-  {
-    xfer += iprot->readFieldBegin(fname, ftype, fid);
-    if (ftype == ::apache::thrift::protocol::T_STOP) {
-      break;
-    }
-    switch (fid)
-    {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readString(this->dbName);
-          isset_dbName = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
-        if (ftype == ::apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readString(this->tblName);
-          isset_tblName = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      default:
-        xfer += iprot->skip(ftype);
-        break;
-    }
-    xfer += iprot->readFieldEnd();
-  }
-
-  xfer += iprot->readStructEnd();
-
-  if (!isset_dbName)
-    throw TProtocolException(TProtocolException::INVALID_DATA);
-  if (!isset_tblName)
-    throw TProtocolException(TProtocolException::INVALID_DATA);
-  return xfer;
-}
-
-uint32_t GetValidWriteIdsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
-  uint32_t xfer = 0;
-  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("GetValidWriteIdsRequest");
-
-  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
-  xfer += oprot->writeString(this->dbName);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2);
-  xfer += oprot->writeString(this->tblName);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldStop();
-  xfer += oprot->writeStructEnd();
-  return xfer;
-}
-
-void swap(GetValidWriteIdsRequest &a, GetValidWriteIdsRequest &b) {
-  using ::std::swap;
-  swap(a.dbName, b.dbName);
-  swap(a.tblName, b.tblName);
-}
-
-GetValidWriteIdsRequest::GetValidWriteIdsRequest(const GetValidWriteIdsRequest& other758) {
-  dbName = other758.dbName;
-  tblName = other758.tblName;
-}
-GetValidWriteIdsRequest& GetValidWriteIdsRequest::operator=(const GetValidWriteIdsRequest& other759) {
-  dbName = other759.dbName;
-  tblName = other759.tblName;
-  return *this;
-}
-void GetValidWriteIdsRequest::printTo(std::ostream& out) const {
-  using ::apache::thrift::to_string;
-  out << "GetValidWriteIdsRequest(";
-  out << "dbName=" << to_string(dbName);
-  out << ", " << "tblName=" << to_string(tblName);
-  out << ")";
-}
-
-
-GetValidWriteIdsResult::~GetValidWriteIdsResult() throw() {
-}
-
-
-void GetValidWriteIdsResult::__set_lowWatermarkId(const int64_t val) {
-  this->lowWatermarkId = val;
-}
-
-void GetValidWriteIdsResult::__set_highWatermarkId(const int64_t val) {
-  this->highWatermarkId = val;
-}
-
-void GetValidWriteIdsResult::__set_areIdsValid(const bool val) {
-  this->areIdsValid = val;
-__isset.areIdsValid = true;
-}
-
-void GetValidWriteIdsResult::__set_ids(const std::vector<int64_t> & val) {
-  this->ids = val;
-__isset.ids = true;
-}
-
-uint32_t GetValidWriteIdsResult::read(::apache::thrift::protocol::TProtocol* iprot) {
-
-  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
-  uint32_t xfer = 0;
-  std::string fname;
-  ::apache::thrift::protocol::TType ftype;
-  int16_t fid;
-
-  xfer += iprot->readStructBegin(fname);
-
-  using ::apache::thrift::protocol::TProtocolException;
-
-  bool isset_lowWatermarkId = false;
-  bool isset_highWatermarkId = false;
-
-  while (true)
-  {
-    xfer += iprot->readFieldBegin(fname, ftype, fid);
-    if (ftype == ::apache::thrift::protocol::T_STOP) {
-      break;
-    }
-    switch (fid)
-    {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_I64) {
-          xfer += iprot->readI64(this->lowWatermarkId);
-          isset_lowWatermarkId = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
-        if (ftype == ::apache::thrift::protocol::T_I64) {
-          xfer += iprot->readI64(this->highWatermarkId);
-          isset_highWatermarkId = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 3:
-        if (ftype == ::apache::thrift::protocol::T_BOOL) {
-          xfer += iprot->readBool(this->areIdsValid);
-          this->__isset.areIdsValid = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 4:
-        if (ftype == ::apache::thrift::protocol::T_LIST) {
-          {
-            this->ids.clear();
-            uint32_t _size760;
-            ::apache::thrift::protocol::TType _etype763;
-            xfer += iprot->readListBegin(_etype763, _size760);
-            this->ids.resize(_size760);
-            uint32_t _i764;
-            for (_i764 = 0; _i764 < _size760; ++_i764)
-            {
-              xfer += iprot->readI64(this->ids[_i764]);
-            }
-            xfer += iprot->readListEnd();
-          }
-          this->__isset.ids = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      default:
-        xfer += iprot->skip(ftype);
-        break;
-    }
-    xfer += iprot->readFieldEnd();
-  }
-
-  xfer += iprot->readStructEnd();
-
-  if (!isset_lowWatermarkId)
-    throw TProtocolException(TProtocolException::INVALID_DATA);
-  if (!isset_highWatermarkId)
-    throw TProtocolException(TProtocolException::INVALID_DATA);
-  return xfer;
-}
-
-uint32_t GetValidWriteIdsResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
-  uint32_t xfer = 0;
-  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("GetValidWriteIdsResult");
-
-  xfer += oprot->writeFieldBegin("lowWatermarkId", ::apache::thrift::protocol::T_I64, 1);
-  xfer += oprot->writeI64(this->lowWatermarkId);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("highWatermarkId", ::apache::thrift::protocol::T_I64, 2);
-  xfer += oprot->writeI64(this->highWatermarkId);
-  xfer += oprot->writeFieldEnd();
-
-  if (this->__isset.areIdsValid) {
-    xfer += oprot->writeFieldBegin("areIdsValid", ::apache::thrift::protocol::T_BOOL, 3);
-    xfer += oprot->writeBool(this->areIdsValid);
-    xfer += oprot->writeFieldEnd();
-  }
-  if (this->__isset.ids) {
-    xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_LIST, 4);
-    {
-      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->ids.size()));
-      std::vector<int64_t> ::const_iterator _iter765;
-      for (_iter765 = this->ids.begin(); _iter765 != this->ids.end(); ++_iter765)
-      {
-        xfer += oprot->writeI64((*_iter765));
-      }
-      xfer += oprot->writeListEnd();
-    }
-    xfer += oprot->writeFieldEnd();
-  }
-  xfer += oprot->writeFieldStop();
-  xfer += oprot->writeStructEnd();
-  return xfer;
-}
-
-void swap(GetValidWriteIdsResult &a, GetValidWriteIdsResult &b) {
-  using ::std::swap;
-  swap(a.lowWatermarkId, b.lowWatermarkId);
-  swap(a.highWatermarkId, b.highWatermarkId);
-  swap(a.areIdsValid, b.areIdsValid);
-  swap(a.ids, b.ids);
-  swap(a.__isset, b.__isset);
-}
-
-GetValidWriteIdsResult::GetValidWriteIdsResult(const GetValidWriteIdsResult& other766) {
-  lowWatermarkId = other766.lowWatermarkId;
-  highWatermarkId = other766.highWatermarkId;
-  areIdsValid = other766.areIdsValid;
-  ids = other766.ids;
-  __isset = other766.__isset;
-}
-GetValidWriteIdsResult& GetValidWriteIdsResult::operator=(const GetValidWriteIdsResult& other767) {
-  lowWatermarkId = other767.lowWatermarkId;
-  highWatermarkId = other767.highWatermarkId;
-  areIdsValid = other767.areIdsValid;
-  ids = other767.ids;
-  __isset = other767.__isset;
-  return *this;
-}
-void GetValidWriteIdsResult::printTo(std::ostream& out) const {
-  using ::apache::thrift::to_string;
-  out << "GetValidWriteIdsResult(";
-  out << "lowWatermarkId=" << to_string(lowWatermarkId);
-  out << ", " << "highWatermarkId=" << to_string(highWatermarkId);
-  out << ", " << "areIdsValid="; (__isset.areIdsValid ? (out << to_string(areIdsValid)) : (out << "<null>"));
-  out << ", " << "ids="; (__isset.ids ? (out << to_string(ids)) : (out << "<null>"));
-  out << ")";
-}
-
-
-GetAllFunctionsResponse::~GetAllFunctionsResponse() throw() {
-}
-
-
-void GetAllFunctionsResponse::__set_functions(const std::vector<Function> & val) {
-  this->functions = val;
-__isset.functions = true;
-}
-
-uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
-
-  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
-  uint32_t xfer = 0;
-  std::string fname;
-  ::apache::thrift::protocol::TType ftype;
-  int16_t fid;
-
-  xfer += iprot->readStructBegin(fname);
-
-  using ::apache::thrift::protocol::TProtocolException;
-
-
-  while (true)
-  {
-    xfer += iprot->readFieldBegin(fname, ftype, fid);
-    if (ftype == ::apache::thrift::protocol::T_STOP) {
-      break;
-    }
-    switch (fid)
-    {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_LIST) {
-          {
-            this->functions.clear();
-            uint32_t _size768;
-            ::apache::thrift::protocol::TType _etype771;
-            xfer += iprot->readListBegin(_etype771, _size768);
-            this->functions.resize(_size768);
-            uint32_t _i772;
-            for (_i772 = 0; _i772 < _size768; ++_i772)
-            {
-              xfer += this->functions[_i772].read(iprot);
-            }
-            xfer += iprot->readListEnd();
-          }
-          this->__isset.functions = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      default:
-        xfer += iprot->skip(ftype);
-        break;
-    }
-    xfer += iprot->readFieldEnd();
-  }
-
-  xfer += iprot->readStructEnd();
-
-  return xfer;
-}
-
-uint32_t GetAllFunctionsResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
-  uint32_t xfer = 0;
-  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("GetAllFunctionsResponse");
-
-  if (this->__isset.functions) {
-    xfer += oprot->writeFieldBegin("functions", ::apache::thrift::protocol::T_LIST, 1);
-    {
-      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->functions.size()));
-      std::vector<Function> ::const_iterator _iter773;
-      for (_iter773 = this->functions.begin(); _iter773 != this->functions.end(); ++_iter773)
-      {
-        xfer += (*_iter773).write(oprot);
-      }
-      xfer += oprot->writeListEnd();
-    }
-    xfer += oprot->writeFieldEnd();
-  }
-  xfer += oprot->writeFieldStop();
-  xfer += oprot->writeStructEnd();
-  return xfer;
-}
-
-void swap(GetAllFunctionsResponse &a, GetAllFunctionsResponse &b) {
-  using ::std::swap;
-  swap(a.functions, b.functions);
-  swap(a.__isset, b.__isset);
-}
-
-GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other774) {
-  functions = other774.functions;
-  __isset = other774.__isset;
-}
-GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other775) {
-  functions = other775.functions;
-  __isset = other775.__isset;
-  return *this;
-}
-void GetAllFunctionsResponse::printTo(std::ostream& out) const {
-  using ::apache::thrift::to_string;
-  out << "GetAllFunctionsResponse(";
-  out << "functions="; (__isset.functions ? (out << to_string(functions)) : (out << "<null>"));
-  out << ")";
-}
-
-
-ClientCapabilities::~ClientCapabilities() throw() {
-}
-
-
-void ClientCapabilities::__set_values(const std::vector<ClientCapability::type> & val) {
-  this->values = val;
-}
-
-uint32_t ClientCapabilities::read(::apache::thrift::protocol::TProtocol* iprot) {
-
-  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
-  uint32_t xfer = 0;
-  std::string fname;
-  ::apache::thrift::protocol::TType ftype;
-  int16_t fid;
-
-  xfer += iprot->readStructBegin(fname);
-
-  using ::apache::thrift::protocol::TProtocolException;
-
-  bool isset_values = false;
-
-  while (true)
-  {
-    xfer += iprot->readFieldBegin(fname, ftype, fid);
-    if (ftype == ::apache::thrift::protocol::T_STOP) {
-      break;
-    }
-    switch (fid)
-    {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_LIST) {
-          {
-            this->values.clear();
-            uint32_t _size776;
-            ::apache::thrift::protocol::TType _etype779;
-            xfer += iprot->readListBegin(_etype779, _size776);
-            this->values.resize(_size776);
-            uint32_t _i780;
-            for (_i780 = 0; _i780 < _size776; ++_i780)
-            {
-              int32_t ecast781;
-              xfer += iprot->readI32(ecast781);
-              this->values[_i780] = (ClientCapability::type)ecast781;
-            }
-            xfer += iprot->readListEnd();
-          }
-          isset_values = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      default:
-        xfer += iprot->skip(ftype);
-        break;
-    }
-    xfer += iprot->readFieldEnd();
-  }
-
-  xfer += iprot->readStructEnd();
-
-  if (!isset_values)
-    throw TProtocolException(TProtocolException::INVALID_DATA);
-  return xfer;
-}
-
-uint32_t ClientCapabilities::write(::apache::thrift::protocol::TProtocol* oprot) const {
-  uint32_t xfer = 0;
-  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ClientCapabilities");
-
-  xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
-  {
-    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I32, static_cast<uint32_t>(this->values.size()));
-    std::vector<ClientCapability::type> ::const_iterator _iter782;
-    for (_iter782 = this->values.begin(); _iter782 != this->values.end(); ++_iter782)
-    {
-      xfer += oprot->writeI32((int32_t)(*_iter782));
-    }
-    xfer += oprot->writeListEnd();
-  }
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldStop();
-  xfer += oprot->writeStructEnd();
-  return xfer;
-}
-
-void swap(ClientCapabilities &a, ClientCapabilities &b) {
-  using ::std::swap;
-  swap(a.values, b.values);
-}
-
-ClientCapabilities::ClientCapabilities(const ClientCapabilities& other783) {
-  values = other783.values;
-}
-ClientCapabilities& ClientCapabilities::operator=(const ClientCapabilities& other784) {
-  values = other784.values;
-  return *this;
-}
-void ClientCapabilities::printTo(std::ostream& out) const {
-  using ::apache::thrift::to_string;
-  out << "ClientCapabilities(";
-  out << "values=" << to_string(values);
-  out << ")";
-}
-
-
-GetTableRequest::~GetTableRequest() throw() {
-}
-
-
-void GetTableRequest::__set_dbName(const std::string& val) {
-  this->dbName = val;
-}
-
-void GetTableRequest::__set_tblName(const std::string& val) {
-  this->tblName = val;
-}
-
-void GetTableRequest::__set_capabilities(const ClientCapabilities& val) {
-  this->capabilities = val;
-__isset.capabilities = true;
-}
-
-uint32_t GetTableRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
-
-  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
-  uint32_t xfer = 0;
-  std::string fname;
-  ::apache::thrift::protocol::TType ftype;
-  int16_t fid;
-
-  xfer += iprot->readStructBegin(fname);
-
-  using ::apache::thrift::protocol::TProtocolException;
-
-  bool isset_dbName = false;
-  bool isset_tblName = false;
-
-  while (true)
-  {
-    xfer += iprot->readFieldBegin(fname, ftype, fid);
-    if (ftype == ::apache::thrift::protocol::T_STOP) {
-      break;
-    }
-    switch (fid)
-    {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readString(this->dbName);
-          isset_dbName = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
-        if (ftype == ::apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readString(this->tblName);
-          isset_tblName = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 3:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->capabilities.read(iprot);
-          this->__isset.capabilities = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      default:
-        xfer += iprot->skip(ftype);
-        break;
-    }
-    xfer += iprot->readFieldEnd();
-  }
-
-  xfer += iprot->readStructEnd();
-
-  if (!isset_dbName)
-    throw TProtocolException(TProtocolException::INVALID_DATA);
-  if (!isset_tblName)
-    throw TProtocolException(TProtocolException::INVALID_DATA);
-  return xfer;
-}
-
-uint32_t GetTableRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
-  uint32_t xfer = 0;
-  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("GetTableRequest");
-
-  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
-  xfer += oprot->writeString(this->dbName);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2);
-  xfer += oprot->writeString(this->tblName);
-  xfer += oprot->writeFieldEnd();
-
-  if (this->__isset.capabilities) {
-    xfer += oprot->writeFieldBegin("capabilities", ::apache::thrift::protocol::T_STRUCT, 3);
-    xfer += this->capabilities.write(oprot);
-    xfer += oprot->writeFieldEnd();
-  }
-  xfer += oprot->writeFieldStop();
-  xfer += oprot->writeStructEnd();
-  return xfer;
-}
-
-void swap(GetTableRequest &a, GetTableRequest &b) {
-  using ::std::swap;
-  swap(a.dbName, b.dbName);
-  swap(a.tblName, b.tblName);
-  swap(a.capabilities, b.capabilities);
-  swap(a.__isset, b.__isset);
-}
-
-GetTableRequest::GetTableRequest(const GetTableRequest& other785) {
-  dbName = other785.dbName;
-  tblName = other785.tblName;
-  capabilities = other785.capabilities;
-  __isset = other785.__isset;
-}
-GetTableRequest& GetTableRequest::operator=(const GetTableRequest& other786) {
-  dbName = other786.dbName;
-  tblName = other786.tblName;
-  capabilities = other786.capabilities;
-  __isset = other786.__isset;
-  return *this;
-}
-void GetTableRequest::printTo(std::ostream& out) const {
-  using ::apache::thrift::to_string;
-  out << "GetTableRequest(";
-  out << "dbName=" << to_string(dbName);
-  out << ", " << "tblName=" << to_string(tblName);
-  out << ", " << "capabilities="; (__isset.capabilities ? (out << to_string(capabilities)) : (out << "<null>"));
-  out << ")";
-}
-
-
-GetTableResult::~GetTableResult() throw() {
-}
-
-
-void GetTableResult::__set_table(const Table& val) {
-  this->table = val;
-}
-
-uint32_t GetTableResult::read(::apache::thrift::protocol::TProtocol* iprot) {
-
-  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
-  uint32_t xfer = 0;
-  std::string fname;
-  ::apache::thrift::protocol::TType ftype;
-  int16_t fid;
-
-  xfer += iprot->readStructBegin(fname);
-
-  using ::apache::thrift::protocol::TProtocolException;
-
-  bool isset_table = false;
-
-  while (true)
-  {
-    xfer += iprot->readFieldBegin(fname, ftype, fid);
-    if (ftype == ::apache::thrift::protocol::T_STOP) {
-      break;
-    }
-    switch (fid)
-    {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->table.read(iprot);
-          isset_table = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      default:
-        xfer += iprot->skip(ftype);
-        break;
-    }
-    xfer += iprot->readFieldEnd();
-  }
-
-  xfer += iprot->readStructEnd();
-
-  if (!isset_table)
-    throw TProtocolException(TProtocolException::INVALID_DATA);
-  return xfer;
-}
-
-uint32_t GetTableResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
-  uint32_t xfer = 0;
-  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("GetTableResult");
-
-  xfer += oprot->writeFieldBegin("table", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += this->table.write(oprot);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldStop();
-  xfer += oprot->writeStructEnd();
-  return xfer;
-}
-
-void swap(GetTableResult &a, GetTableResult &b) {
-  using ::std::swap;
-  swap(a.table, b.table);
-}
-
-GetTableResult::GetTableResult(const GetTableResult& other787) {
-  table = other787.table;
-}
-GetTableResult& GetTableResult::operator=(const GetTableResult& other788) {
-  table = other788.table;
-  return *this;
-}
-void GetTableResult::printTo(std::ostream& out) const {
-  using ::apache::thrift::to_string;
-  out << "GetTableResult(";
-  out << "table=" << to_string(table);
-  out << ")";
-}
-
-
-GetTablesRequest::~GetTablesRequest() throw() {
-}
-
-
-void GetTablesRequest::__set_dbName(const std::string& val) {
-  this->dbName = val;
-}
-
-void GetTablesRequest::__set_tblNames(const std::vector<std::string> & val) {
-  this->tblNames = val;
-__isset.tblNames = true;
-}
-
-void GetTablesRequest::__set_capabilities(const ClientCapabilities& val) {
-  this->capabilities = val;
-__isset.capabilities = true;
-}
-
-uint32_t GetTablesRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
-
-  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
-  uint32_t xfer = 0;
-  std::string fname;
-  ::apache::thrift::protocol::TType ftype;
-  int16_t fid;
-
-  xfer += iprot->readStructBegin(fname);
-
-  using ::apache::thrift::protocol::TProtocolException;
-
-  bool isset_dbName = false;
-
-  while (true)
-  {
-    xfer += iprot->readFieldBegin(fname, ftype, fid);
-    if (ftype == ::apache::thrift::protocol::T_STOP) {
-      break;
-    }
-    switch (fid)
-    {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readString(this->dbName);
-          isset_dbName = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
-        if (ftype == ::apache::thrift::protocol::T_LIST) {
-          {
-            this->tblNames.clear();
-            uint32_t _size789;
-            ::apache::thrift::protocol::TType _etype792;
-            xfer += iprot->readListBegin(_etype792, _size789);
-            this->tblNames.resize(_size789);
-            uint32_t _i793;
-            for (_i793 = 0; _i793 < _size789; ++_i793)
-            {
-              xfer += iprot->readString(this->tblNames[_i793]);
-            }
-            xfer += iprot->readListEnd();
-          }
-          this->__isset.tblNames = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 3:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->capabilities.read(iprot);
-          this->__isset.capabilities = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      default:
-        xfer += iprot->skip(ftype);
-        break;
-    }
-    xfer += iprot->readFieldEnd();
-  }
-
-  xfer += iprot->readStructEnd();
-
-  if (!isset_dbName)
-    throw TProtocolException(TProtocolException::INVALID_DATA);
-  return xfer;
-}
-
-uint32_t GetTablesRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
-  uint32_t xfer = 0;
-  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("GetTablesRequest");
-
-  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
-  xfer += oprot->writeString(this->dbName);
-  xfer += oprot->writeFieldEnd();
-
-  if (this->__isset.tblNames) {
-    xfer += oprot->writeFieldBegin("tblNames", ::apache::thrift::protocol::T_LIST, 2);
-    {
-      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tblNames.size()));
-      std::vector<std::string> ::const_iterator _iter794;
-      for (_iter794 = this->tblNames.begin(); _iter794 != this->tblNames.end(); ++_iter794)
-      {
-        xfer += oprot->writeString((*_iter794));
-      }
-      xfer += oprot->writeListEnd();
-    }
-    xfer += oprot->writeFieldEnd();
-  }
-  if (this->__isset.capabilities) {
-    xfer += oprot->writeFieldBegin("capabilities", ::apache::thrift::protocol::T_STRUCT, 3);
-    xfer += this->capabilities.write(oprot);
-    xfer += oprot->writeFieldEnd();
-  }
-  xfer += oprot->writeFieldStop();
-  xfer += oprot->writeStructEnd();
-  return xfer;
-}
-
-void swap(GetTablesRequest &a, GetTablesRequest &b) {
-  using ::std::swap;
-  swap(a.dbName, b.dbName);
-  swap(a.tblNames, b.tblNames);
-  swap(a.capabilities, b.capabilities);
-  swap(a.__isset, b.__isset);
-}
-
-GetTablesRequest::GetTablesRequest(const GetTablesRequest& other795) {
-  dbName = other795.dbName;
-  tblNames = other795.tblNames;
-  capabilities = other795.capabilities;
-  __isset = other795.__isset;
-}
-GetTablesRequest& GetTablesRequest::operator=(const GetTablesRequest& other796) {
-  dbName = other796.dbName;
-  tblNames = other796.tblNames;
-  capabilities = other796.capabilities;
-  __isset = other796.__isset;
-  return *this;
-}
-void GetTablesRequest::printTo(std::ostream& out) const {
-  using ::apache::thrift::to_string;
-  out << "GetTablesRequest(";
-  out << "dbName=" << to_string(dbName);
-  out << ", " << "tblNames="; (__isset.tblNames ? (out << to_string(tblNames)) : (out << "<null>"));
-  out << ", " << "capabilities="; (__isset.capabilities ? (out << to_string(capabilities)) : (out << "<null>"));
-  out << ")";
-}
-
-
-GetTablesResult::~GetTablesResult() throw() {
-}
-
-
-void GetTablesResult::__set_tables(const std::vector<Table> & val) {
-  this->tables = val;
-}
-
-uint32_t GetTablesResult::read(::apache::thrift::protocol::TProtocol* iprot) {
+void GetTablesResult::__set_tables(const std::vector<Table> & val) {
+  this->tables = val;
+}
+
+uint32_t GetTablesResult::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -19531,14 +18596,14 @@ uint32_t GetTablesResult::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->tables.clear();
-            uint32_t _size797;
-            ::apache::thrift::protocol::TType _etype800;
-            xfer += iprot->readListBegin(_etype800, _size797);
-            this->tables.resize(_size797);
-            uint32_t _i801;
-            for (_i801 = 0; _i801 < _size797; ++_i801)
+            uint32_t _size775;
+            ::apache::thrift::protocol::TType _etype778;
+            xfer += iprot->readListBegin(_etype778, _size775);
+            this->tables.resize(_size775);
+            uint32_t _i779;
+            for (_i779 = 0; _i779 < _size775; ++_i779)
             {
-              xfer += this->tables[_i801].read(iprot);
+              xfer += this->tables[_i779].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -19569,10 +18634,10 @@ uint32_t GetTablesResult::write(::apache::thrift::protocol::TProtocol* oprot) co
   xfer += oprot->writeFieldBegin("tables", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->tables.size()));
-    std::vector<Table> ::const_iterator _iter802;
-    for (_iter802 = this->tables.begin(); _iter802 != this->tables.end(); ++_iter802)
+    std::vector<Table> ::const_iterator _iter780;
+    for (_iter780 = this->tables.begin(); _iter780 != this->tables.end(); ++_iter780)
     {
-      xfer += (*_iter802).write(oprot);
+      xfer += (*_iter780).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -19588,11 +18653,11 @@ void swap(GetTablesResult &a, GetTablesResult &b) {
   swap(a.tables, b.tables);
 }
 
-GetTablesResult::GetTablesResult(const GetTablesResult& other803) {
-  tables = other803.tables;
+GetTablesResult::GetTablesResult(const GetTablesResult& other781) {
+  tables = other781.tables;
 }
-GetTablesResult& GetTablesResult::operator=(const GetTablesResult& other804) {
-  tables = other804.tables;
+GetTablesResult& GetTablesResult::operator=(const GetTablesResult& other782) {
+  tables = other782.tables;
   return *this;
 }
 void GetTablesResult::printTo(std::ostream& out) const {
@@ -19734,19 +18799,19 @@ void swap(TableMeta &a, TableMeta &b) {
   swap(a.__isset, b.__isset);
 }
 
-TableMeta::TableMeta(const TableMeta& other805) {
-  dbName = other805.dbName;
-  tableName = other805.tableName;
-  tableType = other805.tableType;
-  comments = other805.comments;
-  __isset = other805.__isset;
+TableMeta::TableMeta(const TableMeta& other783) {
+  dbName = other783.dbName;
+  tableName = other783.tableName;
+  tableType = other783.tableType;
+  comments = other783.comments;
+  __isset = other783.__isset;
 }
-TableMeta& TableMeta::operator=(const TableMeta& other806) {
-  dbName = other806.dbName;
-  tableName = other806.tableName;
-  tableType = other806.tableType;
-  comments = other806.comments;
-  __isset = other806.__isset;
+TableMeta& TableMeta::operator=(const TableMeta& other784) {
+  dbName = other784.dbName;
+  tableName = other784.tableName;
+  tableType = other784.tableType;
+  comments = other784.comments;
+  __isset = other784.__isset;
   return *this;
 }
 void TableMeta::printTo(std::ostream& out) const {
@@ -19829,13 +18894,13 @@ void swap(MetaException &a, MetaException &b) {
   swap(a.__isset, b.__isset);
 }
 
-MetaException::MetaException(const MetaException& other807) : TException() {
-  message = other807.message;
-  __isset = other807.__isset;
+MetaException::MetaException(const MetaException& other785) : TException() {
+  message = other785.message;
+  __isset = other785.__isset;
 }
-MetaException& MetaException::operator=(const MetaException& other808) {
-  message = other808.message;
-  __isset = other808.__isset;
+MetaException& MetaException::operator=(const MetaException& other786) {
+  message = other786.message;
+  __isset = other786.__isset;
   return *this;
 }
 void MetaException::printTo(std::ostream& out) const {
@@ -19926,13 +18991,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) {
   swap(a.__isset, b.__isset);
 }
 
-UnknownTableException::UnknownTableException(const UnknownTableException& other809) : TException() {
-  message = other809.message;
-  __isset = other809.__isset;
+UnknownTableException::UnknownTableException(const UnknownTableException& other787) : TException() {
+  message = other787.message;
+  __isset = other787.__isset;
 }
-UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other810) {
-  message = other810.message;
-  __isset = other810.__isset;
+UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other788) {
+  message = other788.message;
+  __isset = other788.__isset;
   return *this;
 }
 void UnknownTableException::printTo(std::ostream& out) const {
@@ -20023,13 +19088,13 @@ void swap(UnknownDBException &a, UnknownDBException &b) {
   swap(a.__isset, b.__isset);
 }
 
-UnknownDBException::UnknownDBException(const UnknownDBException& other811) : TException() {
-  message = other811.message;
-  __isset = other811.__isset;
+UnknownDBException::UnknownDBException(const UnknownDBException& other789) : TException() {
+  message = other789.message;
+  __isset = other789.__isset;
 }
-UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other812) {
-  message = other812.message;
-  __isset = other812.__isset;
+UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other790) {
+  message = other790.message;
+  __isset = other790.__isset;
   return *this;
 }
 void UnknownDBException::printTo(std::ostream& out) const {
@@ -20120,13 +19185,13 @@ void swap(AlreadyExistsException &a, AlreadyExistsException &b) {
   swap(a.__isset, b.__isset);
 }
 
-AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other813) : TException() {
-  message = other813.message;
-  __isset = other813.__isset;
+AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other791) : TException() {
+  message = other791.message;
+  __isset = other791.__isset;
 }
-AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other814) {
-  message = other814.message;
-  __isset = other814.__isset;
+AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other792) {
+  message = other792.message;
+  __isset = other792.__isset;
   return *this;
 }
 void AlreadyExistsException::printTo(std::ostream& out) const {
@@ -20217,13 +19282,13 @@ void swap(InvalidPartitionException &a, InvalidPartitionException &b) {
   swap(a.__isset, b.__isset);
 }
 
-InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other815) : TException() {
-  message = other815.message;
-  __isset = other815.__isset;
+InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other793) : TException() {
+  message = other793.message;
+  __isset = other793.__isset;
 }
-InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other816) {
-  message = other816.message;
-  __isset = other816.__isset;
+InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other794) {
+  message = other794.message;
+  __isset = other794.__isset;
   return *this;
 }
 void InvalidPartitionException::printTo(std::ostream& out) const {
@@ -20314,13 +19379,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) {
   swap(a.__isset, b.__isset);
 }
 
-UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other817) : TException() {
-  message = other817.message;
-  __isset = other817.__isset;
+UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other795) : TException() {
+  message = other795.message;
+  __isset = other795.__isset;
 }
-UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other818) {
-  message = other818.message;
-  __isset = other818.__isset;
+UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other796) {
+  message = other796.message;
+  __isset = other796.__isset;
   return *this;
 }
 void UnknownPartitionException::printTo(std::ostream& out) const {
@@ -20411,13 +19476,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) {
   swap(a.__isset, b.__isset);
 }
 
-InvalidObjectException::InvalidObjectException(const InvalidObjectException& other819) : TException() {
-  message = other819.message;
-  __isset = other819.__isset;
+InvalidObjectException::InvalidObjectException(const InvalidObjectException& other797) : TException() {
+  message = other797.message;
+  __isset = other797.__isset;
 }
-InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other820) {
-  message = other820.message;
-  __isset = other820.__isset;
+InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other798) {
+  message = other798.message;
+  __isset = other798.__isset;
   return *this;
 }
 void InvalidObjectException::printTo(std::ostream& out) const {
@@ -20508,13 +19573,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) {
   swap(a.__isset, b.__isset);
 }
 
-NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other821) : TException() {
-  message = other821.message;
-  __isset = other821.__isset;
+NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other799) : TException() {
+  message = other799.message;
+  __isset = other799.__isset;
 }
-NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other822) {
-  message = other822.message;
-  __isset = other822.__isset;
+NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other800) {
+  message = other800.message;
+  __isset = other800.__isset;
   return *this;
 }
 void NoSuchObjectException::printTo(std::ostream& out) const {
@@ -20605,13 +19670,13 @@ void swap(IndexAlreadyExistsException &a, IndexAlreadyExistsException &b) {
   swap(a.__isset, b.__isset);
 }
 
-IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other823) : TException() {
-  message = other823.message;
-  __isset = other823.__isset;
+IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other801) : TException() {
+  message = other801.message;
+  __isset = other801.__isset;
 }
-IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other824) {
-  message = other824.message;
-  __isset = other824.__isset;
+IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other802) {
+  message = other802.message;
+  __isset = other802.__isset;
   return *this;
 }
 void IndexAlreadyExistsException::printTo(std::ostream& out) const {
@@ -20702,13 +19767,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) {
   swap(a.__isset, b.__isset);
 }
 
-InvalidOperationException::InvalidOperationException(const InvalidOperationException& other825) : TException() {
-  message = other825.message;
-  __isset = other825.__isset;
+InvalidOperationException::InvalidOperationException(const InvalidOperationException& other803) : TException() {
+  message = other803.message;
+  __isset = other803.__isset;
 }
-InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other826) {
-  message = other826.message;
-  __isset = other826.__isset;
+InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other804) {
+  message = other804.message;
+  __isset = other804.__isset;
   return *this;
 }
 void InvalidOperationException::printTo(std::ostream& out) const {
@@ -20799,13 +19864,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) {
   swap(a.__isset, b.__isset);
 }
 
-ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other827) : TException() {
-  message = other827.message;
-  __isset = other827.__isset;
+ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other805) : TException() {
+  message = other805.message;
+  __isset = other805.__isset;
 }
-ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other828) {
-  message = other828.message;
-  __isset = other828.__isset;
+ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other806) {
+  message = other806.message;
+  __isset = other806.__isset;
   return *this;
 }
 void ConfigValSecurityException::printTo(std::ostream& out) const {
@@ -20896,13 +19961,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) {
   swap(a.__isset, b.__isset);
 }
 
-InvalidInputException::InvalidInputException(const InvalidInputException& other829) : TException() {
-  message = other829.message;
-  __isset = other829.__isset;
+InvalidInputException::InvalidInputException(const InvalidInputException& other807) : TException() {
+  message = other807.message;
+  __isset = other807.__isset;
 }
-InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other830) {
-  message = other830.message;
-  __isset = other830.__isset;
+InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other808) {
+  message = other808.message;
+  __isset = other808.__isset;
   return *this;
 }
 void InvalidInputException::printTo(std::ostream& out) const {
@@ -20993,13 +20058,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) {
   swap(a.__isset, b.__isset);
 }
 
-NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other831) : TException() {
-  message = other831.message;
-  __isset = other831.__isset;
+NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other809) : TException() {
+  message = other809.message;
+  __isset = other809.__isset;
 }
-NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other832) {
-  message = other832.message;
-  __isset = other832.__isset;
+NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other810) {
+  message = other810.message;
+  __isset = other810.__isset;
   return *this;
 }
 void NoSuchTxnException::printTo(std::ostream& out) const {
@@ -21090,13 +20155,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) {
   swap(a.__isset, b.__isset);
 }
 
-TxnAbortedException::TxnAbortedException(const TxnAbortedException& other833) : TException() {
-  message = other833.message;
-  __isset = other833.__isset;
+TxnAbortedException::TxnAbortedException(const TxnAbortedException& other811) : TException() {
+  message = other811.message;
+  __isset = other811.__isset;
 }
-TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other834) {
-  message = other834.message;
-  __isset = other834.__isset;
+TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other812) {
+  message = other812.message;
+  __isset = other812.__isset;
   return *this;
 }
 void TxnAbortedException::printTo(std::ostream& out) const {
@@ -21187,13 +20252,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) {
   swap(a.__isset, b.__isset);
 }
 
-TxnOpenException::TxnOpenException(const TxnOpenException& other835) : TException() {
-  message = other835.message;
-  __isset = other835.__isset;
+TxnOpenException::TxnOpenException(const TxnOpenException& other813) : TException() {
+  message = other813.message;
+  __isset = other813.__isset;
 }
-TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other836) {
-  message = other836.message;
-  __isset = other836.__isset;
+TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other814) {
+  message = other814.message;
+  __isset = other814.__isset;
   return *this;
 }
 void TxnOpenException::printTo(std::ostream& out) const {
@@ -21284,13 +20349,13 @@ void swap(NoSuchLockException &a, NoSuchLockException &b) {
   swap(a.__isset, b.__isset);
 }
 
-NoSuchLockException::NoSuchLockException(const NoSuchLockException& other837) : TException() {
-  message = other837.message;
-  __isset = other837.__isset;
+NoSuchLockException::NoSuchLockException(const NoSuchLockException& other815) : TException() {
+  message = other815.message;
+  __isset = other815.__isset;
 }
-NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other838) {
-  message = other838.message;
-  __isset = other838.__isset;
+NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other816) {
+  message = other816.message;
+  __isset = other816.__isset;
   return *this;
 }
 void NoSuchLockException::printTo(std::ostream& out) const {

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index d6a90cc..5f83781 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -395,22 +395,6 @@ class CacheFileMetadataResult;
 
 class CacheFileMetadataRequest;
 
-class GetNextWriteIdRequest;
-
-class GetNextWriteIdResult;
-
-class FinalizeWriteIdRequest;
-
-class FinalizeWriteIdResult;
-
-class HeartbeatWriteIdRequest;
-
-class HeartbeatWriteIdResult;
-
-class GetValidWriteIdsRequest;
-
-class GetValidWriteIdsResult;
-
 class GetAllFunctionsResponse;
 
 class ClientCapabilities;
@@ -2079,7 +2063,7 @@ inline std::ostream& operator<<(std::ostream& out, const StorageDescriptor& obj)
 }
 
 typedef struct _Table__isset {
-  _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), mmNextWriteId(false), mmWatermarkWriteId(false) {}
+  _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false) {}
   bool tableName :1;
   bool dbName :1;
   bool owner :1;
@@ -2095,8 +2079,6 @@ typedef struct _Table__isset {
   bool privileges :1;
   bool temporary :1;
   bool rewriteEnabled :1;
-  bool mmNextWriteId :1;
-  bool mmWatermarkWriteId :1;
 } _Table__isset;
 
 class Table {
@@ -2104,7 +2086,7 @@ class Table {
 
   Table(const Table&);
   Table& operator=(const Table&);
-  Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(0), mmNextWriteId(0), mmWatermarkWriteId(0) {
+  Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(0) {
   }
 
   virtual ~Table() throw();
@@ -2123,8 +2105,6 @@ class Table {
   PrincipalPrivilegeSet privileges;
   bool temporary;
   bool rewriteEnabled;
-  int64_t mmNextWriteId;
-  int64_t mmWatermarkWriteId;
 
   _Table__isset __isset;
 
@@ -2158,10 +2138,6 @@ class Table {
 
   void __set_rewriteEnabled(const bool val);
 
-  void __set_mmNextWriteId(const int64_t val);
-
-  void __set_mmWatermarkWriteId(const int64_t val);
-
   bool operator == (const Table & rhs) const
   {
     if (!(tableName == rhs.tableName))
@@ -2200,14 +2176,6 @@ class Table {
       return false;
     else if (__isset.rewriteEnabled && !(rewriteEnabled == rhs.rewriteEnabled))
       return false;
-    if (__isset.mmNextWriteId != rhs.__isset.mmNextWriteId)
-      return false;
-    else if (__isset.mmNextWriteId && !(mmNextWriteId == rhs.mmNextWriteId))
-      return false;
-    if (__isset.mmWatermarkWriteId != rhs.__isset.mmWatermarkWriteId)
-      return false;
-    else if (__isset.mmWatermarkWriteId && !(mmWatermarkWriteId == rhs.mmWatermarkWriteId))
-      return false;
     return true;
   }
   bool operator != (const Table &rhs) const {
@@ -7334,377 +7302,6 @@ inline std::ostream& operator<<(std::ostream& out, const CacheFileMetadataReques
   return out;
 }
 
-
-class GetNextWriteIdRequest {
- public:
-
-  GetNextWriteIdRequest(const GetNextWriteIdRequest&);
-  GetNextWriteIdRequest& operator=(const GetNextWriteIdRequest&);
-  GetNextWriteIdRequest() : dbName(), tblName() {
-  }
-
-  virtual ~GetNextWriteIdRequest() throw();
-  std::string dbName;
-  std::string tblName;
-
-  void __set_dbName(const std::string& val);
-
-  void __set_tblName(const std::string& val);
-
-  bool operator == (const GetNextWriteIdRequest & rhs) const
-  {
-    if (!(dbName == rhs.dbName))
-      return false;
-    if (!(tblName == rhs.tblName))
-      return false;
-    return true;
-  }
-  bool operator != (const GetNextWriteIdRequest &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const GetNextWriteIdRequest & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-  virtual void printTo(std::ostream& out) const;
-};
-
-void swap(GetNextWriteIdRequest &a, GetNextWriteIdRequest &b);
-
-inline std::ostream& operator<<(std::ostream& out, const GetNextWriteIdRequest& obj)
-{
-  obj.printTo(out);
-  return out;
-}
-
-
-class GetNextWriteIdResult {
- public:
-
-  GetNextWriteIdResult(const GetNextWriteIdResult&);
-  GetNextWriteIdResult& operator=(const GetNextWriteIdResult&);
-  GetNextWriteIdResult() : writeId(0) {
-  }
-
-  virtual ~GetNextWriteIdResult() throw();
-  int64_t writeId;
-
-  void __set_writeId(const int64_t val);
-
-  bool operator == (const GetNextWriteIdResult & rhs) const
-  {
-    if (!(writeId == rhs.writeId))
-      return false;
-    return true;
-  }
-  bool operator != (const GetNextWriteIdResult &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const GetNextWriteIdResult & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-  virtual void printTo(std::ostream& out) const;
-};
-
-void swap(GetNextWriteIdResult &a, GetNextWriteIdResult &b);
-
-inline std::ostream& operator<<(std::ostream& out, const GetNextWriteIdResult& obj)
-{
-  obj.printTo(out);
-  return out;
-}
-
-
-class FinalizeWriteIdRequest {
- public:
-
-  FinalizeWriteIdRequest(const FinalizeWriteIdRequest&);
-  FinalizeWriteIdRequest& operator=(const FinalizeWriteIdRequest&);
-  FinalizeWriteIdRequest() : dbName(), tblName(), writeId(0), commit(0) {
-  }
-
-  virtual ~FinalizeWriteIdRequest() throw();
-  std::string dbName;
-  std::string tblName;
-  int64_t writeId;
-  bool commit;
-
-  void __set_dbName(const std::string& val);
-
-  void __set_tblName(const std::string& val);
-
-  void __set_writeId(const int64_t val);
-
-  void __set_commit(const bool val);
-
-  bool operator == (const FinalizeWriteIdRequest & rhs) const
-  {
-    if (!(dbName == rhs.dbName))
-      return false;
-    if (!(tblName == rhs.tblName))
-      return false;
-    if (!(writeId == rhs.writeId))
-      return false;
-    if (!(commit == rhs.commit))
-      return false;
-    return true;
-  }
-  bool operator != (const FinalizeWriteIdRequest &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const FinalizeWriteIdRequest & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-  virtual void printTo(std::ostream& out) const;
-};
-
-void swap(FinalizeWriteIdRequest &a, FinalizeWriteIdRequest &b);
-
-inline std::ostream& operator<<(std::ostream& out, const FinalizeWriteIdRequest& obj)
-{
-  obj.printTo(out);
-  return out;
-}
-
-
-class FinalizeWriteIdResult {
- public:
-
-  FinalizeWriteIdResult(const FinalizeWriteIdResult&);
-  FinalizeWriteIdResult& operator=(const FinalizeWriteIdResult&);
-  FinalizeWriteIdResult() {
-  }
-
-  virtual ~FinalizeWriteIdResult() throw();
-
-  bool operator == (const FinalizeWriteIdResult & /* rhs */) const
-  {
-    return true;
-  }
-  bool operator != (const FinalizeWriteIdResult &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const FinalizeWriteIdResult & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-  virtual void printTo(std::ostream& out) const;
-};
-
-void swap(FinalizeWriteIdResult &a, FinalizeWriteIdResult &b);
-
-inline std::ostream& operator<<(std::ostream& out, const FinalizeWriteIdResult& obj)
-{
-  obj.printTo(out);
-  return out;
-}
-
-
-class HeartbeatWriteIdRequest {
- public:
-
-  HeartbeatWriteIdRequest(const HeartbeatWriteIdRequest&);
-  HeartbeatWriteIdRequest& operator=(const HeartbeatWriteIdRequest&);
-  HeartbeatWriteIdRequest() : dbName(), tblName(), writeId(0) {
-  }
-
-  virtual ~HeartbeatWriteIdRequest() throw();
-  std::string dbName;
-  std::string tblName;
-  int64_t writeId;
-
-  void __set_dbName(const std::string& val);
-
-  void __set_tblName(const std::string& val);
-
-  void __set_writeId(const int64_t val);
-
-  bool operator == (const HeartbeatWriteIdRequest & rhs) const
-  {
-    if (!(dbName == rhs.dbName))
-      return false;
-    if (!(tblName == rhs.tblName))
-      return false;
-    if (!(writeId == rhs.writeId))
-      return false;
-    return true;
-  }
-  bool operator != (const HeartbeatWriteIdRequest &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const HeartbeatWriteIdRequest & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-  virtual void printTo(std::ostream& out) const;
-};
-
-void swap(HeartbeatWriteIdRequest &a, HeartbeatWriteIdRequest &b);
-
-inline std::ostream& operator<<(std::ostream& out, const HeartbeatWriteIdRequest& obj)
-{
-  obj.printTo(out);
-  return out;
-}
-
-
-class HeartbeatWriteIdResult {
- public:
-
-  HeartbeatWriteIdResult(const HeartbeatWriteIdResult&);
-  HeartbeatWriteIdResult& operator=(const HeartbeatWriteIdResult&);
-  HeartbeatWriteIdResult() {
-  }
-
-  virtual ~HeartbeatWriteIdResult() throw();
-
-  bool operator == (const HeartbeatWriteIdResult & /* rhs */) const
-  {
-    return true;
-  }
-  bool operator != (const HeartbeatWriteIdResult &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const HeartbeatWriteIdResult & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-  virtual void printTo(std::ostream& out) const;
-};
-
-void swap(HeartbeatWriteIdResult &a, HeartbeatWriteIdResult &b);
-
-inline std::ostream& operator<<(std::ostream& out, const HeartbeatWriteIdResult& obj)
-{
-  obj.printTo(out);
-  return out;
-}
-
-
-class GetValidWriteIdsRequest {
- public:
-
-  GetValidWriteIdsRequest(const GetValidWriteIdsRequest&);
-  GetValidWriteIdsRequest& operator=(const GetValidWriteIdsRequest&);
-  GetValidWriteIdsRequest() : dbName(), tblName() {
-  }
-
-  virtual ~GetValidWriteIdsRequest() throw();
-  std::string dbName;
-  std::string tblName;
-
-  void __set_dbName(const std::string& val);
-
-  void __set_tblName(const std::string& val);
-
-  bool operator == (const GetValidWriteIdsRequest & rhs) const
-  {
-    if (!(dbName == rhs.dbName))
-      return false;
-    if (!(tblName == rhs.tblName))
-      return false;
-    return true;
-  }
-  bool operator != (const GetValidWriteIdsRequest &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const GetValidWriteIdsRequest & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-  virtual void printTo(std::ostream& out) const;
-};
-
-void swap(GetValidWriteIdsRequest &a, GetValidWriteIdsRequest &b);
-
-inline std::ostream& operator<<(std::ostream& out, const GetValidWriteIdsRequest& obj)
-{
-  obj.printTo(out);
-  return out;
-}
-
-typedef struct _GetValidWriteIdsResult__isset {
-  _GetValidWriteIdsResult__isset() : areIdsValid(false), ids(false) {}
-  bool areIdsValid :1;
-  bool ids :1;
-} _GetValidWriteIdsResult__isset;
-
-class GetValidWriteIdsResult {
- public:
-
-  GetValidWriteIdsResult(const GetValidWriteIdsResult&);
-  GetValidWriteIdsResult& operator=(const GetValidWriteIdsResult&);
-  GetValidWriteIdsResult() : lowWatermarkId(0), highWatermarkId(0), areIdsValid(0) {
-  }
-
-  virtual ~GetValidWriteIdsResult() throw();
-  int64_t lowWatermarkId;
-  int64_t highWatermarkId;
-  bool areIdsValid;
-  std::vector<int64_t>  ids;
-
-  _GetValidWriteIdsResult__isset __isset;
-
-  void __set_lowWatermarkId(const int64_t val);
-
-  void __set_highWatermarkId(const int64_t val);
-
-  void __set_areIdsValid(const bool val);
-
-  void __set_ids(const std::vector<int64_t> & val);
-
-  bool operator == (const GetValidWriteIdsResult & rhs) const
-  {
-    if (!(lowWatermarkId == rhs.lowWatermarkId))
-      return false;
-    if (!(highWatermarkId == rhs.highWatermarkId))
-      return false;
-    if (__isset.areIdsValid != rhs.__isset.areIdsValid)
-      return false;
-    else if (__isset.areIdsValid && !(areIdsValid == rhs.areIdsValid))
-      return false;
-    if (__isset.ids != rhs.__isset.ids)
-      return false;
-    else if (__isset.ids && !(ids == rhs.ids))
-      return false;
-    return true;
-  }
-  bool operator != (const GetValidWriteIdsResult &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const GetValidWriteIdsResult & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-  virtual void printTo(std::ostream& out) const;
-};
-
-void swap(GetValidWriteIdsResult &a, GetValidWriteIdsResult &b);
-
-inline std::ostream& operator<<(std::ostream& out, const GetValidWriteIdsResult& obj)
-{
-  obj.printTo(out);
-  return out;
-}
-
 typedef struct _GetAllFunctionsResponse__isset {
   _GetAllFunctionsResponse__isset() : functions(false) {}
   bool functions :1;

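The eight request/result classes deleted above (GetNextWriteId*, FinalizeWriteId*, HeartbeatWriteId*, GetValidWriteIds*) made up the MM-specific write-id API that this patch retires in favor of the existing ACID structures. A hypothetical caller-side sketch, using only the fields and setters visible in the removed declarations; the metastore client plumbing is assumed, not shown:

  // Assumed usage of the now-removed API; identifiers are taken verbatim
  // from the deleted declarations above. Sketch only: the thrift client
  // call that would populate 'res' is omitted.
  void sketchGetValidWriteIds() {
    GetValidWriteIdsRequest req;
    req.__set_dbName("default");
    req.__set_tblName("simple_mm");   // example values, not from the patch

    GetValidWriteIdsResult res;       // would be filled by a metastore call
    int64_t low  = res.lowWatermarkId;   // required bounds of the range
    int64_t high = res.highWatermarkId;
    if (res.__isset.ids && res.areIdsValid) {
      // optional: res.ids enumerates specific write ids within
      // [low, high]; areIdsValid governs how that list is read.
    }
  }
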

[02/18] hive git commit: HIVE-14879 : integrate MM tables into ACID: replace MM metastore calls and structures with ACID ones (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/test/results/clientpositive/mm_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mm_all.q.out b/ql/src/test/results/clientpositive/mm_all.q.out
index db5de69..71826df 100644
--- a/ql/src/test/results/clientpositive/mm_all.q.out
+++ b/ql/src/test/results/clientpositive/mm_all.q.out
@@ -82,6 +82,7 @@ STAGE PLANS:
                     output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                     serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
                     name: default.part_mm
+                Write Type: INSERT
 
   Stage: Stage-7
     Conditional Operator
@@ -288,21 +289,6 @@ POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
 POSTHOOK: Output: default@simple_mm
 POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert overwrite table simple_mm select key from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@simple_mm
-POSTHOOK: query: insert overwrite table simple_mm select key from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@simple_mm
-POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select * from simple_mm order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@simple_mm
@@ -1236,473 +1222,6 @@ POSTHOOK: query: drop table merge1_mm
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@merge1_mm
 POSTHOOK: Output: default@merge1_mm
-PREHOOK: query: drop table ctas0_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table ctas0_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: database:default
-PREHOOK: Output: default@ctas0_mm
-POSTHOOK: query: create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@ctas0_mm
-POSTHOOK: Lineage: ctas0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: ctas0_mm.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-PREHOOK: query: select * from ctas0_mm
-PREHOOK: type: QUERY
-PREHOOK: Input: default@ctas0_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from ctas0_mm
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@ctas0_mm
-#### A masked pattern was here ####
-98	455
-97	455
-0	456
-10	456
-100	457
-103	457
-PREHOOK: query: drop table ctas0_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@ctas0_mm
-PREHOOK: Output: default@ctas0_mm
-POSTHOOK: query: drop table ctas0_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@ctas0_mm
-POSTHOOK: Output: default@ctas0_mm
-PREHOOK: query: drop table ctas1_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table ctas1_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table ctas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as
-  select * from intermediate union all select * from intermediate
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: database:default
-PREHOOK: Output: default@ctas1_mm
-POSTHOOK: query: create table ctas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as
-  select * from intermediate union all select * from intermediate
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@ctas1_mm
-POSTHOOK: Lineage: ctas1_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: ctas1_mm.p EXPRESSION [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-PREHOOK: query: select * from ctas1_mm
-PREHOOK: type: QUERY
-PREHOOK: Input: default@ctas1_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from ctas1_mm
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@ctas1_mm
-#### A masked pattern was here ####
-98	455
-98	455
-97	455
-97	455
-0	456
-0	456
-10	456
-10	456
-100	457
-100	457
-103	457
-103	457
-PREHOOK: query: drop table ctas1_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@ctas1_mm
-PREHOOK: Output: default@ctas1_mm
-POSTHOOK: query: drop table ctas1_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@ctas1_mm
-POSTHOOK: Output: default@ctas1_mm
-PREHOOK: query: drop table iow0_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table iow0_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table iow0_mm(key int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@iow0_mm
-POSTHOOK: query: create table iow0_mm(key int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@iow0_mm
-PREHOOK: query: insert overwrite table iow0_mm select key from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@iow0_mm
-POSTHOOK: query: insert overwrite table iow0_mm select key from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@iow0_mm
-POSTHOOK: Lineage: iow0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert into table iow0_mm select key + 1 from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@iow0_mm
-POSTHOOK: query: insert into table iow0_mm select key + 1 from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@iow0_mm
-POSTHOOK: Lineage: iow0_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from iow0_mm order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@iow0_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from iow0_mm order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@iow0_mm
-#### A masked pattern was here ####
-0
-1
-10
-11
-97
-98
-98
-99
-100
-101
-103
-104
-PREHOOK: query: insert overwrite table iow0_mm select key + 2 from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@iow0_mm
-POSTHOOK: query: insert overwrite table iow0_mm select key + 2 from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@iow0_mm
-POSTHOOK: Lineage: iow0_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from iow0_mm order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@iow0_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from iow0_mm order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@iow0_mm
-#### A masked pattern was here ####
-2
-12
-99
-100
-102
-105
-PREHOOK: query: drop table iow0_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@iow0_mm
-PREHOOK: Output: default@iow0_mm
-POSTHOOK: query: drop table iow0_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@iow0_mm
-POSTHOOK: Output: default@iow0_mm
-PREHOOK: query: drop table iow1_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table iow1_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table iow1_mm(key int) partitioned by (key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@iow1_mm
-POSTHOOK: query: create table iow1_mm(key int) partitioned by (key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@iow1_mm
-PREHOOK: query: insert overwrite table iow1_mm partition (key2)
-select key as k1, key from intermediate union all select key as k1, key from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@iow1_mm
-POSTHOOK: query: insert overwrite table iow1_mm partition (key2)
-select key as k1, key from intermediate union all select key as k1, key from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@iow1_mm@key2=0
-POSTHOOK: Output: default@iow1_mm@key2=10
-POSTHOOK: Output: default@iow1_mm@key2=100
-POSTHOOK: Output: default@iow1_mm@key2=103
-POSTHOOK: Output: default@iow1_mm@key2=97
-POSTHOOK: Output: default@iow1_mm@key2=98
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert into table iow1_mm partition (key2)
-select key + 1 as k1, key from intermediate union all select key as k1, key from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@iow1_mm
-POSTHOOK: query: insert into table iow1_mm partition (key2)
-select key + 1 as k1, key from intermediate union all select key as k1, key from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@iow1_mm@key2=0
-POSTHOOK: Output: default@iow1_mm@key2=10
-POSTHOOK: Output: default@iow1_mm@key2=100
-POSTHOOK: Output: default@iow1_mm@key2=103
-POSTHOOK: Output: default@iow1_mm@key2=97
-POSTHOOK: Output: default@iow1_mm@key2=98
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from iow1_mm order by key, key2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@iow1_mm
-PREHOOK: Input: default@iow1_mm@key2=0
-PREHOOK: Input: default@iow1_mm@key2=10
-PREHOOK: Input: default@iow1_mm@key2=100
-PREHOOK: Input: default@iow1_mm@key2=103
-PREHOOK: Input: default@iow1_mm@key2=97
-PREHOOK: Input: default@iow1_mm@key2=98
-#### A masked pattern was here ####
-POSTHOOK: query: select * from iow1_mm order by key, key2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@iow1_mm
-POSTHOOK: Input: default@iow1_mm@key2=0
-POSTHOOK: Input: default@iow1_mm@key2=10
-POSTHOOK: Input: default@iow1_mm@key2=100
-POSTHOOK: Input: default@iow1_mm@key2=103
-POSTHOOK: Input: default@iow1_mm@key2=97
-POSTHOOK: Input: default@iow1_mm@key2=98
-#### A masked pattern was here ####
-0	0
-0	0
-0	0
-1	0
-10	10
-10	10
-10	10
-11	10
-97	97
-97	97
-97	97
-98	97
-98	98
-98	98
-98	98
-99	98
-100	100
-100	100
-100	100
-101	100
-103	103
-103	103
-103	103
-104	103
-PREHOOK: query: insert overwrite table iow1_mm partition (key2)
-select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@iow1_mm
-POSTHOOK: query: insert overwrite table iow1_mm partition (key2)
-select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@iow1_mm@key2=0
-POSTHOOK: Output: default@iow1_mm@key2=10
-POSTHOOK: Output: default@iow1_mm@key2=100
-POSTHOOK: Output: default@iow1_mm@key2=103
-POSTHOOK: Output: default@iow1_mm@key2=97
-POSTHOOK: Output: default@iow1_mm@key2=98
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from iow1_mm order by key, key2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@iow1_mm
-PREHOOK: Input: default@iow1_mm@key2=0
-PREHOOK: Input: default@iow1_mm@key2=10
-PREHOOK: Input: default@iow1_mm@key2=100
-PREHOOK: Input: default@iow1_mm@key2=103
-PREHOOK: Input: default@iow1_mm@key2=97
-PREHOOK: Input: default@iow1_mm@key2=98
-#### A masked pattern was here ####
-POSTHOOK: query: select * from iow1_mm order by key, key2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@iow1_mm
-POSTHOOK: Input: default@iow1_mm@key2=0
-POSTHOOK: Input: default@iow1_mm@key2=10
-POSTHOOK: Input: default@iow1_mm@key2=100
-POSTHOOK: Input: default@iow1_mm@key2=103
-POSTHOOK: Input: default@iow1_mm@key2=97
-POSTHOOK: Input: default@iow1_mm@key2=98
-#### A masked pattern was here ####
-3	0
-4	0
-13	10
-14	10
-100	97
-101	97
-101	98
-102	98
-103	100
-104	100
-106	103
-107	103
-PREHOOK: query: insert overwrite table iow1_mm partition (key2)
-select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@iow1_mm
-POSTHOOK: query: insert overwrite table iow1_mm partition (key2)
-select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@iow1_mm@key2=100
-POSTHOOK: Output: default@iow1_mm@key2=101
-POSTHOOK: Output: default@iow1_mm@key2=102
-POSTHOOK: Output: default@iow1_mm@key2=103
-POSTHOOK: Output: default@iow1_mm@key2=105
-POSTHOOK: Output: default@iow1_mm@key2=106
-POSTHOOK: Output: default@iow1_mm@key2=12
-POSTHOOK: Output: default@iow1_mm@key2=13
-POSTHOOK: Output: default@iow1_mm@key2=2
-POSTHOOK: Output: default@iow1_mm@key2=3
-POSTHOOK: Output: default@iow1_mm@key2=99
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=101).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=102).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=105).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=106).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=12).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=13).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=2).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=3).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=99).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from iow1_mm order by key, key2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@iow1_mm
-PREHOOK: Input: default@iow1_mm@key2=0
-PREHOOK: Input: default@iow1_mm@key2=10
-PREHOOK: Input: default@iow1_mm@key2=100
-PREHOOK: Input: default@iow1_mm@key2=101
-PREHOOK: Input: default@iow1_mm@key2=102
-PREHOOK: Input: default@iow1_mm@key2=103
-PREHOOK: Input: default@iow1_mm@key2=105
-PREHOOK: Input: default@iow1_mm@key2=106
-PREHOOK: Input: default@iow1_mm@key2=12
-PREHOOK: Input: default@iow1_mm@key2=13
-PREHOOK: Input: default@iow1_mm@key2=2
-PREHOOK: Input: default@iow1_mm@key2=3
-PREHOOK: Input: default@iow1_mm@key2=97
-PREHOOK: Input: default@iow1_mm@key2=98
-PREHOOK: Input: default@iow1_mm@key2=99
-#### A masked pattern was here ####
-POSTHOOK: query: select * from iow1_mm order by key, key2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@iow1_mm
-POSTHOOK: Input: default@iow1_mm@key2=0
-POSTHOOK: Input: default@iow1_mm@key2=10
-POSTHOOK: Input: default@iow1_mm@key2=100
-POSTHOOK: Input: default@iow1_mm@key2=101
-POSTHOOK: Input: default@iow1_mm@key2=102
-POSTHOOK: Input: default@iow1_mm@key2=103
-POSTHOOK: Input: default@iow1_mm@key2=105
-POSTHOOK: Input: default@iow1_mm@key2=106
-POSTHOOK: Input: default@iow1_mm@key2=12
-POSTHOOK: Input: default@iow1_mm@key2=13
-POSTHOOK: Input: default@iow1_mm@key2=2
-POSTHOOK: Input: default@iow1_mm@key2=3
-POSTHOOK: Input: default@iow1_mm@key2=97
-POSTHOOK: Input: default@iow1_mm@key2=98
-POSTHOOK: Input: default@iow1_mm@key2=99
-#### A masked pattern was here ####
-2	2
-3	0
-3	3
-4	0
-12	12
-13	10
-13	13
-14	10
-99	99
-100	97
-100	100
-100	100
-101	97
-101	98
-101	101
-102	98
-102	102
-103	103
-105	105
-106	106
-PREHOOK: query: drop table iow1_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@iow1_mm
-PREHOOK: Output: default@iow1_mm
-POSTHOOK: query: drop table iow1_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@iow1_mm
-POSTHOOK: Output: default@iow1_mm
 PREHOOK: query: drop table load0_mm
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table load0_mm
@@ -1765,7 +1284,7 @@ POSTHOOK: query: select count(1) from load0_mm
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@load0_mm
 #### A masked pattern was here ####
-500
+1000
 PREHOOK: query: drop table load0_mm
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@load0_mm
@@ -1916,7 +1435,7 @@ POSTHOOK: query: select count(1) from load1_mm
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@load1_mm
 #### A masked pattern was here ####
-500
+1050
 PREHOOK: query: drop table load1_mm
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@load1_mm
@@ -1999,1091 +1518,30 @@ POSTHOOK: query: drop table intermediate2
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@intermediate2
 POSTHOOK: Output: default@intermediate2
-PREHOOK: query: drop table intermediate_nonpart
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table intermediate_nonpart
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table intermmediate_part
+PREHOOK: query: drop table multi0_1_mm
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table intermmediate_part
+POSTHOOK: query: drop table multi0_1_mm
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table intermmediate_nonpart
+PREHOOK: query: drop table multi0_2_mm
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table intermmediate_nonpart
+POSTHOOK: query: drop table multi0_2_mm
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table intermediate_nonpart(key int, p int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@intermediate_nonpart
-POSTHOOK: query: create table intermediate_nonpart(key int, p int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@intermediate_nonpart
-PREHOOK: query: insert into intermediate_nonpart select * from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@intermediate_nonpart
-POSTHOOK: query: insert into intermediate_nonpart select * from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@intermediate_nonpart
-POSTHOOK: Lineage: intermediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: intermediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-PREHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: query: create table multi0_1_mm (key int, key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@intermmediate_nonpart
-POSTHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: Output: default@multi0_1_mm
+POSTHOOK: query: create table multi0_1_mm (key int, key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@intermmediate_nonpart
-PREHOOK: query: insert into intermmediate_nonpart select * from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@intermmediate_nonpart
-POSTHOOK: query: insert into intermmediate_nonpart select * from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@intermmediate_nonpart
-POSTHOOK: Lineage: intermmediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: intermmediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-PREHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
+POSTHOOK: Output: default@multi0_1_mm
+PREHOOK: query: create table multi0_2_mm (key int, key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@intermmediate
-POSTHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: Output: default@multi0_2_mm
+POSTHOOK: query: create table multi0_2_mm (key int, key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@intermmediate
-PREHOOK: query: insert into table intermmediate partition(p) select key, p from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@intermmediate
-POSTHOOK: query: insert into table intermmediate partition(p) select key, p from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@intermmediate@p=455
-POSTHOOK: Output: default@intermmediate@p=456
-POSTHOOK: Output: default@intermmediate@p=457
-POSTHOOK: Lineage: intermmediate PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: intermmediate PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: intermmediate PARTITION(p=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart'
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@intermediate_nonpart
-#### A masked pattern was here ####
-POSTHOOK: query: export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart'
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@intermediate_nonpart
-#### A masked pattern was here ####
-PREHOOK: query: export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart'
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@intermmediate_nonpart
-#### A masked pattern was here ####
-POSTHOOK: query: export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart'
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@intermmediate_nonpart
-#### A masked pattern was here ####
-PREHOOK: query: export table intermediate to 'ql/test/data/exports/intermediate_part'
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-#### A masked pattern was here ####
-POSTHOOK: query: export table intermediate to 'ql/test/data/exports/intermediate_part'
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-#### A masked pattern was here ####
-PREHOOK: query: export table intermmediate to 'ql/test/data/exports/intermmediate_part'
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@intermmediate@p=455
-PREHOOK: Input: default@intermmediate@p=456
-PREHOOK: Input: default@intermmediate@p=457
-#### A masked pattern was here ####
-POSTHOOK: query: export table intermmediate to 'ql/test/data/exports/intermmediate_part'
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@intermmediate@p=455
-POSTHOOK: Input: default@intermmediate@p=456
-POSTHOOK: Input: default@intermmediate@p=457
-#### A masked pattern was here ####
-PREHOOK: query: drop table intermediate_nonpart
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@intermediate_nonpart
-PREHOOK: Output: default@intermediate_nonpart
-POSTHOOK: query: drop table intermediate_nonpart
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@intermediate_nonpart
-POSTHOOK: Output: default@intermediate_nonpart
-PREHOOK: query: drop table intermmediate_part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table intermmediate_part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table intermmediate_nonpart
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@intermmediate_nonpart
-PREHOOK: Output: default@intermmediate_nonpart
-POSTHOOK: query: drop table intermmediate_nonpart
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@intermmediate_nonpart
-POSTHOOK: Output: default@intermmediate_nonpart
-PREHOOK: query: drop table import0_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table import0_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table import0_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@import0_mm
-POSTHOOK: query: create table import0_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@import0_mm
-PREHOOK: query: import table import0_mm from 'ql/test/data/exports/intermediate_nonpart'
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Output: default@import0_mm
-POSTHOOK: query: import table import0_mm from 'ql/test/data/exports/intermediate_nonpart'
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Output: default@import0_mm
-PREHOOK: query: select * from import0_mm order by key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@import0_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from import0_mm order by key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@import0_mm
-#### A masked pattern was here ####
-0	456
-10	456
-97	455
-98	455
-100	457
-103	457
-PREHOOK: query: drop table import0_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@import0_mm
-PREHOOK: Output: default@import0_mm
-POSTHOOK: query: drop table import0_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@import0_mm
-POSTHOOK: Output: default@import0_mm
-PREHOOK: query: drop table import1_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table import1_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table import1_mm(key int) partitioned by (p int)
-  stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@import1_mm
-POSTHOOK: query: create table import1_mm(key int) partitioned by (p int)
-  stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@import1_mm
-PREHOOK: query: import table import1_mm from 'ql/test/data/exports/intermediate_part'
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Output: default@import1_mm
-POSTHOOK: query: import table import1_mm from 'ql/test/data/exports/intermediate_part'
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Output: default@import1_mm
-POSTHOOK: Output: default@import1_mm@p=455
-POSTHOOK: Output: default@import1_mm@p=456
-POSTHOOK: Output: default@import1_mm@p=457
-PREHOOK: query: select * from import1_mm order by key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@import1_mm
-PREHOOK: Input: default@import1_mm@p=455
-PREHOOK: Input: default@import1_mm@p=456
-PREHOOK: Input: default@import1_mm@p=457
-#### A masked pattern was here ####
-POSTHOOK: query: select * from import1_mm order by key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@import1_mm
-POSTHOOK: Input: default@import1_mm@p=455
-POSTHOOK: Input: default@import1_mm@p=456
-POSTHOOK: Input: default@import1_mm@p=457
-#### A masked pattern was here ####
-0	456
-10	456
-97	455
-98	455
-100	457
-103	457
-PREHOOK: query: drop table import1_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@import1_mm
-PREHOOK: Output: default@import1_mm
-POSTHOOK: query: drop table import1_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@import1_mm
-POSTHOOK: Output: default@import1_mm
-PREHOOK: query: drop table import4_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table import4_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table import4_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@import4_mm
-POSTHOOK: query: create table import4_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@import4_mm
-PREHOOK: query: import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart'
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Output: default@import4_mm
-POSTHOOK: query: import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart'
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Output: default@import4_mm
-PREHOOK: query: select * from import4_mm order by key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@import4_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from import4_mm order by key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@import4_mm
-#### A masked pattern was here ####
-0	456
-10	456
-97	455
-98	455
-100	457
-103	457
-PREHOOK: query: drop table import4_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@import4_mm
-PREHOOK: Output: default@import4_mm
-POSTHOOK: query: drop table import4_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@import4_mm
-POSTHOOK: Output: default@import4_mm
-PREHOOK: query: drop table import5_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table import5_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table import5_mm(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@import5_mm
-POSTHOOK: query: create table import5_mm(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@import5_mm
-PREHOOK: query: import table import5_mm partition(p=455) from 'ql/test/data/exports/intermmediate_part'
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Output: default@import5_mm
-POSTHOOK: query: import table import5_mm partition(p=455) from 'ql/test/data/exports/intermmediate_part'
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Output: default@import5_mm
-POSTHOOK: Output: default@import5_mm@p=455
-PREHOOK: query: select * from import5_mm order by key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@import5_mm
-PREHOOK: Input: default@import5_mm@p=455
-#### A masked pattern was here ####
-POSTHOOK: query: select * from import5_mm order by key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@import5_mm
-POSTHOOK: Input: default@import5_mm@p=455
-#### A masked pattern was here ####
-97	455
-98	455
-PREHOOK: query: drop table import5_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@import5_mm
-PREHOOK: Output: default@import5_mm
-POSTHOOK: query: drop table import5_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@import5_mm
-POSTHOOK: Output: default@import5_mm
-PREHOOK: query: drop table import6_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table import6_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table import6_mm(key int, p int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@import6_mm
-POSTHOOK: query: create table import6_mm(key int, p int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@import6_mm
-PREHOOK: query: import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart'
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Output: default@import6_mm
-POSTHOOK: query: import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart'
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Output: default@import6_mm
-PREHOOK: query: select * from import6_mm order by key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@import6_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from import6_mm order by key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@import6_mm
-#### A masked pattern was here ####
-0	456
-10	456
-97	455
-98	455
-100	457
-103	457
-PREHOOK: query: drop table import6_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@import6_mm
-PREHOOK: Output: default@import6_mm
-POSTHOOK: query: drop table import6_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@import6_mm
-POSTHOOK: Output: default@import6_mm
-PREHOOK: query: drop table import7_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table import7_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table import7_mm(key int) partitioned by (p int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@import7_mm
-POSTHOOK: query: create table import7_mm(key int) partitioned by (p int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@import7_mm
-PREHOOK: query: import table import7_mm from 'ql/test/data/exports/intermmediate_part'
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Output: default@import7_mm
-POSTHOOK: query: import table import7_mm from 'ql/test/data/exports/intermmediate_part'
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Output: default@import7_mm
-POSTHOOK: Output: default@import7_mm@p=455
-POSTHOOK: Output: default@import7_mm@p=456
-POSTHOOK: Output: default@import7_mm@p=457
-PREHOOK: query: select * from import7_mm order by key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@import7_mm
-PREHOOK: Input: default@import7_mm@p=455
-PREHOOK: Input: default@import7_mm@p=456
-PREHOOK: Input: default@import7_mm@p=457
-#### A masked pattern was here ####
-POSTHOOK: query: select * from import7_mm order by key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@import7_mm
-POSTHOOK: Input: default@import7_mm@p=455
-POSTHOOK: Input: default@import7_mm@p=456
-POSTHOOK: Input: default@import7_mm@p=457
-#### A masked pattern was here ####
-0	456
-10	456
-97	455
-98	455
-100	457
-103	457
-PREHOOK: query: drop table import7_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@import7_mm
-PREHOOK: Output: default@import7_mm
-POSTHOOK: query: drop table import7_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@import7_mm
-POSTHOOK: Output: default@import7_mm
-PREHOOK: query: drop table multi0_1_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table multi0_1_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table multi0_2_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table multi0_2_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table multi0_1_mm (key int, key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@multi0_1_mm
-POSTHOOK: query: create table multi0_1_mm (key int, key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@multi0_1_mm
-PREHOOK: query: create table multi0_2_mm (key int, key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@multi0_2_mm
-POSTHOOK: query: create table multi0_2_mm (key int, key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@multi0_2_mm
-PREHOOK: query: from intermediate
-insert overwrite table multi0_1_mm select key, p
-insert overwrite table multi0_2_mm select p, key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@multi0_1_mm
-PREHOOK: Output: default@multi0_2_mm
-POSTHOOK: query: from intermediate
-insert overwrite table multi0_1_mm select key, p
-insert overwrite table multi0_2_mm select p, key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@multi0_1_mm
-POSTHOOK: Output: default@multi0_2_mm
-POSTHOOK: Lineage: multi0_1_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi0_1_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi0_2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi0_2_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from multi0_1_mm order by key, key2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@multi0_1_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from multi0_1_mm order by key, key2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@multi0_1_mm
-#### A masked pattern was here ####
-0	456
-10	456
-97	455
-98	455
-100	457
-103	457
-PREHOOK: query: select * from multi0_2_mm order by key, key2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@multi0_2_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from multi0_2_mm order by key, key2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@multi0_2_mm
-#### A masked pattern was here ####
-455	97
-455	98
-456	0
-456	10
-457	100
-457	103
-PREHOOK: query: from intermediate
-insert into table multi0_1_mm select p, key
-insert overwrite table multi0_2_mm select key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@multi0_1_mm
-PREHOOK: Output: default@multi0_2_mm
-POSTHOOK: query: from intermediate
-insert into table multi0_1_mm select p, key
-insert overwrite table multi0_2_mm select key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@multi0_1_mm
 POSTHOOK: Output: default@multi0_2_mm
-POSTHOOK: Lineage: multi0_1_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi0_1_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi0_2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi0_2_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-PREHOOK: query: select * from multi0_1_mm order by key, key2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@multi0_1_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from multi0_1_mm order by key, key2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@multi0_1_mm
-#### A masked pattern was here ####
-0	456
-10	456
-97	455
-98	455
-100	457
-103	457
-455	97
-455	98
-456	0
-456	10
-457	100
-457	103
-PREHOOK: query: select * from multi0_2_mm order by key, key2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@multi0_2_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from multi0_2_mm order by key, key2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@multi0_2_mm
-#### A masked pattern was here ####
-0	456
-10	456
-97	455
-98	455
-100	457
-103	457
-PREHOOK: query: drop table multi0_1_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@multi0_1_mm
-PREHOOK: Output: default@multi0_1_mm
-POSTHOOK: query: drop table multi0_1_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@multi0_1_mm
-POSTHOOK: Output: default@multi0_1_mm
-PREHOOK: query: drop table multi0_2_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@multi0_2_mm
-PREHOOK: Output: default@multi0_2_mm
-POSTHOOK: query: drop table multi0_2_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@multi0_2_mm
-POSTHOOK: Output: default@multi0_2_mm
-PREHOOK: query: drop table multi1_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table multi1_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@multi1_mm
-POSTHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@multi1_mm
-PREHOOK: query: from intermediate
-insert into table multi1_mm partition(p=1) select p, key
-insert into table multi1_mm partition(p=2) select key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@multi1_mm@p=1
-PREHOOK: Output: default@multi1_mm@p=2
-POSTHOOK: query: from intermediate
-insert into table multi1_mm partition(p=1) select p, key
-insert into table multi1_mm partition(p=2) select key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@multi1_mm@p=1
-POSTHOOK: Output: default@multi1_mm@p=2
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-PREHOOK: query: select * from multi1_mm order by key, key2, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@multi1_mm
-PREHOOK: Input: default@multi1_mm@p=1
-PREHOOK: Input: default@multi1_mm@p=2
-#### A masked pattern was here ####
-POSTHOOK: query: select * from multi1_mm order by key, key2, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@multi1_mm
-POSTHOOK: Input: default@multi1_mm@p=1
-POSTHOOK: Input: default@multi1_mm@p=2
-#### A masked pattern was here ####
-0	456	2
-10	456	2
-97	455	2
-98	455	2
-100	457	2
-103	457	2
-455	97	1
-455	98	1
-456	0	1
-456	10	1
-457	100	1
-457	103	1
-PREHOOK: query: from intermediate
-insert into table multi1_mm partition(p=2) select p, key
-insert overwrite table multi1_mm partition(p=1) select key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@multi1_mm@p=1
-PREHOOK: Output: default@multi1_mm@p=2
-POSTHOOK: query: from intermediate
-insert into table multi1_mm partition(p=2) select p, key
-insert overwrite table multi1_mm partition(p=1) select key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@multi1_mm@p=1
-POSTHOOK: Output: default@multi1_mm@p=2
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from multi1_mm order by key, key2, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@multi1_mm
-PREHOOK: Input: default@multi1_mm@p=1
-PREHOOK: Input: default@multi1_mm@p=2
-#### A masked pattern was here ####
-POSTHOOK: query: select * from multi1_mm order by key, key2, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@multi1_mm
-POSTHOOK: Input: default@multi1_mm@p=1
-POSTHOOK: Input: default@multi1_mm@p=2
-#### A masked pattern was here ####
-0	456	1
-0	456	2
-10	456	1
-10	456	2
-97	455	1
-97	455	2
-98	455	1
-98	455	2
-100	457	1
-100	457	2
-103	457	1
-103	457	2
-455	97	1
-455	97	2
-455	98	1
-455	98	2
-456	0	1
-456	0	2
-456	10	1
-456	10	2
-457	100	1
-457	100	2
-457	103	1
-457	103	2
-PREHOOK: query: from intermediate
-insert into table multi1_mm partition(p) select p, key, p
-insert into table multi1_mm partition(p=1) select key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@multi1_mm
-PREHOOK: Output: default@multi1_mm@p=1
-POSTHOOK: query: from intermediate
-insert into table multi1_mm partition(p) select p, key, p
-insert into table multi1_mm partition(p=1) select key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@multi1_mm@p=1
-POSTHOOK: Output: default@multi1_mm@p=455
-POSTHOOK: Output: default@multi1_mm@p=456
-POSTHOOK: Output: default@multi1_mm@p=457
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=456).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=457).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select key, key2, p from multi1_mm order by key, key2, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@multi1_mm
-PREHOOK: Input: default@multi1_mm@p=1
-PREHOOK: Input: default@multi1_mm@p=2
-PREHOOK: Input: default@multi1_mm@p=455
-PREHOOK: Input: default@multi1_mm@p=456
-PREHOOK: Input: default@multi1_mm@p=457
-#### A masked pattern was here ####
-POSTHOOK: query: select key, key2, p from multi1_mm order by key, key2, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@multi1_mm
-POSTHOOK: Input: default@multi1_mm@p=1
-POSTHOOK: Input: default@multi1_mm@p=2
-POSTHOOK: Input: default@multi1_mm@p=455
-POSTHOOK: Input: default@multi1_mm@p=456
-POSTHOOK: Input: default@multi1_mm@p=457
-#### A masked pattern was here ####
-0	456	1
-0	456	1
-0	456	2
-10	456	1
-10	456	1
-10	456	2
-97	455	1
-97	455	1
-97	455	2
-98	455	1
-98	455	1
-98	455	2
-100	457	1
-100	457	1
-100	457	2
-103	457	1
-103	457	1
-103	457	2
-455	97	1
-455	97	2
-455	97	455
-455	98	1
-455	98	2
-455	98	455
-456	0	1
-456	0	2
-456	0	456
-456	10	1
-456	10	2
-456	10	456
-457	100	1
-457	100	2
-457	100	457
-457	103	1
-457	103	2
-457	103	457
-PREHOOK: query: from intermediate
-insert into table multi1_mm partition(p) select p, key, 1
-insert into table multi1_mm partition(p=1) select key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@multi1_mm
-PREHOOK: Output: default@multi1_mm@p=1
-POSTHOOK: query: from intermediate
-insert into table multi1_mm partition(p) select p, key, 1
-insert into table multi1_mm partition(p=1) select key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@multi1_mm@p=1
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select key, key2, p from multi1_mm order by key, key2, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@multi1_mm
-PREHOOK: Input: default@multi1_mm@p=1
-PREHOOK: Input: default@multi1_mm@p=2
-PREHOOK: Input: default@multi1_mm@p=455
-PREHOOK: Input: default@multi1_mm@p=456
-PREHOOK: Input: default@multi1_mm@p=457
-#### A masked pattern was here ####
-POSTHOOK: query: select key, key2, p from multi1_mm order by key, key2, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@multi1_mm
-POSTHOOK: Input: default@multi1_mm@p=1
-POSTHOOK: Input: default@multi1_mm@p=2
-POSTHOOK: Input: default@multi1_mm@p=455
-POSTHOOK: Input: default@multi1_mm@p=456
-POSTHOOK: Input: default@multi1_mm@p=457
-#### A masked pattern was here ####
-0	456	1
-0	456	1
-0	456	1
-0	456	2
-10	456	1
-10	456	1
-10	456	1
-10	456	2
-97	455	1
-97	455	1
-97	455	1
-97	455	2
-98	455	1
-98	455	1
-98	455	1
-98	455	2
-100	457	1
-100	457	1
-100	457	1
-100	457	2
-103	457	1
-103	457	1
-103	457	1
-103	457	2
-455	97	1
-455	97	1
-455	97	2
-455	97	455
-455	98	1
-455	98	1
-455	98	2
-455	98	455
-456	0	1
-456	0	1
-456	0	2
-456	0	456
-456	10	1
-456	10	1
-456	10	2
-456	10	456
-457	100	1
-457	100	1
-457	100	2
-457	100	457
-457	103	1
-457	103	1
-457	103	2
-457	103	457
-PREHOOK: query: drop table multi1_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@multi1_mm
-PREHOOK: Output: default@multi1_mm
-POSTHOOK: query: drop table multi1_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@multi1_mm
-POSTHOOK: Output: default@multi1_mm
-PREHOOK: query: drop table stats_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table stats_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table stats_mm(key int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@stats_mm
-POSTHOOK: query: create table stats_mm(key int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@stats_mm
-PREHOOK: query: insert overwrite table stats_mm  select key from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@stats_mm
-POSTHOOK: query: insert overwrite table stats_mm  select key from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@stats_mm
-POSTHOOK: Lineage: stats_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: desc formatted stats_mm
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats_mm
-POSTHOOK: query: desc formatted stats_mm
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats_mm
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	1                   
-	numRows             	6                   
-	rawDataSize         	13                  
-	totalSize           	19                  
-	transactional       	true                
-	transactional_properties	insert_only         
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: insert into table stats_mm  select key from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@stats_mm
-POSTHOOK: query: insert into table stats_mm  select key from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@stats_mm
-POSTHOOK: Lineage: stats_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: desc formatted stats_mm
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats_mm
-POSTHOOK: query: desc formatted stats_mm
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats_mm
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	2                   
-	numRows             	12                  
-	rawDataSize         	26                  
-	totalSize           	38                  
-	transactional       	true                
-	transactional_properties	insert_only         
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table stats_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@stats_mm
-PREHOOK: Output: default@stats_mm
-POSTHOOK: query: drop table stats_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@stats_mm
-POSTHOOK: Output: default@stats_mm
-PREHOOK: query: drop table stats2_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table stats2_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table stats2_mm tblproperties("transactional"="true", "transactional_properties"="insert_only") as select array(key, value) from src
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-PREHOOK: Output: database:default
-PREHOOK: Output: default@stats2_mm
-POSTHOOK: query: create table stats2_mm tblproperties("transactional"="true", "transactional_properties"="insert_only") as select array(key, value) from src
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@stats2_mm
-POSTHOOK: Lineage: stats2_mm._c0 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc formatted stats2_mm
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats2_mm
-POSTHOOK: query: desc formatted stats2_mm
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats2_mm
-# col_name            	data_type           	comment             
-	 	 
-_c0                 	array<string>       	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-	transactional       	true                
-	transactional_properties	insert_only         
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table stats2_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@stats2_mm
-PREHOOK: Output: default@stats2_mm
-POSTHOOK: query: drop table stats2_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@stats2_mm
-POSTHOOK: Output: default@stats2_mm
-PREHOOK: query: CREATE TABLE skewjoin_mm(key INT, value STRING) STORED AS TEXTFILE tblproperties ("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@skewjoin_mm
-POSTHOOK: query: CREATE TABLE skewjoin_mm(key INT, value STRING) STORED AS TEXTFILE tblproperties ("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@skewjoin_mm
-PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) INSERT OVERWRITE TABLE skewjoin_mm SELECT src1.key, src2.value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@skewjoin_mm
-POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) INSERT OVERWRITE TABLE skewjoin_mm SELECT src1.key, src2.value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@skewjoin_mm
-POSTHOOK: Lineage: skewjoin_mm.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: skewjoin_mm.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select count(distinct key) from skewjoin_mm
-PREHOOK: type: QUERY
-PREHOOK: Input: default@skewjoin_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select count(distinct key) from skewjoin_mm
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@skewjoin_mm
-#### A masked pattern was here ####
-309
-PREHOOK: query: drop table skewjoin_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@skewjoin_mm
-PREHOOK: Output: default@skewjoin_mm
-POSTHOOK: query: drop table skewjoin_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@skewjoin_mm
-POSTHOOK: Output: default@skewjoin_mm
 PREHOOK: query: CREATE TABLE parquet1_mm(id INT) STORED AS PARQUET tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default


[04/18] hive git commit: HIVE-14879 : integrate MM tables into ACID: replace MM metastore calls and structures with ACID ones (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/test/results/clientpositive/llap/mm_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out
index 49bb8cf..62ad7b6 100644
--- a/ql/src/test/results/clientpositive/llap/mm_all.q.out
+++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out
@@ -68,19 +68,20 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: intermediate
-                  Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: int)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                           output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
                           name: default.part_mm
+                      Write Type: INSERT
             Execution mode: llap
             LLAP IO: all inputs
 
@@ -160,24 +161,6 @@ POSTHOOK: Input: default@part_mm
 POSTHOOK: Input: default@part_mm@key_mm=455
 POSTHOOK: Input: default@part_mm@key_mm=456
 #### A masked pattern was here ####
-0	455
-0	455
-0	456
-10	455
-10	455
-10	456
-97	455
-97	455
-97	456
-98	455
-98	455
-98	456
-100	455
-100	455
-100	456
-103	455
-103	455
-103	456
 PREHOOK: query: select * from part_mm order by key, key_mm
 PREHOOK: type: QUERY
 PREHOOK: Input: default@part_mm
@@ -190,24 +173,6 @@ POSTHOOK: Input: default@part_mm
 POSTHOOK: Input: default@part_mm@key_mm=455
 POSTHOOK: Input: default@part_mm@key_mm=456
 #### A masked pattern was here ####
-0	455
-0	455
-0	456
-10	455
-10	455
-10	456
-97	455
-97	455
-97	456
-98	455
-98	455
-98	456
-100	455
-100	455
-100	456
-103	455
-103	455
-103	456
 PREHOOK: query: truncate table part_mm
 PREHOOK: type: TRUNCATETABLE
 PREHOOK: Output: default@part_mm@key_mm=455
@@ -263,21 +228,6 @@ POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
 POSTHOOK: Output: default@simple_mm
 POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert overwrite table simple_mm select key from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@simple_mm
-POSTHOOK: query: insert overwrite table simple_mm select key from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@simple_mm
-POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select * from simple_mm order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@simple_mm
@@ -286,12 +236,6 @@ POSTHOOK: query: select * from simple_mm order by key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@simple_mm
 #### A masked pattern was here ####
-0
-10
-97
-98
-100
-103
 PREHOOK: query: insert into table simple_mm select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
@@ -315,18 +259,6 @@ POSTHOOK: query: select * from simple_mm order by key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@simple_mm
 #### A masked pattern was here ####
-0
-0
-10
-10
-97
-97
-98
-98
-100
-100
-103
-103
 PREHOOK: query: truncate table simple_mm
 PREHOOK: type: TRUNCATETABLE
 PREHOOK: Output: default@simple_mm
@@ -376,44 +308,14 @@ POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@dp_mm@key1=123/key2=0
-POSTHOOK: Output: default@dp_mm@key1=123/key2=10
-POSTHOOK: Output: default@dp_mm@key1=123/key2=100
-POSTHOOK: Output: default@dp_mm@key1=123/key2=103
-POSTHOOK: Output: default@dp_mm@key1=123/key2=97
-POSTHOOK: Output: default@dp_mm@key1=123/key2=98
-POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=0).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=100).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=103).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=10).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=97).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=98).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select * from dp_mm order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dp_mm
-PREHOOK: Input: default@dp_mm@key1=123/key2=0
-PREHOOK: Input: default@dp_mm@key1=123/key2=10
-PREHOOK: Input: default@dp_mm@key1=123/key2=100
-PREHOOK: Input: default@dp_mm@key1=123/key2=103
-PREHOOK: Input: default@dp_mm@key1=123/key2=97
-PREHOOK: Input: default@dp_mm@key1=123/key2=98
 #### A masked pattern was here ####
 POSTHOOK: query: select * from dp_mm order by key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dp_mm
-POSTHOOK: Input: default@dp_mm@key1=123/key2=0
-POSTHOOK: Input: default@dp_mm@key1=123/key2=10
-POSTHOOK: Input: default@dp_mm@key1=123/key2=100
-POSTHOOK: Input: default@dp_mm@key1=123/key2=103
-POSTHOOK: Input: default@dp_mm@key1=123/key2=97
-POSTHOOK: Input: default@dp_mm@key1=123/key2=98
-#### A masked pattern was here ####
-0	123	0
-10	123	10
-97	123	97
-98	123	98
-100	123	100
-103	123	103
+#### A masked pattern was here ####
 PREHOOK: query: drop table dp_mm
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@dp_mm
@@ -461,18 +363,6 @@ POSTHOOK: query: select * from union_mm order by id
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@union_mm
 #### A masked pattern was here ####
-0
-1
-10
-11
-97
-98
-98
-99
-100
-101
-103
-104
 PREHOOK: query: insert into table union_mm 
 select p from
 (
@@ -512,35 +402,6 @@ POSTHOOK: query: select * from union_mm order by id
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@union_mm
 #### A masked pattern was here ####
-0
-0
-1
-1
-2
-10
-10
-11
-11
-12
-97
-97
-98
-98
-98
-99
-99
-99
-100
-100
-100
-101
-101
-102
-103
-103
-104
-104
-105
 PREHOOK: query: insert into table union_mm
 SELECT p FROM
 (
@@ -594,50 +455,6 @@ POSTHOOK: query: select * from union_mm order by id
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@union_mm
 #### A masked pattern was here ####
-0
-0
-0
-1
-1
-1
-2
-2
-10
-10
-10
-11
-11
-11
-12
-12
-97
-97
-97
-98
-98
-98
-98
-99
-99
-99
-99
-100
-100
-100
-100
-101
-101
-101
-102
-102
-103
-103
-103
-104
-104
-104
-105
-105
 PREHOOK: query: drop table union_mm
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@union_mm
@@ -675,70 +492,14 @@ POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@partunion_mm@key=0
-POSTHOOK: Output: default@partunion_mm@key=1
-POSTHOOK: Output: default@partunion_mm@key=10
-POSTHOOK: Output: default@partunion_mm@key=100
-POSTHOOK: Output: default@partunion_mm@key=101
-POSTHOOK: Output: default@partunion_mm@key=103
-POSTHOOK: Output: default@partunion_mm@key=104
-POSTHOOK: Output: default@partunion_mm@key=11
-POSTHOOK: Output: default@partunion_mm@key=97
-POSTHOOK: Output: default@partunion_mm@key=98
-POSTHOOK: Output: default@partunion_mm@key=99
-POSTHOOK: Lineage: partunion_mm PARTITION(key=0).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: partunion_mm PARTITION(key=100).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: partunion_mm PARTITION(key=101).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: partunion_mm PARTITION(key=103).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: partunion_mm PARTITION(key=104).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: partunion_mm PARTITION(key=10).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: partunion_mm PARTITION(key=11).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: partunion_mm PARTITION(key=1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: partunion_mm PARTITION(key=97).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: partunion_mm PARTITION(key=98).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: partunion_mm PARTITION(key=99).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select * from partunion_mm order by id
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partunion_mm
-PREHOOK: Input: default@partunion_mm@key=0
-PREHOOK: Input: default@partunion_mm@key=1
-PREHOOK: Input: default@partunion_mm@key=10
-PREHOOK: Input: default@partunion_mm@key=100
-PREHOOK: Input: default@partunion_mm@key=101
-PREHOOK: Input: default@partunion_mm@key=103
-PREHOOK: Input: default@partunion_mm@key=104
-PREHOOK: Input: default@partunion_mm@key=11
-PREHOOK: Input: default@partunion_mm@key=97
-PREHOOK: Input: default@partunion_mm@key=98
-PREHOOK: Input: default@partunion_mm@key=99
 #### A masked pattern was here ####
 POSTHOOK: query: select * from partunion_mm order by id
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partunion_mm
-POSTHOOK: Input: default@partunion_mm@key=0
-POSTHOOK: Input: default@partunion_mm@key=1
-POSTHOOK: Input: default@partunion_mm@key=10
-POSTHOOK: Input: default@partunion_mm@key=100
-POSTHOOK: Input: default@partunion_mm@key=101
-POSTHOOK: Input: default@partunion_mm@key=103
-POSTHOOK: Input: default@partunion_mm@key=104
-POSTHOOK: Input: default@partunion_mm@key=11
-POSTHOOK: Input: default@partunion_mm@key=97
-POSTHOOK: Input: default@partunion_mm@key=98
-POSTHOOK: Input: default@partunion_mm@key=99
-#### A masked pattern was here ####
-0	0
-1	1
-10	10
-11	11
-97	97
-98	98
-98	98
-99	99
-100	100
-101	101
-103	103
-104	104
+#### A masked pattern was here ####
 PREHOOK: query: drop table partunion_mm
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@partunion_mm
@@ -784,12 +545,6 @@ POSTHOOK: query: select * from skew_mm order by k2, k1, k4
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@skew_mm
 #### A masked pattern was here ####
-0	0	0
-10	10	10
-97	97	97
-98	98	98
-100	100	100
-103	103	103
 PREHOOK: query: drop table skew_mm
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@skew_mm
@@ -827,98 +582,14 @@ POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@skew_dp_union_mm@k3=0
-POSTHOOK: Output: default@skew_dp_union_mm@k3=10
-POSTHOOK: Output: default@skew_dp_union_mm@k3=100
-POSTHOOK: Output: default@skew_dp_union_mm@k3=101
-POSTHOOK: Output: default@skew_dp_union_mm@k3=102
-POSTHOOK: Output: default@skew_dp_union_mm@k3=103
-POSTHOOK: Output: default@skew_dp_union_mm@k3=104
-POSTHOOK: Output: default@skew_dp_union_mm@k3=107
-POSTHOOK: Output: default@skew_dp_union_mm@k3=14
-POSTHOOK: Output: default@skew_dp_union_mm@k3=4
-POSTHOOK: Output: default@skew_dp_union_mm@k3=97
-POSTHOOK: Output: default@skew_dp_union_mm@k3=98
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=100).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=100).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=100).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=103).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=103).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=103).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=104).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=104).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=104).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=107).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=107).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=107).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=10).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=10).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=10).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=14).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=14).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=14).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=4).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=4).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=4).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=97).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=97).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=97).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=98).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=98).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=98).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select * from skew_dp_union_mm order by k2, k1, k4
 PREHOOK: type: QUERY
 PREHOOK: Input: default@skew_dp_union_mm
-PREHOOK: Input: default@skew_dp_union_mm@k3=0
-PREHOOK: Input: default@skew_dp_union_mm@k3=10
-PREHOOK: Input: default@skew_dp_union_mm@k3=100
-PREHOOK: Input: default@skew_dp_union_mm@k3=101
-PREHOOK: Input: default@skew_dp_union_mm@k3=102
-PREHOOK: Input: default@skew_dp_union_mm@k3=103
-PREHOOK: Input: default@skew_dp_union_mm@k3=104
-PREHOOK: Input: default@skew_dp_union_mm@k3=107
-PREHOOK: Input: default@skew_dp_union_mm@k3=14
-PREHOOK: Input: default@skew_dp_union_mm@k3=4
-PREHOOK: Input: default@skew_dp_union_mm@k3=97
-PREHOOK: Input: default@skew_dp_union_mm@k3=98
 #### A masked pattern was here ####
 POSTHOOK: query: select * from skew_dp_union_mm order by k2, k1, k4
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@skew_dp_union_mm
-POSTHOOK: Input: default@skew_dp_union_mm@k3=0
-POSTHOOK: Input: default@skew_dp_union_mm@k3=10
-POSTHOOK: Input: default@skew_dp_union_mm@k3=100
-POSTHOOK: Input: default@skew_dp_union_mm@k3=101
-POSTHOOK: Input: default@skew_dp_union_mm@k3=102
-POSTHOOK: Input: default@skew_dp_union_mm@k3=103
-POSTHOOK: Input: default@skew_dp_union_mm@k3=104
-POSTHOOK: Input: default@skew_dp_union_mm@k3=107
-POSTHOOK: Input: default@skew_dp_union_mm@k3=14
-POSTHOOK: Input: default@skew_dp_union_mm@k3=4
-POSTHOOK: Input: default@skew_dp_union_mm@k3=97
-POSTHOOK: Input: default@skew_dp_union_mm@k3=98
-#### A masked pattern was here ####
-0	0	0	0
-1	2	3	4
-10	10	10	10
-11	12	13	14
-97	97	97	97
-98	98	98	98
-98	99	100	101
-99	100	101	102
-100	100	100	100
-101	102	103	104
-103	103	103	103
-104	105	106	107
+#### A masked pattern was here ####
 PREHOOK: query: drop table skew_dp_union_mm
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@skew_dp_union_mm
@@ -958,12 +629,6 @@ POSTHOOK: query: select * from merge0_mm
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@merge0_mm
 #### A masked pattern was here ####
-98
-97
-100
-103
-0
-10
 PREHOOK: query: insert into table merge0_mm select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
@@ -987,18 +652,6 @@ POSTHOOK: query: select * from merge0_mm
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@merge0_mm
 #### A masked pattern was here ####
-98
-97
-100
-103
-0
-10
-98
-97
-100
-103
-0
-10
 PREHOOK: query: drop table merge0_mm
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@merge0_mm
@@ -1038,12 +691,6 @@ POSTHOOK: query: select * from merge2_mm
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@merge2_mm
 #### A masked pattern was here ####
-98
-97
-100
-103
-0
-10
 PREHOOK: query: insert into table merge2_mm select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
@@ -1067,18 +714,6 @@ POSTHOOK: query: select * from merge2_mm
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@merge2_mm
 #### A masked pattern was here ####
-98
-97
-100
-103
-0
-10
-98
-97
-100
-103
-0
-10
 PREHOOK: query: drop table merge2_mm
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@merge2_mm
@@ -1108,44 +743,14 @@ POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@merge1_mm@key=0
-POSTHOOK: Output: default@merge1_mm@key=10
-POSTHOOK: Output: default@merge1_mm@key=100
-POSTHOOK: Output: default@merge1_mm@key=103
-POSTHOOK: Output: default@merge1_mm@key=97
-POSTHOOK: Output: default@merge1_mm@key=98
-POSTHOOK: Lineage: merge1_mm PARTITION(key=0).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: merge1_mm PARTITION(key=100).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: merge1_mm PARTITION(key=103).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: merge1_mm PARTITION(key=10).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: merge1_mm PARTITION(key=97).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: merge1_mm PARTITION(key=98).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select * from merge1_mm order by id, key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@merge1_mm
-PREHOOK: Input: default@merge1_mm@key=0
-PREHOOK: Input: default@merge1_mm@key=10
-PREHOOK: Input: default@merge1_mm@key=100
-PREHOOK: Input: default@merge1_mm@key=103
-PREHOOK: Input: default@merge1_mm@key=97
-PREHOOK: Input: default@merge1_mm@key=98
 #### A masked pattern was here ####
 POSTHOOK: query: select * from merge1_mm order by id, key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@merge1_mm
-POSTHOOK: Input: default@merge1_mm@key=0
-POSTHOOK: Input: default@merge1_mm@key=10
-POSTHOOK: Input: default@merge1_mm@key=100
-POSTHOOK: Input: default@merge1_mm@key=103
-POSTHOOK: Input: default@merge1_mm@key=97
-POSTHOOK: Input: default@merge1_mm@key=98
-#### A masked pattern was here ####
-0	0
-10	10
-97	97
-98	98
-100	100
-103	103
+#### A masked pattern was here ####
 PREHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
@@ -1159,50 +764,14 @@ POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@merge1_mm@key=0
-POSTHOOK: Output: default@merge1_mm@key=10
-POSTHOOK: Output: default@merge1_mm@key=100
-POSTHOOK: Output: default@merge1_mm@key=103
-POSTHOOK: Output: default@merge1_mm@key=97
-POSTHOOK: Output: default@merge1_mm@key=98
-POSTHOOK: Lineage: merge1_mm PARTITION(key=0).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: merge1_mm PARTITION(key=100).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: merge1_mm PARTITION(key=103).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: merge1_mm PARTITION(key=10).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: merge1_mm PARTITION(key=97).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: merge1_mm PARTITION(key=98).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select * from merge1_mm order by id, key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@merge1_mm
-PREHOOK: Input: default@merge1_mm@key=0
-PREHOOK: Input: default@merge1_mm@key=10
-PREHOOK: Input: default@merge1_mm@key=100
-PREHOOK: Input: default@merge1_mm@key=103
-PREHOOK: Input: default@merge1_mm@key=97
-PREHOOK: Input: default@merge1_mm@key=98
 #### A masked pattern was here ####
 POSTHOOK: query: select * from merge1_mm order by id, key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@merge1_mm
-POSTHOOK: Input: default@merge1_mm@key=0
-POSTHOOK: Input: default@merge1_mm@key=10
-POSTHOOK: Input: default@merge1_mm@key=100
-POSTHOOK: Input: default@merge1_mm@key=103
-POSTHOOK: Input: default@merge1_mm@key=97
-POSTHOOK: Input: default@merge1_mm@key=98
-#### A masked pattern was here ####
-0	0
-0	0
-10	10
-10	10
-97	97
-97	97
-98	98
-98	98
-100	100
-100	100
-103	103
-103	103
+#### A masked pattern was here ####
 PREHOOK: query: drop table merge1_mm
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@merge1_mm
@@ -1211,473 +780,6 @@ POSTHOOK: query: drop table merge1_mm
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@merge1_mm
 POSTHOOK: Output: default@merge1_mm
-PREHOOK: query: drop table ctas0_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table ctas0_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: database:default
-PREHOOK: Output: default@ctas0_mm
-POSTHOOK: query: create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@ctas0_mm
-POSTHOOK: Lineage: ctas0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: ctas0_mm.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-PREHOOK: query: select * from ctas0_mm
-PREHOOK: type: QUERY
-PREHOOK: Input: default@ctas0_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from ctas0_mm
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@ctas0_mm
-#### A masked pattern was here ####
-98	455
-97	455
-100	457
-103	457
-0	456
-10	456
-PREHOOK: query: drop table ctas0_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@ctas0_mm
-PREHOOK: Output: default@ctas0_mm
-POSTHOOK: query: drop table ctas0_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@ctas0_mm
-POSTHOOK: Output: default@ctas0_mm
-PREHOOK: query: drop table ctas1_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table ctas1_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table ctas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as
-  select * from intermediate union all select * from intermediate
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: database:default
-PREHOOK: Output: default@ctas1_mm
-POSTHOOK: query: create table ctas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as
-  select * from intermediate union all select * from intermediate
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@ctas1_mm
-POSTHOOK: Lineage: ctas1_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: ctas1_mm.p EXPRESSION [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-PREHOOK: query: select * from ctas1_mm
-PREHOOK: type: QUERY
-PREHOOK: Input: default@ctas1_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from ctas1_mm
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@ctas1_mm
-#### A masked pattern was here ####
-98	455
-97	455
-100	457
-103	457
-0	456
-10	456
-98	455
-97	455
-100	457
-103	457
-0	456
-10	456
-PREHOOK: query: drop table ctas1_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@ctas1_mm
-PREHOOK: Output: default@ctas1_mm
-POSTHOOK: query: drop table ctas1_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@ctas1_mm
-POSTHOOK: Output: default@ctas1_mm
-PREHOOK: query: drop table iow0_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table iow0_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table iow0_mm(key int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@iow0_mm
-POSTHOOK: query: create table iow0_mm(key int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@iow0_mm
-PREHOOK: query: insert overwrite table iow0_mm select key from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@iow0_mm
-POSTHOOK: query: insert overwrite table iow0_mm select key from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@iow0_mm
-POSTHOOK: Lineage: iow0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert into table iow0_mm select key + 1 from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@iow0_mm
-POSTHOOK: query: insert into table iow0_mm select key + 1 from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@iow0_mm
-POSTHOOK: Lineage: iow0_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from iow0_mm order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@iow0_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from iow0_mm order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@iow0_mm
-#### A masked pattern was here ####
-0
-1
-10
-11
-97
-98
-98
-99
-100
-101
-103
-104
-PREHOOK: query: insert overwrite table iow0_mm select key + 2 from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@iow0_mm
-POSTHOOK: query: insert overwrite table iow0_mm select key + 2 from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@iow0_mm
-POSTHOOK: Lineage: iow0_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from iow0_mm order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@iow0_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from iow0_mm order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@iow0_mm
-#### A masked pattern was here ####
-2
-12
-99
-100
-102
-105
-PREHOOK: query: drop table iow0_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@iow0_mm
-PREHOOK: Output: default@iow0_mm
-POSTHOOK: query: drop table iow0_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@iow0_mm
-POSTHOOK: Output: default@iow0_mm
-PREHOOK: query: drop table iow1_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table iow1_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table iow1_mm(key int) partitioned by (key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@iow1_mm
-POSTHOOK: query: create table iow1_mm(key int) partitioned by (key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@iow1_mm
-PREHOOK: query: insert overwrite table iow1_mm partition (key2)
-select key as k1, key from intermediate union all select key as k1, key from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@iow1_mm
-POSTHOOK: query: insert overwrite table iow1_mm partition (key2)
-select key as k1, key from intermediate union all select key as k1, key from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@iow1_mm@key2=0
-POSTHOOK: Output: default@iow1_mm@key2=10
-POSTHOOK: Output: default@iow1_mm@key2=100
-POSTHOOK: Output: default@iow1_mm@key2=103
-POSTHOOK: Output: default@iow1_mm@key2=97
-POSTHOOK: Output: default@iow1_mm@key2=98
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert into table iow1_mm partition (key2)
-select key + 1 as k1, key from intermediate union all select key as k1, key from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@iow1_mm
-POSTHOOK: query: insert into table iow1_mm partition (key2)
-select key + 1 as k1, key from intermediate union all select key as k1, key from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@iow1_mm@key2=0
-POSTHOOK: Output: default@iow1_mm@key2=10
-POSTHOOK: Output: default@iow1_mm@key2=100
-POSTHOOK: Output: default@iow1_mm@key2=103
-POSTHOOK: Output: default@iow1_mm@key2=97
-POSTHOOK: Output: default@iow1_mm@key2=98
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from iow1_mm order by key, key2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@iow1_mm
-PREHOOK: Input: default@iow1_mm@key2=0
-PREHOOK: Input: default@iow1_mm@key2=10
-PREHOOK: Input: default@iow1_mm@key2=100
-PREHOOK: Input: default@iow1_mm@key2=103
-PREHOOK: Input: default@iow1_mm@key2=97
-PREHOOK: Input: default@iow1_mm@key2=98
-#### A masked pattern was here ####
-POSTHOOK: query: select * from iow1_mm order by key, key2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@iow1_mm
-POSTHOOK: Input: default@iow1_mm@key2=0
-POSTHOOK: Input: default@iow1_mm@key2=10
-POSTHOOK: Input: default@iow1_mm@key2=100
-POSTHOOK: Input: default@iow1_mm@key2=103
-POSTHOOK: Input: default@iow1_mm@key2=97
-POSTHOOK: Input: default@iow1_mm@key2=98
-#### A masked pattern was here ####
-0	0
-0	0
-0	0
-1	0
-10	10
-10	10
-10	10
-11	10
-97	97
-97	97
-97	97
-98	97
-98	98
-98	98
-98	98
-99	98
-100	100
-100	100
-100	100
-101	100
-103	103
-103	103
-103	103
-104	103
-PREHOOK: query: insert overwrite table iow1_mm partition (key2)
-select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@iow1_mm
-POSTHOOK: query: insert overwrite table iow1_mm partition (key2)
-select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@iow1_mm@key2=0
-POSTHOOK: Output: default@iow1_mm@key2=10
-POSTHOOK: Output: default@iow1_mm@key2=100
-POSTHOOK: Output: default@iow1_mm@key2=103
-POSTHOOK: Output: default@iow1_mm@key2=97
-POSTHOOK: Output: default@iow1_mm@key2=98
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from iow1_mm order by key, key2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@iow1_mm
-PREHOOK: Input: default@iow1_mm@key2=0
-PREHOOK: Input: default@iow1_mm@key2=10
-PREHOOK: Input: default@iow1_mm@key2=100
-PREHOOK: Input: default@iow1_mm@key2=103
-PREHOOK: Input: default@iow1_mm@key2=97
-PREHOOK: Input: default@iow1_mm@key2=98
-#### A masked pattern was here ####
-POSTHOOK: query: select * from iow1_mm order by key, key2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@iow1_mm
-POSTHOOK: Input: default@iow1_mm@key2=0
-POSTHOOK: Input: default@iow1_mm@key2=10
-POSTHOOK: Input: default@iow1_mm@key2=100
-POSTHOOK: Input: default@iow1_mm@key2=103
-POSTHOOK: Input: default@iow1_mm@key2=97
-POSTHOOK: Input: default@iow1_mm@key2=98
-#### A masked pattern was here ####
-3	0
-4	0
-13	10
-14	10
-100	97
-101	97
-101	98
-102	98
-103	100
-104	100
-106	103
-107	103
-PREHOOK: query: insert overwrite table iow1_mm partition (key2)
-select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@iow1_mm
-POSTHOOK: query: insert overwrite table iow1_mm partition (key2)
-select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@iow1_mm@key2=100
-POSTHOOK: Output: default@iow1_mm@key2=101
-POSTHOOK: Output: default@iow1_mm@key2=102
-POSTHOOK: Output: default@iow1_mm@key2=103
-POSTHOOK: Output: default@iow1_mm@key2=105
-POSTHOOK: Output: default@iow1_mm@key2=106
-POSTHOOK: Output: default@iow1_mm@key2=12
-POSTHOOK: Output: default@iow1_mm@key2=13
-POSTHOOK: Output: default@iow1_mm@key2=2
-POSTHOOK: Output: default@iow1_mm@key2=3
-POSTHOOK: Output: default@iow1_mm@key2=99
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=101).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=102).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=105).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=106).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=12).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=13).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=2).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=3).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: iow1_mm PARTITION(key2=99).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from iow1_mm order by key, key2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@iow1_mm
-PREHOOK: Input: default@iow1_mm@key2=0
-PREHOOK: Input: default@iow1_mm@key2=10
-PREHOOK: Input: default@iow1_mm@key2=100
-PREHOOK: Input: default@iow1_mm@key2=101
-PREHOOK: Input: default@iow1_mm@key2=102
-PREHOOK: Input: default@iow1_mm@key2=103
-PREHOOK: Input: default@iow1_mm@key2=105
-PREHOOK: Input: default@iow1_mm@key2=106
-PREHOOK: Input: default@iow1_mm@key2=12
-PREHOOK: Input: default@iow1_mm@key2=13
-PREHOOK: Input: default@iow1_mm@key2=2
-PREHOOK: Input: default@iow1_mm@key2=3
-PREHOOK: Input: default@iow1_mm@key2=97
-PREHOOK: Input: default@iow1_mm@key2=98
-PREHOOK: Input: default@iow1_mm@key2=99
-#### A masked pattern was here ####
-POSTHOOK: query: select * from iow1_mm order by key, key2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@iow1_mm
-POSTHOOK: Input: default@iow1_mm@key2=0
-POSTHOOK: Input: default@iow1_mm@key2=10
-POSTHOOK: Input: default@iow1_mm@key2=100
-POSTHOOK: Input: default@iow1_mm@key2=101
-POSTHOOK: Input: default@iow1_mm@key2=102
-POSTHOOK: Input: default@iow1_mm@key2=103
-POSTHOOK: Input: default@iow1_mm@key2=105
-POSTHOOK: Input: default@iow1_mm@key2=106
-POSTHOOK: Input: default@iow1_mm@key2=12
-POSTHOOK: Input: default@iow1_mm@key2=13
-POSTHOOK: Input: default@iow1_mm@key2=2
-POSTHOOK: Input: default@iow1_mm@key2=3
-POSTHOOK: Input: default@iow1_mm@key2=97
-POSTHOOK: Input: default@iow1_mm@key2=98
-POSTHOOK: Input: default@iow1_mm@key2=99
-#### A masked pattern was here ####
-2	2
-3	0
-3	3
-4	0
-12	12
-13	10
-13	13
-14	10
-99	99
-100	97
-100	100
-100	100
-101	97
-101	98
-101	101
-102	98
-102	102
-103	103
-105	105
-106	106
-PREHOOK: query: drop table iow1_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@iow1_mm
-PREHOOK: Output: default@iow1_mm
-POSTHOOK: query: drop table iow1_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@iow1_mm
-POSTHOOK: Output: default@iow1_mm
 PREHOOK: query: drop table load0_mm
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table load0_mm
@@ -1740,7 +842,7 @@ POSTHOOK: query: select count(1) from load0_mm
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@load0_mm
 #### A masked pattern was here ####
-500
+1000
 PREHOOK: query: drop table load0_mm
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@load0_mm
@@ -1891,7 +993,7 @@ POSTHOOK: query: select count(1) from load1_mm
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@load1_mm
 #### A masked pattern was here ####
-500
+1050
 PREHOOK: query: drop table load1_mm
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@load1_mm
@@ -1974,424 +1076,6 @@ POSTHOOK: query: drop table intermediate2
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@intermediate2
 POSTHOOK: Output: default@intermediate2
-PREHOOK: query: drop table intermediate_nonpart
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table intermediate_nonpart
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table intermmediate_part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table intermmediate_part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table intermmediate_nonpart
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table intermmediate_nonpart
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table intermediate_nonpart(key int, p int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@intermediate_nonpart
-POSTHOOK: query: create table intermediate_nonpart(key int, p int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@intermediate_nonpart
-PREHOOK: query: insert into intermediate_nonpart select * from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@intermediate_nonpart
-POSTHOOK: query: insert into intermediate_nonpart select * from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@intermediate_nonpart
-POSTHOOK: Lineage: intermediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: intermediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-PREHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@intermmediate_nonpart
-POSTHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@intermmediate_nonpart
-PREHOOK: query: insert into intermmediate_nonpart select * from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@intermmediate_nonpart
-POSTHOOK: query: insert into intermmediate_nonpart select * from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@intermmediate_nonpart
-POSTHOOK: Lineage: intermmediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: intermmediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-PREHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@intermmediate
-POSTHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@intermmediate
-PREHOOK: query: insert into table intermmediate partition(p) select key, p from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@intermmediate
-POSTHOOK: query: insert into table intermmediate partition(p) select key, p from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@intermmediate@p=455
-POSTHOOK: Output: default@intermmediate@p=456
-POSTHOOK: Output: default@intermmediate@p=457
-POSTHOOK: Lineage: intermmediate PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: intermmediate PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: intermmediate PARTITION(p=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart'
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@intermediate_nonpart
-#### A masked pattern was here ####
-POSTHOOK: query: export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart'
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@intermediate_nonpart
-#### A masked pattern was here ####
-PREHOOK: query: export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart'
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@intermmediate_nonpart
-#### A masked pattern was here ####
-POSTHOOK: query: export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart'
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@intermmediate_nonpart
-#### A masked pattern was here ####
-PREHOOK: query: export table intermediate to 'ql/test/data/exports/intermediate_part'
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-#### A masked pattern was here ####
-POSTHOOK: query: export table intermediate to 'ql/test/data/exports/intermediate_part'
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-#### A masked pattern was here ####
-PREHOOK: query: export table intermmediate to 'ql/test/data/exports/intermmediate_part'
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@intermmediate@p=455
-PREHOOK: Input: default@intermmediate@p=456
-PREHOOK: Input: default@intermmediate@p=457
-#### A masked pattern was here ####
-POSTHOOK: query: export table intermmediate to 'ql/test/data/exports/intermmediate_part'
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@intermmediate@p=455
-POSTHOOK: Input: default@intermmediate@p=456
-POSTHOOK: Input: default@intermmediate@p=457
-#### A masked pattern was here ####
-PREHOOK: query: drop table intermediate_nonpart
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@intermediate_nonpart
-PREHOOK: Output: default@intermediate_nonpart
-POSTHOOK: query: drop table intermediate_nonpart
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@intermediate_nonpart
-POSTHOOK: Output: default@intermediate_nonpart
-PREHOOK: query: drop table intermmediate_part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table intermmediate_part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table intermmediate_nonpart
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@intermmediate_nonpart
-PREHOOK: Output: default@intermmediate_nonpart
-POSTHOOK: query: drop table intermmediate_nonpart
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@intermmediate_nonpart
-POSTHOOK: Output: default@intermmediate_nonpart
-PREHOOK: query: drop table import0_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table import0_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table import0_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@import0_mm
-POSTHOOK: query: create table import0_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@import0_mm
-PREHOOK: query: import table import0_mm from 'ql/test/data/exports/intermediate_nonpart'
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Output: default@import0_mm
-POSTHOOK: query: import table import0_mm from 'ql/test/data/exports/intermediate_nonpart'
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Output: default@import0_mm
-PREHOOK: query: select * from import0_mm order by key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@import0_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from import0_mm order by key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@import0_mm
-#### A masked pattern was here ####
-0	456
-10	456
-97	455
-98	455
-100	457
-103	457
-PREHOOK: query: drop table import0_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@import0_mm
-PREHOOK: Output: default@import0_mm
-POSTHOOK: query: drop table import0_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@import0_mm
-POSTHOOK: Output: default@import0_mm
-PREHOOK: query: drop table import1_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table import1_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table import1_mm(key int) partitioned by (p int)
-  stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@import1_mm
-POSTHOOK: query: create table import1_mm(key int) partitioned by (p int)
-  stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@import1_mm
-PREHOOK: query: import table import1_mm from 'ql/test/data/exports/intermediate_part'
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Output: default@import1_mm
-POSTHOOK: query: import table import1_mm from 'ql/test/data/exports/intermediate_part'
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Output: default@import1_mm
-POSTHOOK: Output: default@import1_mm@p=455
-POSTHOOK: Output: default@import1_mm@p=456
-POSTHOOK: Output: default@import1_mm@p=457
-PREHOOK: query: select * from import1_mm order by key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@import1_mm
-PREHOOK: Input: default@import1_mm@p=455
-PREHOOK: Input: default@import1_mm@p=456
-PREHOOK: Input: default@import1_mm@p=457
-#### A masked pattern was here ####
-POSTHOOK: query: select * from import1_mm order by key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@import1_mm
-POSTHOOK: Input: default@import1_mm@p=455
-POSTHOOK: Input: default@import1_mm@p=456
-POSTHOOK: Input: default@import1_mm@p=457
-#### A masked pattern was here ####
-0	456
-10	456
-97	455
-98	455
-100	457
-103	457
-PREHOOK: query: drop table import1_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@import1_mm
-PREHOOK: Output: default@import1_mm
-POSTHOOK: query: drop table import1_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@import1_mm
-POSTHOOK: Output: default@import1_mm
-PREHOOK: query: drop table import4_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table import4_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table import4_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@import4_mm
-POSTHOOK: query: create table import4_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@import4_mm
-PREHOOK: query: import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart'
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Output: default@import4_mm
-POSTHOOK: query: import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart'
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Output: default@import4_mm
-PREHOOK: query: select * from import4_mm order by key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@import4_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from import4_mm order by key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@import4_mm
-#### A masked pattern was here ####
-0	456
-10	456
-97	455
-98	455
-100	457
-103	457
-PREHOOK: query: drop table import4_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@import4_mm
-PREHOOK: Output: default@import4_mm
-POSTHOOK: query: drop table import4_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@import4_mm
-POSTHOOK: Output: default@import4_mm
-PREHOOK: query: drop table import5_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table import5_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table import5_mm(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@import5_mm
-POSTHOOK: query: create table import5_mm(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@import5_mm
-PREHOOK: query: import table import5_mm partition(p=455) from 'ql/test/data/exports/intermmediate_part'
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Output: default@import5_mm
-POSTHOOK: query: import table import5_mm partition(p=455) from 'ql/test/data/exports/intermmediate_part'
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Output: default@import5_mm
-POSTHOOK: Output: default@import5_mm@p=455
-PREHOOK: query: select * from import5_mm order by key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@import5_mm
-PREHOOK: Input: default@import5_mm@p=455
-#### A masked pattern was here ####
-POSTHOOK: query: select * from import5_mm order by key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@import5_mm
-POSTHOOK: Input: default@import5_mm@p=455
-#### A masked pattern was here ####
-97	455
-98	455
-PREHOOK: query: drop table import5_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@import5_mm
-PREHOOK: Output: default@import5_mm
-POSTHOOK: query: drop table import5_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@import5_mm
-POSTHOOK: Output: default@import5_mm
-PREHOOK: query: drop table import6_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table import6_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table import6_mm(key int, p int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@import6_mm
-POSTHOOK: query: create table import6_mm(key int, p int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@import6_mm
-PREHOOK: query: import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart'
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Output: default@import6_mm
-POSTHOOK: query: import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart'
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Output: default@import6_mm
-PREHOOK: query: select * from import6_mm order by key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@import6_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from import6_mm order by key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@import6_mm
-#### A masked pattern was here ####
-0	456
-10	456
-97	455
-98	455
-100	457
-103	457
-PREHOOK: query: drop table import6_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@import6_mm
-PREHOOK: Output: default@import6_mm
-POSTHOOK: query: drop table import6_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@import6_mm
-POSTHOOK: Output: default@import6_mm
-PREHOOK: query: drop table import7_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table import7_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table import7_mm(key int) partitioned by (p int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@import7_mm
-POSTHOOK: query: create table import7_mm(key int) partitioned by (p int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@import7_mm
-PREHOOK: query: import table import7_mm from 'ql/test/data/exports/intermmediate_part'
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Output: default@import7_mm
-POSTHOOK: query: import table import7_mm from 'ql/test/data/exports/intermmediate_part'
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Output: default@import7_mm
-POSTHOOK: Output: default@import7_mm@p=455
-POSTHOOK: Output: default@import7_mm@p=456
-POSTHOOK: Output: default@import7_mm@p=457
-PREHOOK: query: select * from import7_mm order by key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@import7_mm
-PREHOOK: Input: default@import7_mm@p=455
-PREHOOK: Input: default@import7_mm@p=456
-PREHOOK: Input: default@import7_mm@p=457
-#### A masked pattern was here ####
-POSTHOOK: query: select * from import7_mm order by key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@import7_mm
-POSTHOOK: Input: default@import7_mm@p=455
-POSTHOOK: Input: default@import7_mm@p=456
-POSTHOOK: Input: default@import7_mm@p=457
-#### A masked pattern was here ####
-0	456
-10	456
-97	455
-98	455
-100	457
-103	457
-PREHOOK: query: drop table import7_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@import7_mm
-PREHOOK: Output: default@import7_mm
-POSTHOOK: query: drop table import7_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@import7_mm
-POSTHOOK: Output: default@import7_mm
 PREHOOK: query: drop table multi0_1_mm
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table multi0_1_mm
@@ -2416,649 +1100,6 @@ POSTHOOK: query: create table multi0_2_mm (key int, key2 int)  tblproperties("tr
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@multi0_2_mm
-PREHOOK: query: from intermediate
-insert overwrite table multi0_1_mm select key, p
-insert overwrite table multi0_2_mm select p, key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@multi0_1_mm
-PREHOOK: Output: default@multi0_2_mm
-POSTHOOK: query: from intermediate
-insert overwrite table multi0_1_mm select key, p
-insert overwrite table multi0_2_mm select p, key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@multi0_1_mm
-POSTHOOK: Output: default@multi0_2_mm
-POSTHOOK: Lineage: multi0_1_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi0_1_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi0_2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi0_2_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from multi0_1_mm order by key, key2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@multi0_1_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from multi0_1_mm order by key, key2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@multi0_1_mm
-#### A masked pattern was here ####
-0	456
-10	456
-97	455
-98	455
-100	457
-103	457
-PREHOOK: query: select * from multi0_2_mm order by key, key2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@multi0_2_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from multi0_2_mm order by key, key2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@multi0_2_mm
-#### A masked pattern was here ####
-455	97
-455	98
-456	0
-456	10
-457	100
-457	103
-PREHOOK: query: from intermediate
-insert into table multi0_1_mm select p, key
-insert overwrite table multi0_2_mm select key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@multi0_1_mm
-PREHOOK: Output: default@multi0_2_mm
-POSTHOOK: query: from intermediate
-insert into table multi0_1_mm select p, key
-insert overwrite table multi0_2_mm select key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@multi0_1_mm
-POSTHOOK: Output: default@multi0_2_mm
-POSTHOOK: Lineage: multi0_1_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi0_1_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi0_2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi0_2_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-PREHOOK: query: select * from multi0_1_mm order by key, key2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@multi0_1_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from multi0_1_mm order by key, key2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@multi0_1_mm
-#### A masked pattern was here ####
-0	456
-10	456
-97	455
-98	455
-100	457
-103	457
-455	97
-455	98
-456	0
-456	10
-457	100
-457	103
-PREHOOK: query: select * from multi0_2_mm order by key, key2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@multi0_2_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from multi0_2_mm order by key, key2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@multi0_2_mm
-#### A masked pattern was here ####
-0	456
-10	456
-97	455
-98	455
-100	457
-103	457
-PREHOOK: query: drop table multi0_1_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@multi0_1_mm
-PREHOOK: Output: default@multi0_1_mm
-POSTHOOK: query: drop table multi0_1_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@multi0_1_mm
-POSTHOOK: Output: default@multi0_1_mm
-PREHOOK: query: drop table multi0_2_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@multi0_2_mm
-PREHOOK: Output: default@multi0_2_mm
-POSTHOOK: query: drop table multi0_2_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@multi0_2_mm
-POSTHOOK: Output: default@multi0_2_mm
-PREHOOK: query: drop table multi1_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table multi1_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@multi1_mm
-POSTHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@multi1_mm
-PREHOOK: query: from intermediate
-insert into table multi1_mm partition(p=1) select p, key
-insert into table multi1_mm partition(p=2) select key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@multi1_mm@p=1
-PREHOOK: Output: default@multi1_mm@p=2
-POSTHOOK: query: from intermediate
-insert into table multi1_mm partition(p=1) select p, key
-insert into table multi1_mm partition(p=2) select key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@multi1_mm@p=1
-POSTHOOK: Output: default@multi1_mm@p=2
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-PREHOOK: query: select * from multi1_mm order by key, key2, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@multi1_mm
-PREHOOK: Input: default@multi1_mm@p=1
-PREHOOK: Input: default@multi1_mm@p=2
-#### A masked pattern was here ####
-POSTHOOK: query: select * from multi1_mm order by key, key2, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@multi1_mm
-POSTHOOK: Input: default@multi1_mm@p=1
-POSTHOOK: Input: default@multi1_mm@p=2
-#### A masked pattern was here ####
-0	456	2
-10	456	2
-97	455	2
-98	455	2
-100	457	2
-103	457	2
-455	97	1
-455	98	1
-456	0	1
-456	10	1
-457	100	1
-457	103	1
-PREHOOK: query: from intermediate
-insert into table multi1_mm partition(p=2) select p, key
-insert overwrite table multi1_mm partition(p=1) select key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@multi1_mm@p=1
-PREHOOK: Output: default@multi1_mm@p=2
-POSTHOOK: query: from intermediate
-insert into table multi1_mm partition(p=2) select p, key
-insert overwrite table multi1_mm partition(p=1) select key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@multi1_mm@p=1
-POSTHOOK: Output: default@multi1_mm@p=2
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from multi1_mm order by key, key2, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@multi1_mm
-PREHOOK: Input: default@multi1_mm@p=1
-PREHOOK: Input: default@multi1_mm@p=2
-#### A masked pattern was here ####
-POSTHOOK: query: select * from multi1_mm order by key, key2, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@multi1_mm
-POSTHOOK: Input: default@multi1_mm@p=1
-POSTHOOK: Input: default@multi1_mm@p=2
-#### A masked pattern was here ####
-0	456	1
-0	456	2
-10	456	1
-10	456	2
-97	455	1
-97	455	2
-98	455	1
-98	455	2
-100	457	1
-100	457	2
-103	457	1
-103	457	2
-455	97	1
-455	97	2
-455	98	1
-455	98	2
-456	0	1
-456	0	2
-456	10	1
-456	10	2
-457	100	1
-457	100	2
-457	103	1
-457	103	2
-PREHOOK: query: from intermediate
-insert into table multi1_mm partition(p) select p, key, p
-insert into table multi1_mm partition(p=1) select key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@multi1_mm
-PREHOOK: Output: default@multi1_mm@p=1
-POSTHOOK: query: from intermediate
-insert into table multi1_mm partition(p) select p, key, p
-insert into table multi1_mm partition(p=1) select key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@multi1_mm@p=1
-POSTHOOK: Output: default@multi1_mm@p=455
-POSTHOOK: Output: default@multi1_mm@p=456
-POSTHOOK: Output: default@multi1_mm@p=457
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=456).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=457).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select key, key2, p from multi1_mm order by key, key2, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@multi1_mm
-PREHOOK: Input: default@multi1_mm@p=1
-PREHOOK: Input: default@multi1_mm@p=2
-PREHOOK: Input: default@multi1_mm@p=455
-PREHOOK: Input: default@multi1_mm@p=456
-PREHOOK: Input: default@multi1_mm@p=457
-#### A masked pattern was here ####
-POSTHOOK: query: select key, key2, p from multi1_mm order by key, key2, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@multi1_mm
-POSTHOOK: Input: default@multi1_mm@p=1
-POSTHOOK: Input: default@multi1_mm@p=2
-POSTHOOK: Input: default@multi1_mm@p=455
-POSTHOOK: Input: default@multi1_mm@p=456
-POSTHOOK: Input: default@multi1_mm@p=457
-#### A masked pattern was here ####
-0	456	1
-0	456	1
-0	456	2
-10	456	1
-10	456	1
-10	456	2
-97	455	1
-97	455	1
-97	455	2
-98	455	1
-98	455	1
-98	455	2
-100	457	1
-100	457	1
-100	457	2
-103	457	1
-103	457	1
-103	457	2
-455	97	1
-455	97	2
-455	97	455
-455	98	1
-455	98	2
-455	98	455
-456	0	1
-456	0	2
-456	0	456
-456	10	1
-456	10	2
-456	10	456
-457	100	1
-457	100	2
-457	100	457
-457	103	1
-457	103	2
-457	103	457
-PREHOOK: query: from intermediate
-insert into table multi1_mm partition(p) select p, key, 1
-insert into table multi1_mm partition(p=1) select key, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@multi1_mm
-PREHOOK: Output: default@multi1_mm@p=1
-POSTHOOK: query: from intermediate
-insert into table multi1_mm partition(p) select p, key, 1
-insert into table multi1_mm partition(p=1) select key, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@multi1_mm@p=1
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select key, key2, p from multi1_mm order by key, key2, p
-PREHOOK: type: QUERY
-PREHOOK: Input: default@multi1_mm
-PREHOOK: Input: default@multi1_mm@p=1
-PREHOOK: Input: default@multi1_mm@p=2
-PREHOOK: Input: default@multi1_mm@p=455
-PREHOOK: Input: default@multi1_mm@p=456
-PREHOOK: Input: default@multi1_mm@p=457
-#### A masked pattern was here ####
-POSTHOOK: query: select key, key2, p from multi1_mm order by key, key2, p
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@multi1_mm
-POSTHOOK: Input: default@multi1_mm@p=1
-POSTHOOK: Input: default@multi1_mm@p=2
-POSTHOOK: Input: default@multi1_mm@p=455
-POSTHOOK: Input: default@multi1_mm@p=456
-POSTHOOK: Input: default@multi1_mm@p=457
-#### A masked pattern was here ####
-0	456	1
-0	456	1
-0	456	1
-0	456	2
-10	456	1
-10	456	1
-10	456	1
-10	456	2
-97	455	1
-97	455	1
-97	455	1
-97	455	2
-98	455	1
-98	455	1
-98	455	1
-98	455	2
-100	457	1
-100	457	1
-100	457	1
-100	457	2
-103	457	1
-103	457	1
-103	457	1
-103	457	2
-455	97	1
-455	97	1
-455	97	2
-455	97	455
-455	98	1
-455	98	1
-455	98	2
-455	98	455
-456	0	1
-456	0	1
-456	0	2
-456	0	456
-456	10	1
-456	10	1
-456	10	2
-456	10	456
-457	100	1
-457	100	1
-457	100	2
-457	100	457
-457	103	1
-457	103	1
-457	103	2
-457	103	457
-PREHOOK: query: drop table multi1_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@multi1_mm
-PREHOOK: Output: default@multi1_mm
-POSTHOOK: query: drop table multi1_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@multi1_mm
-POSTHOOK: Output: default@multi1_mm
-PREHOOK: query: drop table stats_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table stats_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table stats_mm(key int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@stats_mm
-POSTHOOK: query: create table stats_mm(key int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@stats_mm
-PREHOOK: query: insert overwrite table stats_mm  select key from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@stats_mm
-POSTHOOK: query: insert overwrite table stats_mm  select key from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@stats_mm
-POSTHOOK: Lineage: stats_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: desc formatted stats_mm
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats_mm
-POSTHOOK: query: desc formatted stats_mm
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats_mm
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	3                   
-	numRows             	6                   
-	rawDataSize         	13                  
-	totalSize           	19                  
-	transactional       	true                
-	transactional_properties	insert_only         
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: insert into table stats_mm  select key from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@stats_mm
-POSTHOOK: query: insert into table stats_mm  select key from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@stats_mm
-POSTHOOK: Lineage: stats_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: desc formatted stats_mm
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats_mm
-POSTHOOK: query: desc formatted stats_mm
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats_mm
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	6                   
-	numRows             	12                  
-	rawDataSize         	26                  
-	totalSize           	38                  
-	transactional       	true                
-	transactional_properties	insert_only         
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table stats_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@stats_mm
-PREHOOK: Output: default@stats_mm
-POSTHOOK: query: drop table stats_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@stats_mm
-POSTHOOK: Output: default@stats_mm
-PREHOOK: query: drop table stats2_mm
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table stats2_mm
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table stats2_mm tblproperties("transactional"="true", "transactional_properties"="insert_only") as select array(key, value) from src
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-PREHOOK: Output: database:default
-PREHOOK: Output: default@stats2_mm
-POSTHOOK: query: create table stats2_mm tblproperties("transactional"="true", "transactional_properties"="insert_only") as select array(key, value) from src
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@stats2_mm
-POSTHOOK: Lineage: stats2_mm._c0 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc formatted stats2_mm
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats2_mm
-POSTHOOK: query: desc formatted stats2_mm
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats2_mm
-# col_name            	data_type           	comment             
-	 	 
-_c0                 	array<string>       	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	55                  
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-	transactional       	true                
-	transactional_properties	insert_only         
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table stats2_mm
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@stats2_mm
-PREHOOK: Output: default@stats2_mm
-POSTHOOK: query: drop table stats2_mm
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@stats2_mm
-POSTHOOK: Output: default@stats2_mm
-PREHOOK: query: CREATE TABLE skewjoin_mm(key INT, value STRING) STORED AS TEXTFILE tblproperties ("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@skewjoin_mm
-POSTHOOK: query: CREATE TABLE skewjoin_mm(key INT, value STRING) STORED AS TEXTFILE tblproperties ("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@skewjoin_mm
-PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) INSERT OVERWRITE TABLE skewjoin_mm SELECT src1.key, src2.value
-PREHO

<TRUNCATED>
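
For readers following the desc formatted output above: the "Table Parameters" block (transactional, transactional_properties, numFiles, numRows, ...) is rendered from the table's metastore parameter map, so the same values can be read programmatically. A minimal sketch, assuming a reachable metastore; the table name mirrors the test's stats2_mm, and HiveMetaStoreClient/Table are the standard metastore client classes.

import java.util.Map;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;

public class DescribeMmTable {
  public static void main(String[] args) throws Exception {
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    Table t = client.getTable("default", "stats2_mm");
    // "desc formatted" prints its Table Parameters section from this map.
    Map<String, String> params = t.getParameters();
    System.out.println("transactional = " + params.get("transactional"));
    System.out.println("transactional_properties = " + params.get("transactional_properties"));
    System.out.println("numRows = " + params.get("numRows"));
    client.close();
  }
}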

[03/18] hive git commit: HIVE-14879 : integrate MM tables into ACID: replace MM metastore calls and structures with ACID ones (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/ql/src/test/results/clientpositive/llap/mm_conversions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mm_conversions.q.out b/ql/src/test/results/clientpositive/llap/mm_conversions.q.out
index 2cfa06d..861acaf 100644
--- a/ql/src/test/results/clientpositive/llap/mm_conversions.q.out
+++ b/ql/src/test/results/clientpositive/llap/mm_conversions.q.out
@@ -37,250 +37,200 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@intermediate@p=457
 POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: drop table simple_from_mm
+PREHOOK: query: drop table simple_from_mm1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table simple_from_mm
+POSTHOOK: query: drop table simple_from_mm1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table simple_from_mm(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: query: create table simple_from_mm1(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: create table simple_from_mm(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: Output: default@simple_from_mm1
+POSTHOOK: query: create table simple_from_mm1(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@simple_from_mm
-PREHOOK: query: insert into table simple_from_mm select key from intermediate
+POSTHOOK: Output: default@simple_from_mm1
+PREHOOK: query: insert into table simple_from_mm1 select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: insert into table simple_from_mm select key from intermediate
+PREHOOK: Output: default@simple_from_mm1
+POSTHOOK: query: insert into table simple_from_mm1 select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@simple_from_mm
-POSTHOOK: Lineage: simple_from_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert into table simple_from_mm select key from intermediate
+POSTHOOK: Output: default@simple_from_mm1
+POSTHOOK: Lineage: simple_from_mm1.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: insert into table simple_from_mm1 select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: insert into table simple_from_mm select key from intermediate
+PREHOOK: Output: default@simple_from_mm1
+POSTHOOK: query: insert into table simple_from_mm1 select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@simple_from_mm
-POSTHOOK: Lineage: simple_from_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from simple_from_mm s1 order by key
+POSTHOOK: Output: default@simple_from_mm1
+POSTHOOK: Lineage: simple_from_mm1.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from simple_from_mm1 s1 order by key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@simple_from_mm
+PREHOOK: Input: default@simple_from_mm1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from simple_from_mm s1 order by key
+POSTHOOK: query: select * from simple_from_mm1 s1 order by key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@simple_from_mm
+POSTHOOK: Input: default@simple_from_mm1
 #### A masked pattern was here ####
-0
-0
-98
-98
-100
-100
-PREHOOK: query: alter table simple_from_mm unset tblproperties('transactional_properties', 'transactional')
+PREHOOK: query: alter table simple_from_mm1 unset tblproperties('transactional_properties', 'transactional')
 PREHOOK: type: ALTERTABLE_PROPERTIES
-PREHOOK: Input: default@simple_from_mm
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: alter table simple_from_mm unset tblproperties('transactional_properties', 'transactional')
+PREHOOK: Input: default@simple_from_mm1
+PREHOOK: Output: default@simple_from_mm1
+POSTHOOK: query: alter table simple_from_mm1 unset tblproperties('transactional_properties', 'transactional')
 POSTHOOK: type: ALTERTABLE_PROPERTIES
-POSTHOOK: Input: default@simple_from_mm
-POSTHOOK: Output: default@simple_from_mm
-PREHOOK: query: select * from simple_from_mm s2 order by key
+POSTHOOK: Input: default@simple_from_mm1
+POSTHOOK: Output: default@simple_from_mm1
+PREHOOK: query: select * from simple_from_mm1 s2 order by key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@simple_from_mm
+PREHOOK: Input: default@simple_from_mm1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from simple_from_mm s2 order by key
+POSTHOOK: query: select * from simple_from_mm1 s2 order by key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@simple_from_mm
+POSTHOOK: Input: default@simple_from_mm1
 #### A masked pattern was here ####
-0
-0
-98
-98
-100
-100
-PREHOOK: query: insert into table simple_from_mm select key from intermediate
+PREHOOK: query: insert into table simple_from_mm1 select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: insert into table simple_from_mm select key from intermediate
+PREHOOK: Output: default@simple_from_mm1
+POSTHOOK: query: insert into table simple_from_mm1 select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@simple_from_mm
-POSTHOOK: Lineage: simple_from_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from simple_from_mm s3 order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@simple_from_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from simple_from_mm s3 order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@simple_from_mm
-#### A masked pattern was here ####
-0
-0
-0
-98
-98
-98
-100
-100
-100
-PREHOOK: query: alter table simple_from_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: ALTERTABLE_PROPERTIES
-PREHOOK: Input: default@simple_from_mm
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: alter table simple_from_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: ALTERTABLE_PROPERTIES
-POSTHOOK: Input: default@simple_from_mm
-POSTHOOK: Output: default@simple_from_mm
-PREHOOK: query: select * from simple_from_mm s4 order by key
+POSTHOOK: Output: default@simple_from_mm1
+POSTHOOK: Lineage: simple_from_mm1.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from simple_from_mm1 s3 order by key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@simple_from_mm
+PREHOOK: Input: default@simple_from_mm1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from simple_from_mm s4 order by key
+POSTHOOK: query: select * from simple_from_mm1 s3 order by key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@simple_from_mm
+POSTHOOK: Input: default@simple_from_mm1
 #### A masked pattern was here ####
-0
-0
-0
-98
-98
-98
-100
-100
-100
-PREHOOK: query: insert into table simple_from_mm select key from intermediate
+PREHOOK: query: drop table simple_from_mm1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@simple_from_mm1
+PREHOOK: Output: default@simple_from_mm1
+POSTHOOK: query: drop table simple_from_mm1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@simple_from_mm1
+POSTHOOK: Output: default@simple_from_mm1
+PREHOOK: query: drop table simple_from_mm2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table simple_from_mm2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table simple_from_mm2(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@simple_from_mm2
+POSTHOOK: query: create table simple_from_mm2(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@simple_from_mm2
+PREHOOK: query: insert into table simple_from_mm2 select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: insert into table simple_from_mm select key from intermediate
+PREHOOK: Output: default@simple_from_mm2
+POSTHOOK: query: insert into table simple_from_mm2 select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@simple_from_mm
-POSTHOOK: Lineage: simple_from_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from simple_from_mm s5 order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@simple_from_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from simple_from_mm s5 order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@simple_from_mm
-#### A masked pattern was here ####
-0
-0
-0
-0
-98
-98
-98
-98
-100
-100
-100
-100
-PREHOOK: query: alter table simple_from_mm set tblproperties("transactional"="false", 'transactional_properties'='false')
+POSTHOOK: Output: default@simple_from_mm2
+POSTHOOK: Lineage: simple_from_mm2.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: insert into table simple_from_mm2 select key from intermediate
+PREHOOK: type: QUERY
+PREHOOK: Input: default@intermediate
+PREHOOK: Input: default@intermediate@p=455
+PREHOOK: Input: default@intermediate@p=456
+PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Output: default@simple_from_mm2
+POSTHOOK: query: insert into table simple_from_mm2 select key from intermediate
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@intermediate
+POSTHOOK: Input: default@intermediate@p=455
+POSTHOOK: Input: default@intermediate@p=456
+POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Output: default@simple_from_mm2
+POSTHOOK: Lineage: simple_from_mm2.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from simple_from_mm2 s1 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@simple_from_mm2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from simple_from_mm2 s1 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@simple_from_mm2
+#### A masked pattern was here ####
+PREHOOK: query: alter table simple_from_mm2 set tblproperties("transactional"="false", 'transactional_properties'='false')
 PREHOOK: type: ALTERTABLE_PROPERTIES
-PREHOOK: Input: default@simple_from_mm
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: alter table simple_from_mm set tblproperties("transactional"="false", 'transactional_properties'='false')
+PREHOOK: Input: default@simple_from_mm2
+PREHOOK: Output: default@simple_from_mm2
+POSTHOOK: query: alter table simple_from_mm2 set tblproperties("transactional"="false", 'transactional_properties'='false')
 POSTHOOK: type: ALTERTABLE_PROPERTIES
-POSTHOOK: Input: default@simple_from_mm
-POSTHOOK: Output: default@simple_from_mm
-PREHOOK: query: select * from simple_from_mm s6 order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@simple_from_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from simple_from_mm s6 order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@simple_from_mm
-#### A masked pattern was here ####
-0
-0
-0
-0
-98
-98
-98
-98
-100
-100
-100
-100
-PREHOOK: query: insert into table simple_from_mm select key from intermediate
+POSTHOOK: Input: default@simple_from_mm2
+POSTHOOK: Output: default@simple_from_mm2
+PREHOOK: query: select * from simple_from_mm2 s2 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@simple_from_mm2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from simple_from_mm2 s2 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@simple_from_mm2
+#### A masked pattern was here ####
+PREHOOK: query: insert into table simple_from_mm2 select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: insert into table simple_from_mm select key from intermediate
+PREHOOK: Output: default@simple_from_mm2
+POSTHOOK: query: insert into table simple_from_mm2 select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@simple_from_mm
-POSTHOOK: Lineage: simple_from_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from simple_from_mm s7 order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@simple_from_mm
-#### A masked pattern was here ####
-POSTHOOK: query: select * from simple_from_mm s7 order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@simple_from_mm
-#### A masked pattern was here ####
-0
-0
-0
-0
-0
-98
-98
-98
-98
-98
-100
-100
-100
-100
-100
-PREHOOK: query: drop table simple_from_mm
+POSTHOOK: Output: default@simple_from_mm2
+POSTHOOK: Lineage: simple_from_mm2.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from simple_from_mm2 s3 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@simple_from_mm2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from simple_from_mm2 s3 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@simple_from_mm2
+#### A masked pattern was here ####
+PREHOOK: query: drop table simple_from_mm2
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@simple_from_mm
-PREHOOK: Output: default@simple_from_mm
-POSTHOOK: query: drop table simple_from_mm
+PREHOOK: Input: default@simple_from_mm2
+PREHOOK: Output: default@simple_from_mm2
+POSTHOOK: query: drop table simple_from_mm2
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@simple_from_mm
-POSTHOOK: Output: default@simple_from_mm
+POSTHOOK: Input: default@simple_from_mm2
+POSTHOOK: Output: default@simple_from_mm2
 PREHOOK: query: drop table simple_to_mm
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table simple_to_mm
@@ -308,21 +258,6 @@ POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
 POSTHOOK: Output: default@simple_to_mm
 POSTHOOK: Lineage: simple_to_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert into table simple_to_mm select key from intermediate
-PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@simple_to_mm
-POSTHOOK: query: insert into table simple_to_mm select key from intermediate
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@simple_to_mm
-POSTHOOK: Lineage: simple_to_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select * from simple_to_mm s1 order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@simple_to_mm
@@ -331,16 +266,11 @@ POSTHOOK: query: select * from simple_to_mm s1 order by key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@simple_to_mm
 #### A masked pattern was here ####
-0
-0
-98
-98
-100
-100
 PREHOOK: query: alter table simple_to_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
 PREHOOK: type: ALTERTABLE_PROPERTIES
 PREHOOK: Input: default@simple_to_mm
 PREHOOK: Output: default@simple_to_mm
+FAILED: Error in acquiring locks: Transaction already opened. txnid:30
 POSTHOOK: query: alter table simple_to_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
 POSTHOOK: type: ALTERTABLE_PROPERTIES
 POSTHOOK: Input: default@simple_to_mm
@@ -353,12 +283,6 @@ POSTHOOK: query: select * from simple_to_mm s2 order by key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@simple_to_mm
 #### A masked pattern was here ####
-0
-0
-98
-98
-100
-100
 PREHOOK: query: insert into table simple_to_mm select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
@@ -397,18 +321,6 @@ POSTHOOK: query: select * from simple_to_mm s3 order by key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@simple_to_mm
 #### A masked pattern was here ####
-0
-0
-0
-0
-98
-98
-98
-98
-100
-100
-100
-100
 PREHOOK: query: drop table simple_to_mm
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@simple_to_mm
@@ -417,378 +329,260 @@ POSTHOOK: query: drop table simple_to_mm
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@simple_to_mm
 POSTHOOK: Output: default@simple_to_mm
-PREHOOK: query: drop table part_from_mm
+PREHOOK: query: drop table part_from_mm1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table part_from_mm
+POSTHOOK: query: drop table part_from_mm1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table part_from_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: query: create table part_from_mm1(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@part_from_mm
-POSTHOOK: query: create table part_from_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: Output: default@part_from_mm1
+POSTHOOK: query: create table part_from_mm1(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part_from_mm
-PREHOOK: query: insert into table part_from_mm partition(key_mm='455') select key from intermediate
+POSTHOOK: Output: default@part_from_mm1
+PREHOOK: query: insert into table part_from_mm1 partition(key_mm='455') select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@part_from_mm@key_mm=455
-POSTHOOK: query: insert into table part_from_mm partition(key_mm='455') select key from intermediate
+PREHOOK: Output: default@part_from_mm1@key_mm=455
+POSTHOOK: query: insert into table part_from_mm1 partition(key_mm='455') select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@part_from_mm@key_mm=455
-POSTHOOK: Lineage: part_from_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert into table part_from_mm partition(key_mm='455') select key from intermediate
+POSTHOOK: Output: default@part_from_mm1@key_mm=455
+POSTHOOK: Lineage: part_from_mm1 PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: insert into table part_from_mm1 partition(key_mm='455') select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@part_from_mm@key_mm=455
-POSTHOOK: query: insert into table part_from_mm partition(key_mm='455') select key from intermediate
+PREHOOK: Output: default@part_from_mm1@key_mm=455
+POSTHOOK: query: insert into table part_from_mm1 partition(key_mm='455') select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@part_from_mm@key_mm=455
-POSTHOOK: Lineage: part_from_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert into table part_from_mm partition(key_mm='456') select key from intermediate
+POSTHOOK: Output: default@part_from_mm1@key_mm=455
+POSTHOOK: Lineage: part_from_mm1 PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: insert into table part_from_mm1 partition(key_mm='456') select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@part_from_mm@key_mm=456
-POSTHOOK: query: insert into table part_from_mm partition(key_mm='456') select key from intermediate
+PREHOOK: Output: default@part_from_mm1@key_mm=456
+POSTHOOK: query: insert into table part_from_mm1 partition(key_mm='456') select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@part_from_mm@key_mm=456
-POSTHOOK: Lineage: part_from_mm PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from part_from_mm s1 order by key, key_mm
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Input: default@part_from_mm@key_mm=455
-PREHOOK: Input: default@part_from_mm@key_mm=456
-#### A masked pattern was here ####
-POSTHOOK: query: select * from part_from_mm s1 order by key, key_mm
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Input: default@part_from_mm@key_mm=455
-POSTHOOK: Input: default@part_from_mm@key_mm=456
-#### A masked pattern was here ####
-0	455
-0	455
-0	456
-98	455
-98	455
-98	456
-100	455
-100	455
-100	456
-PREHOOK: query: alter table part_from_mm unset tblproperties('transactional_properties', 'transactional')
+POSTHOOK: Output: default@part_from_mm1@key_mm=456
+POSTHOOK: Lineage: part_from_mm1 PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from part_from_mm1 s1 order by key, key_mm
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part_from_mm1
+PREHOOK: Input: default@part_from_mm1@key_mm=455
+PREHOOK: Input: default@part_from_mm1@key_mm=456
+#### A masked pattern was here ####
+POSTHOOK: query: select * from part_from_mm1 s1 order by key, key_mm
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part_from_mm1
+POSTHOOK: Input: default@part_from_mm1@key_mm=455
+POSTHOOK: Input: default@part_from_mm1@key_mm=456
+#### A masked pattern was here ####
+PREHOOK: query: alter table part_from_mm1 unset tblproperties('transactional_properties', 'transactional')
 PREHOOK: type: ALTERTABLE_PROPERTIES
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Output: default@part_from_mm
-POSTHOOK: query: alter table part_from_mm unset tblproperties('transactional_properties', 'transactional')
+PREHOOK: Input: default@part_from_mm1
+PREHOOK: Output: default@part_from_mm1
+POSTHOOK: query: alter table part_from_mm1 unset tblproperties('transactional_properties', 'transactional')
 POSTHOOK: type: ALTERTABLE_PROPERTIES
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Output: default@part_from_mm
-PREHOOK: query: select * from part_from_mm s2 order by key, key_mm
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Input: default@part_from_mm@key_mm=455
-PREHOOK: Input: default@part_from_mm@key_mm=456
-#### A masked pattern was here ####
-POSTHOOK: query: select * from part_from_mm s2 order by key, key_mm
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Input: default@part_from_mm@key_mm=455
-POSTHOOK: Input: default@part_from_mm@key_mm=456
-#### A masked pattern was here ####
-0	455
-0	455
-0	456
-98	455
-98	455
-98	456
-100	455
-100	455
-100	456
-PREHOOK: query: insert into table part_from_mm partition(key_mm='456') select key from intermediate
+POSTHOOK: Input: default@part_from_mm1
+POSTHOOK: Output: default@part_from_mm1
+PREHOOK: query: select * from part_from_mm1 s2 order by key, key_mm
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part_from_mm1
+PREHOOK: Input: default@part_from_mm1@key_mm=455
+PREHOOK: Input: default@part_from_mm1@key_mm=456
+#### A masked pattern was here ####
+POSTHOOK: query: select * from part_from_mm1 s2 order by key, key_mm
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part_from_mm1
+POSTHOOK: Input: default@part_from_mm1@key_mm=455
+POSTHOOK: Input: default@part_from_mm1@key_mm=456
+#### A masked pattern was here ####
+PREHOOK: query: insert into table part_from_mm1 partition(key_mm='456') select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@part_from_mm@key_mm=456
-POSTHOOK: query: insert into table part_from_mm partition(key_mm='456') select key from intermediate
+PREHOOK: Output: default@part_from_mm1@key_mm=456
+POSTHOOK: query: insert into table part_from_mm1 partition(key_mm='456') select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@part_from_mm@key_mm=456
-POSTHOOK: Lineage: part_from_mm PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert into table part_from_mm partition(key_mm='457') select key from intermediate
+POSTHOOK: Output: default@part_from_mm1@key_mm=456
+POSTHOOK: Lineage: part_from_mm1 PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: insert into table part_from_mm1 partition(key_mm='457') select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@part_from_mm@key_mm=457
-POSTHOOK: query: insert into table part_from_mm partition(key_mm='457') select key from intermediate
+PREHOOK: Output: default@part_from_mm1@key_mm=457
+POSTHOOK: query: insert into table part_from_mm1 partition(key_mm='457') select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@part_from_mm@key_mm=457
-POSTHOOK: Lineage: part_from_mm PARTITION(key_mm=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from part_from_mm s3 order by key, key_mm
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Input: default@part_from_mm@key_mm=455
-PREHOOK: Input: default@part_from_mm@key_mm=456
-PREHOOK: Input: default@part_from_mm@key_mm=457
-#### A masked pattern was here ####
-POSTHOOK: query: select * from part_from_mm s3 order by key, key_mm
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Input: default@part_from_mm@key_mm=455
-POSTHOOK: Input: default@part_from_mm@key_mm=456
-POSTHOOK: Input: default@part_from_mm@key_mm=457
-#### A masked pattern was here ####
-0	455
-0	455
-0	456
-0	456
-0	457
-98	455
-98	455
-98	456
-98	456
-98	457
-100	455
-100	455
-100	456
-100	456
-100	457
-PREHOOK: query: alter table part_from_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: ALTERTABLE_PROPERTIES
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Output: default@part_from_mm
-POSTHOOK: query: alter table part_from_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: ALTERTABLE_PROPERTIES
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Output: default@part_from_mm
-PREHOOK: query: select * from part_from_mm s4 order by key, key_mm
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Input: default@part_from_mm@key_mm=455
-PREHOOK: Input: default@part_from_mm@key_mm=456
-PREHOOK: Input: default@part_from_mm@key_mm=457
-#### A masked pattern was here ####
-POSTHOOK: query: select * from part_from_mm s4 order by key, key_mm
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Input: default@part_from_mm@key_mm=455
-POSTHOOK: Input: default@part_from_mm@key_mm=456
-POSTHOOK: Input: default@part_from_mm@key_mm=457
-#### A masked pattern was here ####
-0	455
-0	455
-0	456
-0	456
-0	457
-98	455
-98	455
-98	456
-98	456
-98	457
-100	455
-100	455
-100	456
-100	456
-100	457
-PREHOOK: query: insert into table part_from_mm partition(key_mm='456') select key from intermediate
+POSTHOOK: Output: default@part_from_mm1@key_mm=457
+POSTHOOK: Lineage: part_from_mm1 PARTITION(key_mm=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from part_from_mm1 s3 order by key, key_mm
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part_from_mm1
+PREHOOK: Input: default@part_from_mm1@key_mm=455
+PREHOOK: Input: default@part_from_mm1@key_mm=456
+PREHOOK: Input: default@part_from_mm1@key_mm=457
+#### A masked pattern was here ####
+POSTHOOK: query: select * from part_from_mm1 s3 order by key, key_mm
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part_from_mm1
+POSTHOOK: Input: default@part_from_mm1@key_mm=455
+POSTHOOK: Input: default@part_from_mm1@key_mm=456
+POSTHOOK: Input: default@part_from_mm1@key_mm=457
+#### A masked pattern was here ####
+PREHOOK: query: drop table part_from_mm1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@part_from_mm1
+PREHOOK: Output: default@part_from_mm1
+POSTHOOK: query: drop table part_from_mm1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@part_from_mm1
+POSTHOOK: Output: default@part_from_mm1
+PREHOOK: query: drop table part_from_mm2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table part_from_mm2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table part_from_mm2(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@part_from_mm2
+POSTHOOK: query: create table part_from_mm2(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@part_from_mm2
+PREHOOK: query: insert into table part_from_mm2 partition(key_mm='456') select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@part_from_mm@key_mm=456
-POSTHOOK: query: insert into table part_from_mm partition(key_mm='456') select key from intermediate
+PREHOOK: Output: default@part_from_mm2@key_mm=456
+POSTHOOK: query: insert into table part_from_mm2 partition(key_mm='456') select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@part_from_mm@key_mm=456
-POSTHOOK: Lineage: part_from_mm PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: insert into table part_from_mm partition(key_mm='455') select key from intermediate
+POSTHOOK: Output: default@part_from_mm2@key_mm=456
+POSTHOOK: Lineage: part_from_mm2 PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: --fails here
+insert into table part_from_mm2 partition(key_mm='455') select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@part_from_mm@key_mm=455
-POSTHOOK: query: insert into table part_from_mm partition(key_mm='455') select key from intermediate
+PREHOOK: Output: default@part_from_mm2@key_mm=455
+POSTHOOK: query: --fails here
+insert into table part_from_mm2 partition(key_mm='455') select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@part_from_mm@key_mm=455
-POSTHOOK: Lineage: part_from_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from part_from_mm s5 order by key, key_mm
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Input: default@part_from_mm@key_mm=455
-PREHOOK: Input: default@part_from_mm@key_mm=456
-PREHOOK: Input: default@part_from_mm@key_mm=457
-#### A masked pattern was here ####
-POSTHOOK: query: select * from part_from_mm s5 order by key, key_mm
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Input: default@part_from_mm@key_mm=455
-POSTHOOK: Input: default@part_from_mm@key_mm=456
-POSTHOOK: Input: default@part_from_mm@key_mm=457
-#### A masked pattern was here ####
-0	455
-0	455
-0	455
-0	456
-0	456
-0	456
-0	457
-98	455
-98	455
-98	455
-98	456
-98	456
-98	456
-98	457
-100	455
-100	455
-100	455
-100	456
-100	456
-100	456
-100	457
-PREHOOK: query: alter table part_from_mm set tblproperties("transactional"="false", 'transactional_properties'='false')
+POSTHOOK: Output: default@part_from_mm2@key_mm=455
+POSTHOOK: Lineage: part_from_mm2 PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from part_from_mm2 s1 order by key, key_mm
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part_from_mm2
+PREHOOK: Input: default@part_from_mm2@key_mm=455
+PREHOOK: Input: default@part_from_mm2@key_mm=456
+#### A masked pattern was here ####
+POSTHOOK: query: select * from part_from_mm2 s1 order by key, key_mm
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part_from_mm2
+POSTHOOK: Input: default@part_from_mm2@key_mm=455
+POSTHOOK: Input: default@part_from_mm2@key_mm=456
+#### A masked pattern was here ####
+PREHOOK: query: alter table part_from_mm2 set tblproperties("transactional"="false", 'transactional_properties'='false')
 PREHOOK: type: ALTERTABLE_PROPERTIES
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Output: default@part_from_mm
-POSTHOOK: query: alter table part_from_mm set tblproperties("transactional"="false", 'transactional_properties'='false')
+PREHOOK: Input: default@part_from_mm2
+PREHOOK: Output: default@part_from_mm2
+POSTHOOK: query: alter table part_from_mm2 set tblproperties("transactional"="false", 'transactional_properties'='false')
 POSTHOOK: type: ALTERTABLE_PROPERTIES
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Output: default@part_from_mm
-PREHOOK: query: select * from part_from_mm s6 order by key, key_mm
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Input: default@part_from_mm@key_mm=455
-PREHOOK: Input: default@part_from_mm@key_mm=456
-PREHOOK: Input: default@part_from_mm@key_mm=457
-#### A masked pattern was here ####
-POSTHOOK: query: select * from part_from_mm s6 order by key, key_mm
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Input: default@part_from_mm@key_mm=455
-POSTHOOK: Input: default@part_from_mm@key_mm=456
-POSTHOOK: Input: default@part_from_mm@key_mm=457
-#### A masked pattern was here ####
-0	455
-0	455
-0	455
-0	456
-0	456
-0	456
-0	457
-98	455
-98	455
-98	455
-98	456
-98	456
-98	456
-98	457
-100	455
-100	455
-100	455
-100	456
-100	456
-100	456
-100	457
-PREHOOK: query: insert into table part_from_mm partition(key_mm='457') select key from intermediate
+POSTHOOK: Input: default@part_from_mm2
+POSTHOOK: Output: default@part_from_mm2
+PREHOOK: query: select * from part_from_mm2 s2 order by key, key_mm
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part_from_mm2
+PREHOOK: Input: default@part_from_mm2@key_mm=455
+PREHOOK: Input: default@part_from_mm2@key_mm=456
+#### A masked pattern was here ####
+POSTHOOK: query: select * from part_from_mm2 s2 order by key, key_mm
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part_from_mm2
+POSTHOOK: Input: default@part_from_mm2@key_mm=455
+POSTHOOK: Input: default@part_from_mm2@key_mm=456
+#### A masked pattern was here ####
+PREHOOK: query: insert into table part_from_mm2 partition(key_mm='457') select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
 PREHOOK: Input: default@intermediate@p=455
 PREHOOK: Input: default@intermediate@p=456
 PREHOOK: Input: default@intermediate@p=457
-PREHOOK: Output: default@part_from_mm@key_mm=457
-POSTHOOK: query: insert into table part_from_mm partition(key_mm='457') select key from intermediate
+PREHOOK: Output: default@part_from_mm2@key_mm=457
+POSTHOOK: query: insert into table part_from_mm2 partition(key_mm='457') select key from intermediate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@intermediate
 POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
-POSTHOOK: Output: default@part_from_mm@key_mm=457
-POSTHOOK: Lineage: part_from_mm PARTITION(key_mm=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select * from part_from_mm s7 order by key, key_mm
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Input: default@part_from_mm@key_mm=455
-PREHOOK: Input: default@part_from_mm@key_mm=456
-PREHOOK: Input: default@part_from_mm@key_mm=457
-#### A masked pattern was here ####
-POSTHOOK: query: select * from part_from_mm s7 order by key, key_mm
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Input: default@part_from_mm@key_mm=455
-POSTHOOK: Input: default@part_from_mm@key_mm=456
-POSTHOOK: Input: default@part_from_mm@key_mm=457
-#### A masked pattern was here ####
-0	455
-0	455
-0	455
-0	456
-0	456
-0	456
-0	457
-0	457
-98	455
-98	455
-98	455
-98	456
-98	456
-98	456
-98	457
-98	457
-100	455
-100	455
-100	455
-100	456
-100	456
-100	456
-100	457
-100	457
-PREHOOK: query: drop table part_from_mm
+POSTHOOK: Output: default@part_from_mm2@key_mm=457
+POSTHOOK: Lineage: part_from_mm2 PARTITION(key_mm=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from part_from_mm2 s3 order by key, key_mm
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part_from_mm2
+PREHOOK: Input: default@part_from_mm2@key_mm=455
+PREHOOK: Input: default@part_from_mm2@key_mm=456
+PREHOOK: Input: default@part_from_mm2@key_mm=457
+#### A masked pattern was here ####
+POSTHOOK: query: select * from part_from_mm2 s3 order by key, key_mm
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part_from_mm2
+POSTHOOK: Input: default@part_from_mm2@key_mm=455
+POSTHOOK: Input: default@part_from_mm2@key_mm=456
+POSTHOOK: Input: default@part_from_mm2@key_mm=457
+#### A masked pattern was here ####
+PREHOOK: query: drop table part_from_mm2
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@part_from_mm
-PREHOOK: Output: default@part_from_mm
-POSTHOOK: query: drop table part_from_mm
+PREHOOK: Input: default@part_from_mm2
+PREHOOK: Output: default@part_from_mm2
+POSTHOOK: query: drop table part_from_mm2
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@part_from_mm
-POSTHOOK: Output: default@part_from_mm
+POSTHOOK: Input: default@part_from_mm2
+POSTHOOK: Output: default@part_from_mm2
 PREHOOK: query: drop table part_to_mm
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table part_to_mm
@@ -843,16 +637,11 @@ POSTHOOK: Input: default@part_to_mm
 POSTHOOK: Input: default@part_to_mm@key_mm=455
 POSTHOOK: Input: default@part_to_mm@key_mm=456
 #### A masked pattern was here ####
-0	455
-0	456
-98	455
-98	456
-100	455
-100	456
 PREHOOK: query: alter table part_to_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
 PREHOOK: type: ALTERTABLE_PROPERTIES
 PREHOOK: Input: default@part_to_mm
 PREHOOK: Output: default@part_to_mm
+FAILED: Error in acquiring locks: Transaction already opened. txnid:63
 POSTHOOK: query: alter table part_to_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
 POSTHOOK: type: ALTERTABLE_PROPERTIES
 POSTHOOK: Input: default@part_to_mm
@@ -869,12 +658,6 @@ POSTHOOK: Input: default@part_to_mm
 POSTHOOK: Input: default@part_to_mm@key_mm=455
 POSTHOOK: Input: default@part_to_mm@key_mm=456
 #### A masked pattern was here ####
-0	455
-0	456
-98	455
-98	456
-100	455
-100	456
 PREHOOK: query: insert into table part_to_mm partition(key_mm='456') select key from intermediate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate
@@ -919,18 +702,6 @@ POSTHOOK: Input: default@part_to_mm@key_mm=455
 POSTHOOK: Input: default@part_to_mm@key_mm=456
 POSTHOOK: Input: default@part_to_mm@key_mm=457
 #### A masked pattern was here ####
-0	455
-0	456
-0	456
-0	457
-98	455
-98	456
-98	456
-98	457
-100	455
-100	456
-100	456
-100	457
 PREHOOK: query: drop table part_to_mm
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@part_to_mm
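
The conversions exercised above all come down to flipping the transactional table properties, which the metastore keeps in the same parameter map. A minimal client-side sketch of the equivalent change, assuming a reachable metastore; the table name mirrors the test's simple_to_mm, and getTable/alter_table are the standard client methods. Note this bypasses Hive's DDL-layer validation (the test output above shows such an ALTER failing on lock acquisition), so it is illustration only.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;

public class ConvertToMm {
  public static void main(String[] args) throws Exception {
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    Table t = client.getTable("default", "simple_to_mm");
    // Same intent as: alter table simple_to_mm set tblproperties(
    //   "transactional"="true", "transactional_properties"="insert_only")
    t.getParameters().put("transactional", "true");
    t.getParameters().put("transactional_properties", "insert_only");
    client.alter_table("default", "simple_to_mm", t);
    client.close();
  }
}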


[18/18] hive git commit: HIVE-14879 : integrate MM tables into ACID: replace MM metastore calls and structures with ACID ones (Wei Zheng)

Posted by we...@apache.org.
HIVE-14879 : integrate MM tables into ACID: replace MM metastore calls and structures with ACID ones (Wei Zheng)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/77511070
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/77511070
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/77511070

Branch: refs/heads/hive-14535
Commit: 77511070dd8b7176e98454b9a7010eedfbd5d981
Parents: 1ceaf35
Author: Wei Zheng <we...@apache.org>
Authored: Tue May 16 15:52:03 2017 -0700
Committer: Wei Zheng <we...@apache.org>
Committed: Tue May 16 15:52:03 2017 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/common/JavaUtils.java    |   67 +
 .../hadoop/hive/common/ValidWriteIds.java       |  218 -
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   23 +-
 .../listener/DummyRawStoreFailEvent.java        |   48 -
 .../hadoop/hive/ql/history/TestHiveHistory.java |    2 +-
 metastore/if/hive_metastore.thrift              |   44 -
 .../upgrade/derby/038-HIVE-14637.derby.sql      |    6 -
 .../upgrade/derby/hive-schema-2.2.0.derby.sql   |   11 +-
 .../derby/upgrade-2.1.0-to-2.2.0.derby.sql      |    2 -
 .../upgrade/mssql/023-HIVE-14637.mssql.sql      |   15 -
 .../upgrade/mssql/hive-schema-2.2.0.mssql.sql   |   21 +-
 .../mssql/upgrade-2.1.0-to-2.2.0.mssql.sql      |    1 -
 .../upgrade/mysql/038-HIVE-14637.mysql.sql      |   15 -
 .../upgrade/mysql/hive-schema-2.2.0.mysql.sql   |   16 -
 .../mysql/upgrade-2.1.0-to-2.2.0.mysql.sql      |    1 -
 .../upgrade/oracle/038-HIVE-14637.oracle.sql    |   15 -
 .../upgrade/oracle/hive-schema-2.2.0.oracle.sql |   20 +-
 .../oracle/upgrade-2.1.0-to-2.2.0.oracle.sql    |    1 -
 .../postgres/037-HIVE-14637.postgres.sql        |   16 -
 .../postgres/hive-schema-2.2.0.postgres.sql     |   22 +-
 .../upgrade-2.1.0-to-2.2.0.postgres.sql         |    1 -
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 6078 +++++--------
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h    |  504 -
 .../ThriftHiveMetastore_server.skeleton.cpp     |   20 -
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp | 1645 +---
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |  407 +-
 .../hive/metastore/api/ClientCapabilities.java  |   32 +-
 .../metastore/api/FinalizeWriteIdRequest.java   |  684 --
 .../metastore/api/FinalizeWriteIdResult.java    |  283 -
 .../metastore/api/GetAllFunctionsResponse.java  |   36 +-
 .../metastore/api/GetNextWriteIdRequest.java    |  490 -
 .../metastore/api/GetNextWriteIdResult.java     |  387 -
 .../hive/metastore/api/GetTablesRequest.java    |   32 +-
 .../hive/metastore/api/GetTablesResult.java     |   36 +-
 .../metastore/api/GetValidWriteIdsRequest.java  |  490 -
 .../metastore/api/GetValidWriteIdsResult.java   |  740 --
 .../metastore/api/HeartbeatWriteIdRequest.java  |  589 --
 .../metastore/api/HeartbeatWriteIdResult.java   |  283 -
 .../apache/hadoop/hive/metastore/api/Table.java |  206 +-
 .../hive/metastore/api/ThriftHiveMetastore.java | 8610 ++++++------------
 .../gen-php/metastore/ThriftHiveMetastore.php   | 2132 ++---
 .../src/gen/thrift/gen-php/metastore/Types.php  | 1216 +--
 .../hive_metastore/ThriftHiveMetastore-remote   |   28 -
 .../hive_metastore/ThriftHiveMetastore.py       | 2502 ++---
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  918 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |  166 +-
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |  216 -
 .../hadoop/hive/metastore/HiveMetaStore.java    |  228 +-
 .../hive/metastore/HiveMetaStoreClient.java     |   23 -
 .../hadoop/hive/metastore/IMetaStoreClient.java |   10 -
 .../hadoop/hive/metastore/MmCleanerThread.java  |  397 -
 .../hadoop/hive/metastore/ObjectStore.java      |  283 +-
 .../apache/hadoop/hive/metastore/RawStore.java  |   43 -
 .../hive/metastore/cache/CachedStore.java       |   46 -
 .../hadoop/hive/metastore/hbase/HBaseStore.java |   74 +-
 .../hadoop/hive/metastore/model/MTable.java     |   23 +-
 .../hive/metastore/model/MTableWrite.java       |   77 -
 metastore/src/model/package.jdo                 |   33 -
 .../DummyRawStoreControlledCommit.java          |   51 -
 .../DummyRawStoreForJdoConnection.java          |   50 -
 .../hadoop/hive/metastore/TestObjectStore.java  |  153 -
 .../java/org/apache/hadoop/hive/ql/Driver.java  |   55 -
 .../hive/ql/exec/AbstractFileMergeOperator.java |    9 +-
 .../apache/hadoop/hive/ql/exec/CopyTask.java    |    7 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |   58 +-
 .../hadoop/hive/ql/exec/FetchOperator.java      |   27 +-
 .../apache/hadoop/hive/ql/exec/FetchTask.java   |    5 -
 .../hadoop/hive/ql/exec/FileSinkOperator.java   |   32 +-
 .../hadoop/hive/ql/exec/ImportCommitTask.java   |    7 +-
 .../hadoop/hive/ql/exec/ImportCommitWork.java   |   16 +-
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |   29 +-
 .../apache/hadoop/hive/ql/exec/Utilities.java   |  115 +-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |    3 +-
 .../hadoop/hive/ql/io/HiveInputFormat.java      |   58 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  107 +-
 .../apache/hadoop/hive/ql/metadata/Table.java   |    4 -
 .../hive/ql/optimizer/GenMapRedUtils.java       |    4 +-
 .../ql/optimizer/physical/SkewJoinResolver.java |    2 +-
 .../hive/ql/parse/BaseSemanticAnalyzer.java     |    3 +-
 .../hive/ql/parse/ExportSemanticAnalyzer.java   |   25 +-
 .../hive/ql/parse/ImportSemanticAnalyzer.java   |   89 +-
 .../hadoop/hive/ql/parse/IndexUpdater.java      |   29 +-
 .../hive/ql/parse/LoadSemanticAnalyzer.java     |   15 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   68 +-
 .../hadoop/hive/ql/parse/TaskCompiler.java      |   18 +-
 .../hadoop/hive/ql/plan/FileMergeDesc.java      |   19 +-
 .../hadoop/hive/ql/plan/FileSinkDesc.java       |    4 +-
 .../hadoop/hive/ql/plan/LoadMultiFilesDesc.java |    8 +
 .../hadoop/hive/ql/plan/LoadTableDesc.java      |   49 +-
 .../hadoop/hive/ql/exec/TestExecDriver.java     |    2 +-
 ql/src/test/queries/clientpositive/mm_all.q     |  205 -
 .../queries/clientpositive/mm_conversions.q     |   87 +-
 ql/src/test/queries/clientpositive/mm_exim.q    |   98 +
 .../queries/clientpositive/mm_insertonly_acid.q |   16 -
 .../results/clientpositive/llap/mm_all.q.out    | 1982 +---
 .../clientpositive/llap/mm_conversions.q.out    |  797 +-
 ql/src/test/results/clientpositive/mm_all.q.out | 1570 +---
 .../results/clientpositive/mm_conversions.q.out |  586 +-
 .../test/results/clientpositive/mm_exim.q.out   |  457 +
 .../clientpositive/mm_insertonly_acid.q.out     |  115 -
 100 files changed, 8686 insertions(+), 28880 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java b/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
index 3916fe3..28490e2 100644
--- a/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
@@ -28,6 +28,8 @@ import java.net.URLClassLoader;
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -37,6 +39,10 @@ import org.slf4j.LoggerFactory;
  */
 public final class JavaUtils {
 
+  public static final String DELTA_PREFIX = "delta";
+  public static final String DELTA_DIGITS = "%07d";
+  public static final int DELTA_DIGITS_LEN = 7;
+  public static final String STATEMENT_DIGITS = "%04d";
   private static final Logger LOG = LoggerFactory.getLogger(JavaUtils.class);
   private static final Method SUN_MISC_UTIL_RELEASE;
 
@@ -158,4 +164,65 @@ public final class JavaUtils {
   private JavaUtils() {
     // prevent instantiation
   }
+
+  public static Long extractTxnId(Path file) {
+    String fileName = file.getName();
+    String[] parts = fileName.split("_", 4);  // e.g. delta_0000001_0000001_0000
+    if (parts.length < 4 || !DELTA_PREFIX.equals(parts[0])) {
+      LOG.debug("Cannot extract transaction ID for a MM table: " + file
+          + " (" + Arrays.toString(parts) + ")");
+      return null;
+    }
+    long writeId = -1;
+    try {
+      writeId = Long.parseLong(parts[1]);
+    } catch (NumberFormatException ex) {
+      LOG.debug("Cannot extract transaction ID for a MM table: " + file
+          + "; parsing " + parts[1] + " got " + ex.getMessage());
+      return null;
+    }
+    return writeId;
+  }
+
+  public static class IdPathFilter implements PathFilter {
+    private final String mmDirName;
+    private final boolean isMatch, isIgnoreTemp;
+    public IdPathFilter(long writeId, int stmtId, boolean isMatch) {
+      this(writeId, stmtId, isMatch, false);
+    }
+    public IdPathFilter(long writeId, int stmtId, boolean isMatch, boolean isIgnoreTemp) {
+      this.mmDirName = DELTA_PREFIX + "_" + String.format(DELTA_DIGITS, writeId) + "_" +
+          String.format(DELTA_DIGITS, writeId) + "_" + String.format(STATEMENT_DIGITS, stmtId);
+      this.isMatch = isMatch;
+      this.isIgnoreTemp = isIgnoreTemp;
+    }
+
+    @Override
+    public boolean accept(Path path) {
+      String name = path.getName();
+      if (name.equals(mmDirName)) {
+        return isMatch;
+      }
+      if (isIgnoreTemp && name.length() > 0) {
+        char c = name.charAt(0);
+        if (c == '.' || c == '_') return false; // Regardless of isMatch, ignore this.
+      }
+      return !isMatch;
+    }
+  }
+
+  public static class AnyIdDirFilter implements PathFilter {
+    @Override
+    public boolean accept(Path path) {
+      String name = path.getName();
+      if (!name.startsWith(DELTA_PREFIX + "_")) return false;
+      String idStr = name.substring(DELTA_PREFIX.length() + 1, DELTA_PREFIX.length() + 1 + DELTA_DIGITS_LEN);
+      try {
+        Long.parseLong(idStr);
+      } catch (NumberFormatException ex) {
+        return false;
+      }
+      return true;
+    }
+  }
 }
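
The filters added above are plain Hadoop PathFilters, so they plug straight into FileSystem listings. A minimal usage sketch, assuming an MM table directory laid out as in the tests; the warehouse path is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.JavaUtils;

public class DeltaDirScan {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path tableDir = new Path("/user/hive/warehouse/simple_from_mm1"); // hypothetical location
    // AnyIdDirFilter keeps only names of the form delta_<7-digit id>..., as written by MM inserts.
    for (FileStatus d : fs.listStatus(tableDir, new JavaUtils.AnyIdDirFilter())) {
      // extractTxnId parses the write ID out of e.g. delta_0000001_0000001_0000 (null if malformed).
      System.out.println(d.getPath().getName() + " -> writeId " + JavaUtils.extractTxnId(d.getPath()));
    }
    // IdPathFilter with isMatch=true keeps exactly the directory for writeId=1, stmtId=0.
    FileStatus[] one = fs.listStatus(tableDir, new JavaUtils.IdPathFilter(1L, 0, true));
    System.out.println("matched " + one.length + " delta dir(s)");
  }
}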

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java b/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java
deleted file mode 100644
index 4cbeb89..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.common;
-
-import java.util.Arrays;
-import java.util.HashSet;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ValidWriteIds {
-  public static final ValidWriteIds NO_WRITE_IDS = new ValidWriteIds(-1, -1, false, null);
-
-  public static final String MM_PREFIX = "mm";
-  private static final String CURRENT_SUFFIX = ".current";
-
-  private final static Logger LOG = LoggerFactory.getLogger(ValidWriteIds.class);
-
-  private static final String VALID_WRITEIDS_PREFIX = "hive.valid.write.ids.";
-  private final long lowWatermark, highWatermark;
-  private final boolean areIdsValid;
-  private final HashSet<Long> ids;
-  private String source = null;
-
-  public ValidWriteIds(
-      long lowWatermark, long highWatermark, boolean areIdsValid, HashSet<Long> ids) {
-    this.lowWatermark = lowWatermark;
-    this.highWatermark = highWatermark;
-    this.areIdsValid = areIdsValid;
-    this.ids = ids;
-  }
-
-  public static ValidWriteIds createFromConf(Configuration conf, String dbName, String tblName) {
-    return createFromConf(conf, dbName + "." + tblName);
-  }
-
-  public static ValidWriteIds createFromConf(Configuration conf, String fullTblName) {
-    String key = createConfKey(fullTblName);
-    String idStr = conf.get(key, null);
-    String current = conf.get(key + CURRENT_SUFFIX, null);
-    if (idStr == null || idStr.isEmpty()) return null;
-    return new ValidWriteIds(idStr, current);
-  }
-
-  private static String createConfKey(String dbName, String tblName) {
-    return createConfKey(dbName + "." + tblName);
-  }
-
-  private static String createConfKey(String fullName) {
-    return VALID_WRITEIDS_PREFIX + fullName;
-  }
-
-  private ValidWriteIds(String src, String current) {
-    // TODO: lifted from ACID config implementation... optimize if needed? e.g. ranges, base64
-    String[] values = src.split(":");
-    highWatermark = Long.parseLong(values[0]);
-    lowWatermark = Long.parseLong(values[1]);
-    if (values.length > 2) {
-      areIdsValid = Long.parseLong(values[2]) > 0;
-      ids = new HashSet<Long>();
-      for(int i = 3; i < values.length; ++i) {
-        ids.add(Long.parseLong(values[i]));
-      }
-      if (current != null) {
-        long currentId = Long.parseLong(current);
-        if (areIdsValid) {
-          ids.add(currentId);
-        } else {
-          ids.remove(currentId);
-        }
-      }
-    } else if (current != null) {
-        long currentId = Long.parseLong(current);
-        areIdsValid = true;
-        ids = new HashSet<Long>();
-        ids.add(currentId);
-    } else {
-      areIdsValid = false;
-      ids = null;
-    }
-  }
-
-  public static void addCurrentToConf(
-      Configuration conf, String dbName, String tblName, long mmWriteId) {
-    String key = createConfKey(dbName, tblName) + CURRENT_SUFFIX;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Setting " + key + " => " + mmWriteId);
-    }
-    conf.set(key, Long.toString(mmWriteId));
-  }
-
-  public void addToConf(Configuration conf, String dbName, String tblName) {
-    if (source == null) {
-      source = toString();
-    }
-    String key = createConfKey(dbName, tblName);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Setting " + key + " => " + source
-          + " (old value was " + conf.get(key, null) + ")");
-    }
-    conf.set(key, source);
-  }
-
-  public static void clearConf(Configuration conf, String dbName, String tblName) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Unsetting " + createConfKey(dbName, tblName));
-    }
-    conf.unset(createConfKey(dbName, tblName));
-  }
-
-  public String toString() {
-    // TODO: lifted from ACID config implementation... optimize if needed? e.g. ranges, base64
-    StringBuilder buf = new StringBuilder();
-    buf.append(highWatermark);
-    buf.append(':');
-    buf.append(lowWatermark);
-    if (ids != null) {
-      buf.append(':');
-      buf.append(areIdsValid ? 1 : 0);
-      for (long id : ids) {
-        buf.append(':');
-        buf.append(id);
-      }
-    }
-    return buf.toString();
-  }
-
-  public boolean isValid(long writeId) {
-    if (writeId < 0) throw new RuntimeException("Incorrect write ID " + writeId);
-    if (writeId <= lowWatermark) return true;
-    if (writeId >= highWatermark) return false;
-    return ids != null && (areIdsValid == ids.contains(writeId));
-  }
-
-  public static String getMmFilePrefix(long mmWriteId) {
-    return MM_PREFIX + "_" + mmWriteId;
-  }
-
-
-  public static class IdPathFilter implements PathFilter {
-    private final String mmDirName;
-    private final boolean isMatch, isIgnoreTemp;
-    public IdPathFilter(long writeId, boolean isMatch) {
-      this(writeId, isMatch, false);
-    }
-    public IdPathFilter(long writeId, boolean isMatch, boolean isIgnoreTemp) {
-      this.mmDirName = ValidWriteIds.getMmFilePrefix(writeId);
-      this.isMatch = isMatch;
-      this.isIgnoreTemp = isIgnoreTemp;
-    }
-
-    @Override
-    public boolean accept(Path path) {
-      String name = path.getName();
-      if (name.equals(mmDirName)) {
-        return isMatch;
-      }
-      if (isIgnoreTemp && name.length() > 0) {
-        char c = name.charAt(0);
-        if (c == '.' || c == '_') return false; // Regardless of isMatch, ignore this.
-      }
-      return !isMatch;
-    }
-  }
-
-  public static class AnyIdDirFilter implements PathFilter {
-    @Override
-    public boolean accept(Path path) {
-      String name = path.getName();
-      if (!name.startsWith(MM_PREFIX + "_")) return false;
-      String idStr = name.substring(MM_PREFIX.length() + 1);
-      try {
-        Long.parseLong(idStr);
-      } catch (NumberFormatException ex) {
-        return false;
-      }
-      return true;
-    }
-  }
-  public static Long extractWriteId(Path file) {
-    String fileName = file.getName();
-    String[] parts = fileName.split("_", 3);
-    if (parts.length < 2 || !MM_PREFIX.equals(parts[0])) {
-      LOG.info("Cannot extract write ID for a MM table: " + file
-          + " (" + Arrays.toString(parts) + ")");
-      return null;
-    }
-    long writeId = -1;
-    try {
-      writeId = Long.parseLong(parts[1]);
-    } catch (NumberFormatException ex) {
-      LOG.info("Cannot extract write ID for a MM table: " + file
-          + "; parsing " + parts[1] + " got " + ex.getMessage());
-      return null;
-    }
-    return writeId;
-  }
-
-}
\ No newline at end of file

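The deletion above retires the MM-specific mm_<writeId> directory naming (getMmFilePrefix) in favor of the ACID delta naming now in JavaUtils. A small comparison sketch; the class and method names here are illustrative:

public class MmDirNaming {
  // Old layout, removed with ValidWriteIds: mm_<writeId>
  static String oldMmDir(long writeId) {
    return "mm_" + writeId;
  }
  // New ACID insert-only layout, per the JavaUtils constants: delta_%07d_%07d_%04d
  static String newDeltaDir(long writeId, int stmtId) {
    return String.format("delta_%07d_%07d_%04d", writeId, writeId, stmtId);
  }
  public static void main(String[] args) {
    System.out.println(oldMmDir(7));       // mm_7
    System.out.println(newDeltaDir(7, 0)); // delta_0000007_0000007_0000
  }
}
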
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index d32f1e5..a49f667 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -300,10 +300,7 @@ public class HiveConf extends Configuration {
       HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_MEMORY_TTL,
       HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_INVALIDATOR_FREQUENCY,
       HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_HBASE_TTL,
-      HiveConf.ConfVars.METASTORE_HBASE_FILE_METADATA_THREADS,
-      HiveConf.ConfVars.HIVE_METASTORE_MM_THREAD_SCAN_INTERVAL,
-      HiveConf.ConfVars.HIVE_METASTORE_MM_HEARTBEAT_TIMEOUT,
-      HiveConf.ConfVars.HIVE_METASTORE_MM_ABSOLUTE_TIMEOUT
+      HiveConf.ConfVars.METASTORE_HBASE_FILE_METADATA_THREADS
       };
 
   /**
@@ -3385,24 +3382,6 @@ public class HiveConf extends Configuration {
         "Log tracing id that can be used by upstream clients for tracking respective logs. " +
         "Truncated to " + LOG_PREFIX_LENGTH + " characters. Defaults to use auto-generated session id."),
 
-    HIVE_METASTORE_MM_THREAD_SCAN_INTERVAL("hive.metastore.mm.thread.scan.interval", "900s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "MM table housekeeping thread interval in this metastore instance. 0 to disable."),
-
-    HIVE_METASTORE_MM_HEARTBEAT_TIMEOUT("hive.metastore.mm.heartbeat.timeout", "1800s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "MM write ID times out after this long if a heartbeat is not send. Currently disabled."),
-
-    HIVE_METASTORE_MM_ABSOLUTE_TIMEOUT("hive.metastore.mm.absolute.timeout", "7d",
-        new TimeValidator(TimeUnit.SECONDS),
-        "MM write ID cannot be outstanding for more than this long."),
-
-    HIVE_METASTORE_MM_ABORTED_GRACE_PERIOD("hive.metastore.mm.aborted.grace.period", "1d",
-        new TimeValidator(TimeUnit.SECONDS),
-        "MM write ID will not be removed up for that long after it has been aborted;\n" +
-        "this is to work around potential races e.g. with FS visibility, when deleting files."),
-
-
     HIVE_MM_AVOID_GLOBSTATUS_ON_S3("hive.mm.avoid.s3.globstatus", true,
         "Whether to use listFiles (optimized on S3) instead of globStatus when on S3."),
 

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 93ff498..309dbac 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -63,7 +63,6 @@ import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.model.MTableWrite;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.thrift.TException;
 
@@ -920,51 +919,4 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
       String tableName) throws MetaException, NoSuchObjectException {
     return objectStore.getAggrColStatsForTablePartitions(dbName, tableName);
   }
-
-  @Override
-  @CanNotRetry
-  public Boolean commitTransactionExpectDeadlock() {
-    return null;
-  }
-
-  @Override
-  public void createTableWrite(Table arg0, long arg1, char arg2, long arg3) {
-  }
-
-  @Override
-  public void deleteTableWrites(String arg0, String arg1, long arg2, long arg3)
-      throws MetaException {
-  }
-
-  @Override
-  public List<FullTableName> getAllMmTablesForCleanup() throws MetaException {
-    return null;
-  }
-
-  @Override
-  public Collection<String> getAllPartitionLocations(String arg0, String arg1) {
-    return null;
-  }
-
-  @Override
-  public MTableWrite getTableWrite(String arg0, String arg1, long arg2)
-      throws MetaException {
-    return null;
-  }
-
-  @Override
-  public List<Long> getTableWriteIds(String arg0, String arg1, long arg2,
-      long arg3, char arg4) throws MetaException {
-    return null;
-  }
-
-  @Override
-  public List<MTableWrite> getTableWrites(String arg0, String arg1, long arg2,
-      long arg3) throws MetaException {
-    return null;
-  }
-
-  @Override
-  public void updateTableWrite(MTableWrite arg0) {
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
index 0c51a68..c70925a 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
@@ -103,7 +103,7 @@ public class TestHiveHistory extends TestCase {
         db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
         db.createTable(src, cols, null, TextInputFormat.class,
             IgnoreKeyTextOutputFormat.class);
-        db.loadTable(hadoopDataFile[i], src, false, false, false, false, false, null);
+        db.loadTable(hadoopDataFile[i], src, false, false, false, false, false, null, 0);
         i++;
       }
 

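The extra trailing argument to loadTable above is the statement ID this patch threads through the load path (0 for a single-statement load). A hedged reading of the updated call; the local names below are hypothetical and only label the positional arguments:

// Hypothetical names for the positional arguments; the trailing 0 is
// assumed to be the new statement ID, and null the MM write ID.
Long mmWriteId = null; // null: not an insert-only (MM) write
int stmtId = 0;
db.loadTable(hadoopDataFile[i], src, false, false, false, false, false,
    mmWriteId, stmtId);
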
http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/if/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift
index 2800e23..64c782e 100755
--- a/metastore/if/hive_metastore.thrift
+++ b/metastore/if/hive_metastore.thrift
@@ -306,8 +306,6 @@ struct Table {
   13: optional PrincipalPrivilegeSet privileges,
   14: optional bool temporary=false,
   15: optional bool rewriteEnabled,     // rewrite enabled or not
-  16: optional i64 mmNextWriteId,
-  17: optional i64 mmWatermarkWriteId
 }
 
 struct Partition {
@@ -907,44 +905,6 @@ struct CacheFileMetadataRequest {
   4: optional bool isAllParts
 }
 
-
-struct GetNextWriteIdRequest {
-  1: required string dbName,
-  2: required string tblName
-}
-struct GetNextWriteIdResult {
-  1: required i64 writeId
-}
-
-struct FinalizeWriteIdRequest {
-  1: required string dbName,
-  2: required string tblName,
-  3: required i64 writeId,
-  4: required bool commit
-}
-struct FinalizeWriteIdResult {
-}
-
-struct HeartbeatWriteIdRequest {
-  1: required string dbName,
-  2: required string tblName,
-  3: required i64 writeId
-}
-struct HeartbeatWriteIdResult {
-}
-
-struct GetValidWriteIdsRequest {
-  1: required string dbName,
-  2: required string tblName
-}
-struct GetValidWriteIdsResult {
-  1: required i64 lowWatermarkId,
-  2: required i64 highWatermarkId,
-  3: optional bool areIdsValid,
-  4: optional list<i64> ids
-}
-
-
 struct GetAllFunctionsResponse {
   1: optional list<Function> functions
 }
@@ -1532,10 +1492,6 @@ service ThriftHiveMetastore extends fb303.FacebookService
   ClearFileMetadataResult clear_file_metadata(1:ClearFileMetadataRequest req)
   CacheFileMetadataResult cache_file_metadata(1:CacheFileMetadataRequest req)
 
-  GetNextWriteIdResult get_next_write_id(1:GetNextWriteIdRequest req)
-  FinalizeWriteIdResult finalize_write_id(1:FinalizeWriteIdRequest req)
-  HeartbeatWriteIdResult heartbeat_write_id(1:HeartbeatWriteIdRequest req)
-  GetValidWriteIdsResult get_valid_write_ids(1:GetValidWriteIdsRequest req)
 }
 
 // * Note about the DDL_TIME: When creating or altering a table or a partition,

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/scripts/upgrade/derby/038-HIVE-14637.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/038-HIVE-14637.derby.sql b/metastore/scripts/upgrade/derby/038-HIVE-14637.derby.sql
deleted file mode 100644
index cb6e5f6..0000000
--- a/metastore/scripts/upgrade/derby/038-HIVE-14637.derby.sql
+++ /dev/null
@@ -1,6 +0,0 @@
-ALTER TABLE "TBLS" ADD "MM_WATERMARK_WRITE_ID" BIGINT DEFAULT -1;
-ALTER TABLE "TBLS" ADD "MM_NEXT_WRITE_ID" BIGINT DEFAULT 0;
-CREATE TABLE "APP"."TBL_WRITES" ("TW_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL, "WRITE_ID" BIGINT NOT NULL, "STATE" CHAR(1) NOT NULL, "CREATED" BIGINT NOT NULL, "LAST_HEARTBEAT" BIGINT NOT NULL);
-ALTER TABLE "APP"."TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_PK" PRIMARY KEY ("TW_ID");
-ALTER TABLE "APP"."TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-CREATE UNIQUE INDEX "APP"."UNIQUEWRITE" ON "APP"."TBL_WRITES" ("TBL_ID", "WRITE_ID");

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql b/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
index a4977b6..6dd3dee 100644
--- a/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
+++ b/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
@@ -60,7 +60,7 @@ CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "
 
 CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
 
-CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL, "MM_WATERMARK_WRITE_ID" BIGINT DEFAULT -1, "MM_NEXT_WRITE_ID" BIGINT DEFAULT 0);
+CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL);
 
 CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
 
@@ -112,15 +112,6 @@ ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY
 
 CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"("PARENT_TBL_ID");
 
-CREATE TABLE "APP"."TBL_WRITES" ("TW_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL, "WRITE_ID" BIGINT NOT NULL, "STATE" CHAR(1) NOT NULL, "CREATED" BIGINT NOT NULL, "LAST_HEARTBEAT" BIGINT NOT NULL);
-
-ALTER TABLE "APP"."TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_PK" PRIMARY KEY ("TW_ID");
-
-ALTER TABLE "APP"."TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-CREATE UNIQUE INDEX "APP"."UNIQUEWRITE" ON "APP"."TBL_WRITES" ("TBL_ID", "WRITE_ID");
-
-
 
 -- ----------------------------------------------
 -- DDL Statements for indexes

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql b/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
index 3e87091..b05942f 100644
--- a/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
+++ b/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
@@ -3,6 +3,4 @@ RUN '037-HIVE-14496.derby.sql';
 RUN '038-HIVE-10562.derby.sql';
 RUN '039-HIVE-12274.derby.sql';
 
-RUN '037-HIVE-14637.derby.sql';
-
 UPDATE "APP".VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1;

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/scripts/upgrade/mssql/023-HIVE-14637.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/023-HIVE-14637.mssql.sql b/metastore/scripts/upgrade/mssql/023-HIVE-14637.mssql.sql
deleted file mode 100644
index 9666d2b..0000000
--- a/metastore/scripts/upgrade/mssql/023-HIVE-14637.mssql.sql
+++ /dev/null
@@ -1,15 +0,0 @@
-ALTER TABLE TBLS ADD MM_WATERMARK_WRITE_ID BIGINT DEFAULT -1;
-ALTER TABLE TBLS ADD MM_NEXT_WRITE_ID BIGINT DEFAULT 0;
-
-CREATE TABLE TBL_WRITES 
-(
-  TW_ID BIGINT NOT NULL,
-  TBL_ID BIGINT NOT NULL,
-  WRITE_ID BIGINT NOT NULL,
-  STATE CHAR(1) NOT NULL,
-  CREATED BIGINT NOT NULL,
-  LAST_HEARTBEAT BIGINT NOT NULL
-);
-ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_PK PRIMARY KEY (TW_ID);
-ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
-CREATE UNIQUE INDEX UNIQUEWRITE ON TBL_WRITES (TBL_ID, WRITE_ID);

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
index 3621ef6..b6fdc7b 100644
--- a/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
@@ -359,9 +359,7 @@ CREATE TABLE TBLS
     TBL_TYPE nvarchar(128) NULL,
     VIEW_EXPANDED_TEXT text NULL,
     VIEW_ORIGINAL_TEXT text NULL,
-    IS_REWRITE_ENABLED bit NOT NULL,
-    MM_WATERMARK_WRITE_ID BIGINT NULL DEFAULT -1,
-    MM_NEXT_WRITE_ID BIGINT NULL DEFAULT 0
+    IS_REWRITE_ENABLED bit NOT NULL
 );
 
 ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
@@ -596,23 +594,6 @@ CREATE TABLE NOTIFICATION_SEQUENCE
 ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);
 
 
-CREATE TABLE TBL_WRITES 
-(
-  TW_ID BIGINT NOT NULL,
-  TBL_ID BIGINT NOT NULL,
-  WRITE_ID BIGINT NOT NULL,
-  STATE CHAR(1) NOT NULL,
-  CREATED BIGINT NOT NULL,
-  LAST_HEARTBEAT BIGINT NOT NULL
-);
-
-ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_PK PRIMARY KEY (TW_ID);
-
-ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
-
-CREATE UNIQUE INDEX UNIQUEWRITE ON TBL_WRITES (TBL_ID, WRITE_ID);
-
-
 -- Constraints for table MASTER_KEYS for class(es) [org.apache.hadoop.hive.metastore.model.MMasterKey]
 
 -- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql b/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
index b786b16..4995349 100644
--- a/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
@@ -1,7 +1,6 @@
 SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS MESSAGE;
 
 :r 022-HIVE-14496.mssql.sql
-:r 023-HIVE-14637.mssql.sql
 :r 023-HIVE-10562.mssql.sql
 :r 024-HIVE-12274.mssql.sql
 

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/scripts/upgrade/mysql/038-HIVE-14637.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/038-HIVE-14637.mysql.sql b/metastore/scripts/upgrade/mysql/038-HIVE-14637.mysql.sql
deleted file mode 100644
index 9e34db2..0000000
--- a/metastore/scripts/upgrade/mysql/038-HIVE-14637.mysql.sql
+++ /dev/null
@@ -1,15 +0,0 @@
-alter table `TBLS` ADD COLUMN `MM_WATERMARK_WRITE_ID` bigint(20) DEFAULT -1;
-alter table `TBLS` ADD COLUMN `MM_NEXT_WRITE_ID` bigint(20) DEFAULT 0;
-
-CREATE TABLE IF NOT EXISTS `TBL_WRITES`
-(
-  `TW_ID` BIGINT NOT NULL,
-  `TBL_ID` BIGINT NOT NULL,
-  `WRITE_ID` BIGINT NOT NULL,
-  `STATE` CHAR(1) NOT NULL,
-  `CREATED` BIGINT NOT NULL,
-  `LAST_HEARTBEAT` BIGINT NOT NULL,
-  PRIMARY KEY (`TW_ID`),
-  UNIQUE KEY `UNIQUEWRITE` (`TBL_ID`,`WRITE_ID`),
-  CONSTRAINT `TBL_WRITES_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql
index 20cfbc4..d1852df 100644
--- a/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql
@@ -588,8 +588,6 @@ CREATE TABLE IF NOT EXISTS `TBLS` (
   `VIEW_EXPANDED_TEXT` mediumtext,
   `VIEW_ORIGINAL_TEXT` mediumtext,
   `IS_REWRITE_ENABLED` bit(1) NOT NULL,
-  `MM_WATERMARK_WRITE_ID` bigint(20) DEFAULT -1,
-  `MM_NEXT_WRITE_ID` bigint(20) DEFAULT 0,
   PRIMARY KEY (`TBL_ID`),
   UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
   KEY `TBLS_N50` (`SD_ID`),
@@ -831,20 +829,6 @@ CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS`
 
 CREATE INDEX `CONSTRAINTS_PARENT_TABLE_ID_INDEX` ON KEY_CONSTRAINTS (`PARENT_TBL_ID`) USING BTREE;
 
-CREATE TABLE IF NOT EXISTS `TBL_WRITES`
-(
-  `TW_ID` BIGINT NOT NULL,
-  `TBL_ID` BIGINT NOT NULL,
-  `WRITE_ID` BIGINT NOT NULL,
-  `STATE` CHAR(1) NOT NULL,
-  `CREATED` BIGINT NOT NULL,
-  `LAST_HEARTBEAT` BIGINT NOT NULL,
-  PRIMARY KEY (`TW_ID`),
-  UNIQUE KEY `UNIQUEWRITE` (`TBL_ID`,`WRITE_ID`),
-  CONSTRAINT `TBL_WRITES_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-
 -- ----------------------------
 -- Transaction and Lock Tables
 -- ----------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql b/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql
index f4c69a5..e221439 100644
--- a/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql
@@ -1,7 +1,6 @@
 SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS ' ';
 
 SOURCE 037-HIVE-14496.mysql.sql;
-SOURCE 038-HIVE-14637.mysql.sql;
 SOURCE 038-HIVE-10562.mysql.sql;
 SOURCE 039-HIVE-12274.mysql.sql;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/scripts/upgrade/oracle/038-HIVE-14637.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/038-HIVE-14637.oracle.sql b/metastore/scripts/upgrade/oracle/038-HIVE-14637.oracle.sql
deleted file mode 100644
index 218eefe..0000000
--- a/metastore/scripts/upgrade/oracle/038-HIVE-14637.oracle.sql
+++ /dev/null
@@ -1,15 +0,0 @@
-ALTER TABLE TBLS ADD MM_WATERMARK_WRITE_ID NUMBER DEFAULT -1;
-ALTER TABLE TBLS ADD MM_NEXT_WRITE_ID NUMBER DEFAULT 0;
-
-CREATE TABLE TBL_WRITES
-(
-  TW_ID NUMBER NOT NULL,
-  TBL_ID NUMBER NOT NULL,
-  WRITE_ID NUMBER NOT NULL,
-  STATE CHAR(1) NOT NULL,
-  CREATED NUMBER NOT NULL,
-  LAST_HEARTBEAT NUMBER NOT NULL
-);
-ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_PK PRIMARY KEY (TW_ID);
-ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-CREATE UNIQUE INDEX UNIQUEWRITE ON TBL_WRITES (TBL_ID, WRITE_ID);

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql
index c9b1aeb..4aaa5e7 100644
--- a/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql
@@ -376,9 +376,7 @@ CREATE TABLE TBLS
     TBL_TYPE VARCHAR2(128) NULL,
     VIEW_EXPANDED_TEXT CLOB NULL,
     VIEW_ORIGINAL_TEXT CLOB NULL,
-    IS_REWRITE_ENABLED NUMBER(1) NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0)),
-    MM_WATERMARK_WRITE_ID NUMBER DEFAULT -1,
-    MM_NEXT_WRITE_ID NUMBER DEFAULT 0
+    IS_REWRITE_ENABLED NUMBER(1) NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0))
 );
 
 ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
@@ -801,22 +799,6 @@ ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAIN
 
 CREATE INDEX CONSTRAINTS_PT_INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID);
 
-CREATE TABLE TBL_WRITES
-(
-  TW_ID NUMBER NOT NULL,
-  TBL_ID NUMBER NOT NULL,
-  WRITE_ID NUMBER NOT NULL,
-  STATE CHAR(1) NOT NULL,
-  CREATED NUMBER NOT NULL,
-  LAST_HEARTBEAT NUMBER NOT NULL
-);
-
-ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_PK PRIMARY KEY (TW_ID);
-
-ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE UNIQUE INDEX UNIQUEWRITE ON TBL_WRITES (TBL_ID, WRITE_ID);
-
 ------------------------------
 -- Transaction and lock tables
 ------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql b/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql
index b2f35de..53ec681 100644
--- a/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql
@@ -1,7 +1,6 @@
 SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS Status from dual;
 
 @037-HIVE-14496.oracle.sql;
-@038-HIVE-14637.oracle.sql;
 @038-HIVE-10562.oracle.sql;
 @039-HIVE-12274.oracle.sql;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/scripts/upgrade/postgres/037-HIVE-14637.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/037-HIVE-14637.postgres.sql b/metastore/scripts/upgrade/postgres/037-HIVE-14637.postgres.sql
deleted file mode 100644
index 310f51e..0000000
--- a/metastore/scripts/upgrade/postgres/037-HIVE-14637.postgres.sql
+++ /dev/null
@@ -1,16 +0,0 @@
-
-ALTER TABLE "TBLS" ADD COLUMN "MM_WATERMARK_WRITE_ID" bigint DEFAULT -1;
-ALTER TABLE "TBLS" ADD COLUMN "MM_NEXT_WRITE_ID" bigint DEFAULT 0;
-
-CREATE TABLE "TBL_WRITES"
-(
-  "TW_ID" BIGINT NOT NULL,
-  "TBL_ID" BIGINT NOT NULL,
-  "WRITE_ID" BIGINT NOT NULL,
-  "STATE" CHAR(1) NOT NULL,
-  "CREATED" BIGINT NOT NULL,
-  "LAST_HEARTBEAT" BIGINT NOT NULL
-);
-ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_PK" PRIMARY KEY ("TW_ID");
-ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS" ("TBL_ID") DEFERRABLE;
-ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "UNIQUEWRITE" UNIQUE ("TBL_ID", "WRITE_ID");

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql
index 424c6a1..5feab4e 100644
--- a/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql
+++ b/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql
@@ -373,9 +373,7 @@ CREATE TABLE "TBLS" (
     "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
     "VIEW_EXPANDED_TEXT" text,
     "VIEW_ORIGINAL_TEXT" text,
-    "IS_REWRITE_ENABLED" boolean NOT NULL,
-    "MM_WATERMARK_WRITE_ID" bigint DEFAULT -1,
-    "MM_NEXT_WRITE_ID" bigint DEFAULT 0
+    "IS_REWRITE_ENABLED" boolean NOT NULL
 );
 
 
@@ -609,24 +607,6 @@ CREATE TABLE "KEY_CONSTRAINTS"
 CREATE INDEX "CONSTRAINTS_PARENT_TBLID_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("PARENT_TBL_ID");
 
 
-
-CREATE TABLE "TBL_WRITES"
-(
-  "TW_ID" BIGINT NOT NULL,
-  "TBL_ID" BIGINT NOT NULL,
-  "WRITE_ID" BIGINT NOT NULL,
-  "STATE" CHAR(1) NOT NULL,
-  "CREATED" BIGINT NOT NULL,
-  "LAST_HEARTBEAT" BIGINT NOT NULL
-);
-
-ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_PK" PRIMARY KEY ("TW_ID");
-
-ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS" ("TBL_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "UNIQUEWRITE" UNIQUE ("TBL_ID", "WRITE_ID");
-
-
 --
 -- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
 --

http://git-wip-us.apache.org/repos/asf/hive/blob/77511070/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql b/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql
index e6daeca..732e184 100644
--- a/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql
+++ b/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql
@@ -1,7 +1,6 @@
 SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0';
 
 \i 036-HIVE-14496.postgres.sql;
-\i 037-HIVE-14637.postgres.sql;
 \i 037-HIVE-10562.postgres.sql;
 \i 038-HIVE-12274.postgres.sql;