Posted to commits@hive.apache.org by se...@apache.org on 2018/07/19 21:44:39 UTC

[01/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Repository: hive
Updated Branches:
  refs/heads/master-txnstats 174c6748f -> 651e79509
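
The plan diffs below show the new Top N Key operator being inserted directly under a map-side Group By (above its Select or Merge Join input) when the query ends in ORDER BY ... LIMIT n; rows whose keys cannot fall within the current top n are dropped before they reach the shuffle. A minimal HiveQL sketch of the pattern that produces such a plan, assuming the rewrite is toggled by a hive.optimize.topnkey property (the property name is an assumption; it does not appear in this diff):

  -- assumed switch for the new TopNKey optimization (property name not shown in this diff)
  SET hive.optimize.topnkey=true;

  -- GROUP BY + ORDER BY + LIMIT is the shape that triggers the rewrite;
  -- EXPLAIN should then show a "Top N Key Operator" feeding the map-side Group By,
  -- as in the topnkey.q.out results further down.
  EXPLAIN
  SELECT key, SUM(CAST(SUBSTR(value,5) AS INT))
  FROM src
  GROUP BY key
  ORDER BY key
  LIMIT 5;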


http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query82.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query82.q.out b/ql/src/test/results/clientpositive/perf/tez/query82.q.out
index f1765e7..bb5a9e9 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query82.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query82.q.out
@@ -46,83 +46,85 @@ Stage-0
     limit:100
     Stage-1
       Reducer 4 vectorized
-      File Output Operator [FS_96]
-        Limit [LIM_95] (rows=100 width=88)
+      File Output Operator [FS_97]
+        Limit [LIM_96] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_94] (rows=633595212 width=88)
+          Select Operator [SEL_95] (rows=633595212 width=88)
             Output:["_col0","_col1","_col2"]
           <-Reducer 3 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_93]
-              Group By Operator [GBY_92] (rows=633595212 width=88)
+            SHUFFLE [RS_94]
+              Group By Operator [GBY_93] (rows=633595212 width=88)
                 Output:["_col0","_col1","_col2"],keys:KEY._col0, KEY._col1, KEY._col2
               <-Reducer 2 [SIMPLE_EDGE]
                 SHUFFLE [RS_22]
                   PartitionCols:_col0, _col1, _col2
                   Group By Operator [GBY_21] (rows=1267190424 width=88)
                     Output:["_col0","_col1","_col2"],keys:_col2, _col3, _col4
-                    Merge Join Operator [MERGEJOIN_72] (rows=1267190424 width=88)
-                      Conds:RS_91._col0=RS_75._col0(Inner),RS_75._col0=RS_18._col1(Inner),Output:["_col2","_col3","_col4"]
-                    <-Map 5 [SIMPLE_EDGE] vectorized
-                      PARTITION_ONLY_SHUFFLE [RS_75]
-                        PartitionCols:_col0
-                        Select Operator [SEL_74] (rows=25666 width=1436)
-                          Output:["_col0","_col1","_col2","_col3"]
-                          Filter Operator [FIL_73] (rows=25666 width=1436)
-                            predicate:((i_manufact_id) IN (437, 129, 727, 663) and i_current_price BETWEEN 30 AND 60 and i_item_sk is not null)
-                            TableScan [TS_3] (rows=462000 width=1436)
-                              default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_item_desc","i_current_price","i_manufact_id"]
-                    <-Reducer 8 [SIMPLE_EDGE]
-                      PARTITION_ONLY_SHUFFLE [RS_18]
-                        PartitionCols:_col1
-                        Merge Join Operator [MERGEJOIN_71] (rows=4593600 width=15)
-                          Conds:RS_83._col0=RS_86._col0(Inner),Output:["_col1"]
-                        <-Map 10 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_86]
-                            PartitionCols:_col0
-                            Select Operator [SEL_85] (rows=8116 width=1119)
-                              Output:["_col0"]
-                              Filter Operator [FIL_84] (rows=8116 width=1119)
-                                predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'2002-05-30 00:00:00' AND TIMESTAMP'2002-07-29 00:00:00' and d_date_sk is not null)
-                                TableScan [TS_9] (rows=73049 width=1119)
-                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
-                        <-Map 7 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_83]
-                            PartitionCols:_col0
-                            Select Operator [SEL_82] (rows=4176000 width=15)
-                              Output:["_col0","_col1"]
-                              Filter Operator [FIL_81] (rows=4176000 width=15)
-                                predicate:(inv_date_sk is not null and inv_item_sk is not null and inv_quantity_on_hand BETWEEN 100 AND 500)
-                                TableScan [TS_6] (rows=37584000 width=15)
-                                  default@inventory,inventory,Tbl:COMPLETE,Col:NONE,Output:["inv_date_sk","inv_item_sk","inv_quantity_on_hand"]
-                    <-Map 1 [SIMPLE_EDGE] vectorized
-                      SHUFFLE [RS_91]
-                        PartitionCols:_col0
-                        Select Operator [SEL_90] (rows=575995635 width=88)
-                          Output:["_col0"]
-                          Filter Operator [FIL_89] (rows=575995635 width=88)
-                            predicate:((ss_item_sk BETWEEN DynamicValue(RS_17_item_i_item_sk_min) AND DynamicValue(RS_17_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_17_item_i_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_18_inventory_inv_item_sk_min) AND DynamicValue(RS_18_inventory_inv_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_18_inventory_inv_item_sk_bloom_filter))) and ss_item_sk is not null)
-                            TableScan [TS_0] (rows=575995635 width=88)
-                              default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_item_sk"]
-                            <-Reducer 6 [BROADCAST_EDGE] vectorized
-                              BROADCAST [RS_80]
-                                Group By Operator [GBY_79] (rows=1 width=12)
-                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                <-Map 5 [CUSTOM_SIMPLE_EDGE] vectorized
-                                  PARTITION_ONLY_SHUFFLE [RS_78]
-                                    Group By Operator [GBY_77] (rows=1 width=12)
-                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                      Select Operator [SEL_76] (rows=25666 width=1436)
-                                        Output:["_col0"]
-                                         Please refer to the previous Select Operator [SEL_74]
-                            <-Reducer 9 [BROADCAST_EDGE] vectorized
-                              BROADCAST [RS_88]
-                                Group By Operator [GBY_87] (rows=1 width=12)
-                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=4593600)"]
-                                <-Reducer 8 [CUSTOM_SIMPLE_EDGE]
-                                  PARTITION_ONLY_SHUFFLE [RS_50]
-                                    Group By Operator [GBY_49] (rows=1 width=12)
-                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=4593600)"]
-                                      Select Operator [SEL_48] (rows=4593600 width=15)
-                                        Output:["_col0"]
-                                         Please refer to the previous Merge Join Operator [MERGEJOIN_71]
+                    Top N Key Operator [TNK_43] (rows=1267190424 width=88)
+                      keys:_col2, _col3, _col4,sort order:+++,top n:100
+                      Merge Join Operator [MERGEJOIN_73] (rows=1267190424 width=88)
+                        Conds:RS_92._col0=RS_76._col0(Inner),RS_76._col0=RS_18._col1(Inner),Output:["_col2","_col3","_col4"]
+                      <-Map 5 [SIMPLE_EDGE] vectorized
+                        PARTITION_ONLY_SHUFFLE [RS_76]
+                          PartitionCols:_col0
+                          Select Operator [SEL_75] (rows=25666 width=1436)
+                            Output:["_col0","_col1","_col2","_col3"]
+                            Filter Operator [FIL_74] (rows=25666 width=1436)
+                              predicate:((i_manufact_id) IN (437, 129, 727, 663) and i_current_price BETWEEN 30 AND 60 and i_item_sk is not null)
+                              TableScan [TS_3] (rows=462000 width=1436)
+                                default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_item_desc","i_current_price","i_manufact_id"]
+                      <-Reducer 8 [SIMPLE_EDGE]
+                        PARTITION_ONLY_SHUFFLE [RS_18]
+                          PartitionCols:_col1
+                          Merge Join Operator [MERGEJOIN_72] (rows=4593600 width=15)
+                            Conds:RS_84._col0=RS_87._col0(Inner),Output:["_col1"]
+                          <-Map 10 [SIMPLE_EDGE] vectorized
+                            SHUFFLE [RS_87]
+                              PartitionCols:_col0
+                              Select Operator [SEL_86] (rows=8116 width=1119)
+                                Output:["_col0"]
+                                Filter Operator [FIL_85] (rows=8116 width=1119)
+                                  predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'2002-05-30 00:00:00' AND TIMESTAMP'2002-07-29 00:00:00' and d_date_sk is not null)
+                                  TableScan [TS_9] (rows=73049 width=1119)
+                                    default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
+                          <-Map 7 [SIMPLE_EDGE] vectorized
+                            SHUFFLE [RS_84]
+                              PartitionCols:_col0
+                              Select Operator [SEL_83] (rows=4176000 width=15)
+                                Output:["_col0","_col1"]
+                                Filter Operator [FIL_82] (rows=4176000 width=15)
+                                  predicate:(inv_date_sk is not null and inv_item_sk is not null and inv_quantity_on_hand BETWEEN 100 AND 500)
+                                  TableScan [TS_6] (rows=37584000 width=15)
+                                    default@inventory,inventory,Tbl:COMPLETE,Col:NONE,Output:["inv_date_sk","inv_item_sk","inv_quantity_on_hand"]
+                      <-Map 1 [SIMPLE_EDGE] vectorized
+                        SHUFFLE [RS_92]
+                          PartitionCols:_col0
+                          Select Operator [SEL_91] (rows=575995635 width=88)
+                            Output:["_col0"]
+                            Filter Operator [FIL_90] (rows=575995635 width=88)
+                              predicate:((ss_item_sk BETWEEN DynamicValue(RS_17_item_i_item_sk_min) AND DynamicValue(RS_17_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_17_item_i_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_18_inventory_inv_item_sk_min) AND DynamicValue(RS_18_inventory_inv_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_18_inventory_inv_item_sk_bloom_filter))) and ss_item_sk is not null)
+                              TableScan [TS_0] (rows=575995635 width=88)
+                                default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_item_sk"]
+                              <-Reducer 6 [BROADCAST_EDGE] vectorized
+                                BROADCAST [RS_81]
+                                  Group By Operator [GBY_80] (rows=1 width=12)
+                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                  <-Map 5 [CUSTOM_SIMPLE_EDGE] vectorized
+                                    PARTITION_ONLY_SHUFFLE [RS_79]
+                                      Group By Operator [GBY_78] (rows=1 width=12)
+                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                        Select Operator [SEL_77] (rows=25666 width=1436)
+                                          Output:["_col0"]
+                                           Please refer to the previous Select Operator [SEL_75]
+                              <-Reducer 9 [BROADCAST_EDGE] vectorized
+                                BROADCAST [RS_89]
+                                  Group By Operator [GBY_88] (rows=1 width=12)
+                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=4593600)"]
+                                  <-Reducer 8 [CUSTOM_SIMPLE_EDGE]
+                                    PARTITION_ONLY_SHUFFLE [RS_51]
+                                      Group By Operator [GBY_50] (rows=1 width=12)
+                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=4593600)"]
+                                        Select Operator [SEL_49] (rows=4593600 width=15)
+                                          Output:["_col0"]
+                                           Please refer to the previous Merge Join Operator [MERGEJOIN_72]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query99.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query99.q.out b/ql/src/test/results/clientpositive/perf/tez/query99.q.out
index b0f6a45..456fd8c 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query99.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query99.q.out
@@ -86,128 +86,130 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_141]
-        Limit [LIM_140] (rows=100 width=135)
+      File Output Operator [FS_142]
+        Limit [LIM_141] (rows=100 width=135)
           Number of rows:100
-          Select Operator [SEL_139] (rows=210822976 width=135)
+          Select Operator [SEL_140] (rows=210822976 width=135)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
           <-Reducer 6 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_138]
-              Select Operator [SEL_137] (rows=210822976 width=135)
+            SHUFFLE [RS_139]
+              Select Operator [SEL_138] (rows=210822976 width=135)
                 Output:["_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
-                Group By Operator [GBY_136] (rows=210822976 width=135)
+                Group By Operator [GBY_137] (rows=210822976 width=135)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)"],keys:KEY._col0, KEY._col1, KEY._col2
                 <-Reducer 5 [SIMPLE_EDGE]
                   SHUFFLE [RS_30]
                     PartitionCols:_col0, _col1, _col2
                     Group By Operator [GBY_29] (rows=421645953 width=135)
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"],aggregations:["sum(_col3)","sum(_col4)","sum(_col5)","sum(_col6)","sum(_col7)"],keys:_col0, _col1, _col2
-                      Select Operator [SEL_27] (rows=421645953 width=135)
-                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
-                        Merge Join Operator [MERGEJOIN_100] (rows=421645953 width=135)
-                          Conds:RS_24._col3=RS_127._col0(Inner),Output:["_col0","_col1","_col8","_col10","_col12"]
-                        <-Map 14 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_127]
-                            PartitionCols:_col0
-                            Select Operator [SEL_126] (rows=1 width=0)
-                              Output:["_col0","_col1"]
-                              Filter Operator [FIL_125] (rows=1 width=0)
-                                predicate:sm_ship_mode_sk is not null
-                                TableScan [TS_12] (rows=1 width=0)
-                                  default@ship_mode,ship_mode,Tbl:PARTIAL,Col:NONE,Output:["sm_ship_mode_sk","sm_type"]
-                        <-Reducer 4 [SIMPLE_EDGE]
-                          SHUFFLE [RS_24]
-                            PartitionCols:_col3
-                            Merge Join Operator [MERGEJOIN_99] (rows=383314495 width=135)
-                              Conds:RS_21._col4=RS_119._col0(Inner),Output:["_col0","_col1","_col3","_col8","_col10"]
-                            <-Map 12 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_119]
-                                PartitionCols:_col0
-                                Select Operator [SEL_118] (rows=27 width=1029)
-                                  Output:["_col0","_col1"]
-                                  Filter Operator [FIL_117] (rows=27 width=1029)
-                                    predicate:w_warehouse_sk is not null
-                                    TableScan [TS_9] (rows=27 width=1029)
-                                      default@warehouse,warehouse,Tbl:COMPLETE,Col:NONE,Output:["w_warehouse_sk","w_warehouse_name"]
-                            <-Reducer 3 [SIMPLE_EDGE]
-                              SHUFFLE [RS_21]
-                                PartitionCols:_col4
-                                Merge Join Operator [MERGEJOIN_98] (rows=348467716 width=135)
-                                  Conds:RS_18._col2=RS_111._col0(Inner),Output:["_col0","_col1","_col3","_col4","_col8"]
-                                <-Map 10 [SIMPLE_EDGE] vectorized
-                                  SHUFFLE [RS_111]
-                                    PartitionCols:_col0
-                                    Select Operator [SEL_110] (rows=60 width=2045)
-                                      Output:["_col0","_col1"]
-                                      Filter Operator [FIL_109] (rows=60 width=2045)
-                                        predicate:cc_call_center_sk is not null
-                                        TableScan [TS_6] (rows=60 width=2045)
-                                          default@call_center,call_center,Tbl:COMPLETE,Col:NONE,Output:["cc_call_center_sk","cc_name"]
-                                <-Reducer 2 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_18]
-                                    PartitionCols:_col2
-                                    Merge Join Operator [MERGEJOIN_97] (rows=316788826 width=135)
-                                      Conds:RS_135._col1=RS_103._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4"]
-                                    <-Map 8 [SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_103]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_102] (rows=8116 width=1119)
-                                          Output:["_col0"]
-                                          Filter Operator [FIL_101] (rows=8116 width=1119)
-                                            predicate:(d_date_sk is not null and d_month_seq BETWEEN 1212 AND 1223)
-                                            TableScan [TS_3] (rows=73049 width=1119)
-                                              default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_month_seq"]
-                                    <-Map 1 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_135]
-                                        PartitionCols:_col1
-                                        Select Operator [SEL_134] (rows=287989836 width=135)
-                                          Output:["_col0","_col1","_col2","_col3","_col4"]
-                                          Filter Operator [FIL_133] (rows=287989836 width=135)
-                                            predicate:((cs_call_center_sk BETWEEN DynamicValue(RS_19_call_center_cc_call_center_sk_min) AND DynamicValue(RS_19_call_center_cc_call_center_sk_max) and in_bloom_filter(cs_call_center_sk, DynamicValue(RS_19_call_center_cc_call_center_sk_bloom_filter))) and (cs_ship_date_sk BETWEEN DynamicValue(RS_16_date_dim_d_date_sk_min) AND DynamicValue(RS_16_date_dim_d_date_sk_max) and in_bloom_filter(cs_ship_date_sk, DynamicValue(RS_16_date_dim_d_date_sk_bloom_filter))) and (cs_ship_mode_sk BETWEEN DynamicValue(RS_25_ship_mode_sm_ship_mode_sk_min) AND DynamicValue(RS_25_ship_mode_sm_ship_mode_sk_max) and in_bloom_filter(cs_ship_mode_sk, DynamicValue(RS_25_ship_mode_sm_ship_mode_sk_bloom_filter))) and (cs_warehouse_sk BETWEEN DynamicValue(RS_22_warehouse_w_warehouse_sk_min) AND DynamicValue(RS_22_warehouse_w_warehouse_sk_max) and in_bloom_filter(cs_warehouse_sk, DynamicValue(RS_22_warehouse_w_warehouse_sk_bloom_filter))) and cs_call_center_sk is not null and cs_ship_date_sk is not null and cs_ship_mode_sk is not null and cs_warehouse_sk is not null)
-                                            TableScan [TS_0] (rows=287989836 width=135)
-                                              default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_ship_date_sk","cs_call_center_sk","cs_ship_mode_sk","cs_warehouse_sk"]
-                                            <-Reducer 11 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_116]
-                                                Group By Operator [GBY_115] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_114]
-                                                    Group By Operator [GBY_113] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_112] (rows=60 width=2045)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_110]
-                                            <-Reducer 13 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_124]
-                                                Group By Operator [GBY_123] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_122]
-                                                    Group By Operator [GBY_121] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_120] (rows=27 width=1029)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_118]
-                                            <-Reducer 15 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_132]
-                                                Group By Operator [GBY_131] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 14 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_130]
-                                                    Group By Operator [GBY_129] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_128] (rows=1 width=0)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_126]
-                                            <-Reducer 9 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_108]
-                                                Group By Operator [GBY_107] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  PARTITION_ONLY_SHUFFLE [RS_106]
-                                                    Group By Operator [GBY_105] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_104] (rows=8116 width=1119)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_102]
+                      Top N Key Operator [TNK_57] (rows=421645953 width=135)
+                        keys:_col0, _col1, _col2,sort order:+++,top n:100
+                        Select Operator [SEL_27] (rows=421645953 width=135)
+                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
+                          Merge Join Operator [MERGEJOIN_101] (rows=421645953 width=135)
+                            Conds:RS_24._col3=RS_128._col0(Inner),Output:["_col0","_col1","_col8","_col10","_col12"]
+                          <-Map 14 [SIMPLE_EDGE] vectorized
+                            SHUFFLE [RS_128]
+                              PartitionCols:_col0
+                              Select Operator [SEL_127] (rows=1 width=0)
+                                Output:["_col0","_col1"]
+                                Filter Operator [FIL_126] (rows=1 width=0)
+                                  predicate:sm_ship_mode_sk is not null
+                                  TableScan [TS_12] (rows=1 width=0)
+                                    default@ship_mode,ship_mode,Tbl:PARTIAL,Col:NONE,Output:["sm_ship_mode_sk","sm_type"]
+                          <-Reducer 4 [SIMPLE_EDGE]
+                            SHUFFLE [RS_24]
+                              PartitionCols:_col3
+                              Merge Join Operator [MERGEJOIN_100] (rows=383314495 width=135)
+                                Conds:RS_21._col4=RS_120._col0(Inner),Output:["_col0","_col1","_col3","_col8","_col10"]
+                              <-Map 12 [SIMPLE_EDGE] vectorized
+                                SHUFFLE [RS_120]
+                                  PartitionCols:_col0
+                                  Select Operator [SEL_119] (rows=27 width=1029)
+                                    Output:["_col0","_col1"]
+                                    Filter Operator [FIL_118] (rows=27 width=1029)
+                                      predicate:w_warehouse_sk is not null
+                                      TableScan [TS_9] (rows=27 width=1029)
+                                        default@warehouse,warehouse,Tbl:COMPLETE,Col:NONE,Output:["w_warehouse_sk","w_warehouse_name"]
+                              <-Reducer 3 [SIMPLE_EDGE]
+                                SHUFFLE [RS_21]
+                                  PartitionCols:_col4
+                                  Merge Join Operator [MERGEJOIN_99] (rows=348467716 width=135)
+                                    Conds:RS_18._col2=RS_112._col0(Inner),Output:["_col0","_col1","_col3","_col4","_col8"]
+                                  <-Map 10 [SIMPLE_EDGE] vectorized
+                                    SHUFFLE [RS_112]
+                                      PartitionCols:_col0
+                                      Select Operator [SEL_111] (rows=60 width=2045)
+                                        Output:["_col0","_col1"]
+                                        Filter Operator [FIL_110] (rows=60 width=2045)
+                                          predicate:cc_call_center_sk is not null
+                                          TableScan [TS_6] (rows=60 width=2045)
+                                            default@call_center,call_center,Tbl:COMPLETE,Col:NONE,Output:["cc_call_center_sk","cc_name"]
+                                  <-Reducer 2 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_18]
+                                      PartitionCols:_col2
+                                      Merge Join Operator [MERGEJOIN_98] (rows=316788826 width=135)
+                                        Conds:RS_136._col1=RS_104._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4"]
+                                      <-Map 8 [SIMPLE_EDGE] vectorized
+                                        PARTITION_ONLY_SHUFFLE [RS_104]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_103] (rows=8116 width=1119)
+                                            Output:["_col0"]
+                                            Filter Operator [FIL_102] (rows=8116 width=1119)
+                                              predicate:(d_date_sk is not null and d_month_seq BETWEEN 1212 AND 1223)
+                                              TableScan [TS_3] (rows=73049 width=1119)
+                                                default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_month_seq"]
+                                      <-Map 1 [SIMPLE_EDGE] vectorized
+                                        SHUFFLE [RS_136]
+                                          PartitionCols:_col1
+                                          Select Operator [SEL_135] (rows=287989836 width=135)
+                                            Output:["_col0","_col1","_col2","_col3","_col4"]
+                                            Filter Operator [FIL_134] (rows=287989836 width=135)
+                                              predicate:((cs_call_center_sk BETWEEN DynamicValue(RS_19_call_center_cc_call_center_sk_min) AND DynamicValue(RS_19_call_center_cc_call_center_sk_max) and in_bloom_filter(cs_call_center_sk, DynamicValue(RS_19_call_center_cc_call_center_sk_bloom_filter))) and (cs_ship_date_sk BETWEEN DynamicValue(RS_16_date_dim_d_date_sk_min) AND DynamicValue(RS_16_date_dim_d_date_sk_max) and in_bloom_filter(cs_ship_date_sk, DynamicValue(RS_16_date_dim_d_date_sk_bloom_filter))) and (cs_ship_mode_sk BETWEEN DynamicValue(RS_25_ship_mode_sm_ship_mode_sk_min) AND DynamicValue(RS_25_ship_mode_sm_ship_mode_sk_max) and in_bloom_filter(cs_ship_mode_sk, DynamicValue(RS_25_ship_mode_sm_ship_mode_sk_bloom_filter))) and (cs_warehouse_sk BETWEEN DynamicValue(RS_22_warehouse_w_warehouse_sk_min) AND DynamicValue(RS_22_warehouse_w_warehouse_sk_max) and in_bloom_filter(cs_warehouse_sk, DynamicValue(RS_22_warehouse_w_warehouse_sk_bloom_filter))) and cs_call_center_sk is not null and cs_ship_date_sk is not null and cs_ship_mode_sk is not null and cs_warehouse_sk is not null)
+                                              TableScan [TS_0] (rows=287989836 width=135)
+                                                default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_ship_date_sk","cs_call_center_sk","cs_ship_mode_sk","cs_warehouse_sk"]
+                                              <-Reducer 11 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_117]
+                                                  Group By Operator [GBY_116] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                  <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                    SHUFFLE [RS_115]
+                                                      Group By Operator [GBY_114] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                        Select Operator [SEL_113] (rows=60 width=2045)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Select Operator [SEL_111]
+                                              <-Reducer 13 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_125]
+                                                  Group By Operator [GBY_124] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                  <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                    SHUFFLE [RS_123]
+                                                      Group By Operator [GBY_122] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                        Select Operator [SEL_121] (rows=27 width=1029)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Select Operator [SEL_119]
+                                              <-Reducer 15 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_133]
+                                                  Group By Operator [GBY_132] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                  <-Map 14 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                    SHUFFLE [RS_131]
+                                                      Group By Operator [GBY_130] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                        Select Operator [SEL_129] (rows=1 width=0)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Select Operator [SEL_127]
+                                              <-Reducer 9 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_109]
+                                                  Group By Operator [GBY_108] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                  <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                    PARTITION_ONLY_SHUFFLE [RS_107]
+                                                      Group By Operator [GBY_106] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                        Select Operator [SEL_105] (rows=8116 width=1119)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Select Operator [SEL_103]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/tez/topnkey.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/topnkey.q.out b/ql/src/test/results/clientpositive/tez/topnkey.q.out
new file mode 100644
index 0000000..66b9191
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/topnkey.q.out
@@ -0,0 +1,162 @@
+PREHOOK: query: EXPLAIN
+SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
+Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+
+Stage-0
+  Fetch Operator
+    limit:5
+    Stage-1
+      Reducer 3
+      File Output Operator [FS_10]
+        Limit [LIM_9] (rows=5 width=95)
+          Number of rows:5
+          Select Operator [SEL_8] (rows=250 width=95)
+            Output:["_col0","_col1"]
+          <-Reducer 2 [SIMPLE_EDGE]
+            SHUFFLE [RS_7]
+              Group By Operator [GBY_5] (rows=250 width=95)
+                Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
+              <-Map 1 [SIMPLE_EDGE]
+                SHUFFLE [RS_4]
+                  PartitionCols:_col0
+                  Group By Operator [GBY_3] (rows=250 width=95)
+                    Output:["_col0","_col1"],aggregations:["sum(_col1)"],keys:_col0
+                    Top N Key Operator [TNK_11] (rows=500 width=178)
+                      keys:_col0,sort order:+,top n:5
+                      Select Operator [SEL_1] (rows=500 width=178)
+                        Output:["_col0","_col1"]
+                        TableScan [TS_0] (rows=500 width=178)
+                          default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
+
+PREHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+0	0
+10	10
+100	200
+103	206
+104	208
+PREHOOK: query: EXPLAIN
+SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
+Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+
+Stage-0
+  Fetch Operator
+    limit:5
+    Stage-1
+      Reducer 3
+      File Output Operator [FS_9]
+        Limit [LIM_8] (rows=5 width=87)
+          Number of rows:5
+          Select Operator [SEL_7] (rows=250 width=87)
+            Output:["_col0"]
+          <-Reducer 2 [SIMPLE_EDGE]
+            SHUFFLE [RS_6]
+              Group By Operator [GBY_4] (rows=250 width=87)
+                Output:["_col0"],keys:KEY._col0
+              <-Map 1 [SIMPLE_EDGE]
+                SHUFFLE [RS_3]
+                  PartitionCols:_col0
+                  Group By Operator [GBY_2] (rows=250 width=87)
+                    Output:["_col0"],keys:key
+                    Top N Key Operator [TNK_10] (rows=500 width=87)
+                      keys:key,sort order:+,top n:5
+                      Select Operator [SEL_1] (rows=500 width=87)
+                        Output:["key"]
+                        TableScan [TS_0] (rows=500 width=87)
+                          default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
+
+PREHOOK: query: SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+0
+10
+100
+103
+104
+PREHOOK: query: explain vectorization detail
+SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization detail
+SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+
+Stage-0
+  Fetch Operator
+    limit:5
+    Stage-1
+      Reducer 3
+      File Output Operator [FS_13]
+        Limit [LIM_12] (rows=5 width=178)
+          Number of rows:5
+          Select Operator [SEL_11] (rows=791 width=178)
+            Output:["_col0","_col1"]
+          <-Reducer 2 [SIMPLE_EDGE]
+            SHUFFLE [RS_10]
+              Select Operator [SEL_9] (rows=791 width=178)
+                Output:["_col0","_col1"]
+                Merge Join Operator [MERGEJOIN_28] (rows=791 width=178)
+                  Conds:RS_6._col0=RS_7._col0(Inner),Output:["_col0","_col2"]
+                <-Map 1 [SIMPLE_EDGE]
+                  SHUFFLE [RS_6]
+                    PartitionCols:_col0
+                    Select Operator [SEL_2] (rows=500 width=87)
+                      Output:["_col0"]
+                      Filter Operator [FIL_16] (rows=500 width=87)
+                        predicate:key is not null
+                        TableScan [TS_0] (rows=500 width=87)
+                          default@src,src1,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
+                <-Map 4 [SIMPLE_EDGE]
+                  SHUFFLE [RS_7]
+                    PartitionCols:_col0
+                    Select Operator [SEL_5] (rows=500 width=178)
+                      Output:["_col0","_col1"]
+                      Filter Operator [FIL_17] (rows=500 width=178)
+                        predicate:key is not null
+                        TableScan [TS_3] (rows=500 width=178)
+                          default@src,src2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
+
+PREHOOK: query: SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/tez/vector_topnkey.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_topnkey.q.out b/ql/src/test/results/clientpositive/tez/vector_topnkey.q.out
new file mode 100644
index 0000000..d6f7cc2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vector_topnkey.q.out
@@ -0,0 +1,162 @@
+PREHOOK: query: explain vectorization detail
+SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization detail
+SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
+Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+
+Stage-0
+  Fetch Operator
+    limit:5
+    Stage-1
+      Reducer 3 vectorized
+      File Output Operator [FS_20]
+        Limit [LIM_19] (rows=5 width=95)
+          Number of rows:5
+          Select Operator [SEL_18] (rows=250 width=95)
+            Output:["_col0","_col1"]
+          <-Reducer 2 [SIMPLE_EDGE] vectorized
+            SHUFFLE [RS_17]
+              Group By Operator [GBY_16] (rows=250 width=95)
+                Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
+              <-Map 1 [SIMPLE_EDGE] vectorized
+                SHUFFLE [RS_15]
+                  PartitionCols:_col0
+                  Group By Operator [GBY_14] (rows=250 width=95)
+                    Output:["_col0","_col1"],aggregations:["sum(_col1)"],keys:_col0
+                    Top N Key Operator [TNK_13] (rows=500 width=178)
+                      keys:_col0,sort order:+,top n:5
+                      Select Operator [SEL_12] (rows=500 width=178)
+                        Output:["_col0","_col1"]
+                        TableScan [TS_0] (rows=500 width=178)
+                          default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
+
+PREHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+0	0
+10	10
+100	200
+103	206
+104	208
+PREHOOK: query: explain vectorization detail
+SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization detail
+SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
+Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+
+Stage-0
+  Fetch Operator
+    limit:5
+    Stage-1
+      Reducer 3 vectorized
+      File Output Operator [FS_19]
+        Limit [LIM_18] (rows=5 width=87)
+          Number of rows:5
+          Select Operator [SEL_17] (rows=250 width=87)
+            Output:["_col0"]
+          <-Reducer 2 [SIMPLE_EDGE] vectorized
+            SHUFFLE [RS_16]
+              Group By Operator [GBY_15] (rows=250 width=87)
+                Output:["_col0"],keys:KEY._col0
+              <-Map 1 [SIMPLE_EDGE] vectorized
+                SHUFFLE [RS_14]
+                  PartitionCols:_col0
+                  Group By Operator [GBY_13] (rows=250 width=87)
+                    Output:["_col0"],keys:key
+                    Top N Key Operator [TNK_12] (rows=500 width=87)
+                      keys:key,sort order:+,top n:5
+                      Select Operator [SEL_11] (rows=500 width=87)
+                        Output:["key"]
+                        TableScan [TS_0] (rows=500 width=87)
+                          default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
+
+PREHOOK: query: SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+0
+10
+100
+103
+104
+PREHOOK: query: explain vectorization detail
+SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization detail
+SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+
+Stage-0
+  Fetch Operator
+    limit:5
+    Stage-1
+      Reducer 3 vectorized
+      File Output Operator [FS_37]
+        Limit [LIM_36] (rows=5 width=178)
+          Number of rows:5
+          Select Operator [SEL_35] (rows=791 width=178)
+            Output:["_col0","_col1"]
+          <-Reducer 2 [SIMPLE_EDGE]
+            SHUFFLE [RS_10]
+              Select Operator [SEL_9] (rows=791 width=178)
+                Output:["_col0","_col1"]
+                Merge Join Operator [MERGEJOIN_28] (rows=791 width=178)
+                  Conds:RS_31._col0=RS_34._col0(Inner),Output:["_col0","_col2"]
+                <-Map 1 [SIMPLE_EDGE] vectorized
+                  SHUFFLE [RS_31]
+                    PartitionCols:_col0
+                    Select Operator [SEL_30] (rows=500 width=87)
+                      Output:["_col0"]
+                      Filter Operator [FIL_29] (rows=500 width=87)
+                        predicate:key is not null
+                        TableScan [TS_0] (rows=500 width=87)
+                          default@src,src1,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
+                <-Map 4 [SIMPLE_EDGE] vectorized
+                  SHUFFLE [RS_34]
+                    PartitionCols:_col0
+                    Select Operator [SEL_33] (rows=500 width=178)
+                      Output:["_col0","_col1"]
+                      Filter Operator [FIL_32] (rows=500 width=178)
+                        predicate:key is not null
+                        TableScan [TS_3] (rows=500 width=178)
+                          default@src,src2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
+
+PREHOOK: query: SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/topnkey.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/topnkey.q.out b/ql/src/test/results/clientpositive/topnkey.q.out
new file mode 100644
index 0000000..31f3a70
--- /dev/null
+++ b/ql/src/test/results/clientpositive/topnkey.q.out
@@ -0,0 +1,301 @@
+PREHOOK: query: EXPLAIN
+SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: key (type: string), UDFToInteger(substr(value, 5)) (type: int)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                aggregations: sum(_col1)
+                keys: _col0 (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
+                  value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: sum(VALUE._col0)
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+          Limit
+            Number of rows: 5
+            Statistics: Num rows: 5 Data size: 475 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 5 Data size: 475 Basic stats: COMPLETE Column stats: COMPLETE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	0
+10	10
+100	200
+103	206
+104	208
+PREHOOK: query: EXPLAIN
+SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: key
+              Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+              TopN Hash Memory Usage: 0.1
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+          Limit
+            Number of rows: 5
+            Statistics: Num rows: 5 Data size: 435 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 5 Data size: 435 Basic stats: COMPLETE Column stats: COMPLETE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0
+10
+100
+103
+104
+PREHOOK: query: explain vectorization detail
+SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization detail
+SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: false
+  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src1
+            filterExpr: key is not null (type: boolean)
+            Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+          TableScan
+            alias: src2
+            filterExpr: key is not null (type: boolean)
+            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col1 (type: string)
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col0, _col2
+          Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: _col0 (type: string), _col2 (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col1 (type: string)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
+          Limit
+            Number of rows: 5
+            Statistics: Num rows: 5 Data size: 890 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 5 Data size: 890 Basic stats: COMPLETE Column stats: COMPLETE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/vector_topnkey.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_topnkey.q.out b/ql/src/test/results/clientpositive/vector_topnkey.q.out
new file mode 100644
index 0000000..ed829e2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_topnkey.q.out
@@ -0,0 +1,480 @@
+PREHOOK: query: explain vectorization detail
+SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization detail
+SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+            TableScan Vectorization:
+                native: true
+                vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+            Select Operator
+              expressions: key (type: string), UDFToInteger(substr(value, 5)) (type: int)
+              outputColumnNames: _col0, _col1
+              Select Vectorization:
+                  className: VectorSelectOperator
+                  native: true
+                  projectedOutputColumnNums: [0, 4]
+                  selectExpressions: CastStringToLong(col 3:string)(children: StringSubstrColStart(col 1:string, start 4) -> 3:string) -> 4:int
+              Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                aggregations: sum(_col1)
+                Group By Vectorization:
+                    aggregators: VectorUDAFSumLong(col 4:int) -> bigint
+                    className: VectorGroupByOperator
+                    groupByMode: HASH
+                    keyExpressions: col 0:string
+                    native: false
+                    vectorProcessingMode: HASH
+                    projectedOutputColumnNums: [0]
+                keys: _col0 (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Reduce Sink Vectorization:
+                      className: VectorReduceSinkOperator
+                      native: false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+                  Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
+                  value expressions: _col1 (type: bigint)
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
+          inputFormatFeatureSupport: [DECIMAL_64]
+          featureSupportInUse: [DECIMAL_64]
+          inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+          rowBatchContext:
+              dataColumnCount: 2
+              includeColumns: [0, 1]
+              dataColumns: key:string, value:string
+              partitionColumnCount: 0
+              scratchColumnTypeNames: [string, bigint]
+      Reduce Vectorization:
+          enabled: false
+          enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
+          enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: sum(VALUE._col0)
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            TableScan Vectorization:
+                native: true
+                vectorizationSchemaColumns: [0:_col0:string, 1:_col1:bigint]
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+              Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col1 (type: bigint)
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
+          inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+          rowBatchContext:
+              dataColumnCount: 2
+              includeColumns: [0, 1]
+              dataColumns: _col0:string, _col1:bigint
+              partitionColumnCount: 0
+              scratchColumnTypeNames: []
+      Reduce Vectorization:
+          enabled: false
+          enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
+          enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+          Limit
+            Number of rows: 5
+            Statistics: Num rows: 5 Data size: 475 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 5 Data size: 475 Basic stats: COMPLETE Column stats: COMPLETE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	0
+10	10
+100	200
+103	206
+104	208
+PREHOOK: query: explain vectorization detail
+SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization detail
+SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            TableScan Vectorization:
+                native: true
+                vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: key
+              Select Vectorization:
+                  className: VectorSelectOperator
+                  native: true
+                  projectedOutputColumnNums: [0]
+              Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                Group By Vectorization:
+                    className: VectorGroupByOperator
+                    groupByMode: HASH
+                    keyExpressions: col 0:string
+                    native: false
+                    vectorProcessingMode: HASH
+                    projectedOutputColumnNums: []
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Reduce Sink Vectorization:
+                      className: VectorReduceSinkOperator
+                      native: false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+                  Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
+          inputFormatFeatureSupport: [DECIMAL_64]
+          featureSupportInUse: [DECIMAL_64]
+          inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+          rowBatchContext:
+              dataColumnCount: 2
+              includeColumns: [0]
+              dataColumns: key:string, value:string
+              partitionColumnCount: 0
+              scratchColumnTypeNames: []
+      Reduce Vectorization:
+          enabled: false
+          enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
+          enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            TableScan Vectorization:
+                native: true
+                vectorizationSchemaColumns: [0:_col0:string]
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+              Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+              TopN Hash Memory Usage: 0.1
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
+          inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+          rowBatchContext:
+              dataColumnCount: 1
+              includeColumns: [0]
+              dataColumns: _col0:string
+              partitionColumnCount: 0
+              scratchColumnTypeNames: []
+      Reduce Vectorization:
+          enabled: false
+          enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
+          enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+          Limit
+            Number of rows: 5
+            Statistics: Num rows: 5 Data size: 435 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 5 Data size: 435 Basic stats: COMPLETE Column stats: COMPLETE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0
+10
+100
+103
+104
+PREHOOK: query: explain vectorization detail
+SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization detail
+SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src1
+            filterExpr: key is not null (type: boolean)
+            Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+          TableScan
+            alias: src2
+            filterExpr: key is not null (type: boolean)
+            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col1 (type: string)
+      Map Vectorization:
+          enabled: false
+          enabledConditionsNotMet: Vectorized map work only works with 1 TableScanOperator IS false
+      Reduce Vectorization:
+          enabled: false
+          enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
+          enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col0, _col2
+          Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: _col0 (type: string), _col2 (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            TableScan Vectorization:
+                native: true
+                vectorizationSchemaColumns: [0:_col0:string, 1:_col1:string]
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+              Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col1 (type: string)
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
+          inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+          rowBatchContext:
+              dataColumnCount: 2
+              includeColumns: [0, 1]
+              dataColumns: _col0:string, _col1:string
+              partitionColumnCount: 0
+              scratchColumnTypeNames: []
+      Reduce Vectorization:
+          enabled: false
+          enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
+          enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
+          Limit
+            Number of rows: 5
+            Statistics: Num rows: 5 Data size: 890 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 5 Data size: 890 Basic stats: COMPLETE Column stats: COMPLETE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
index 9393fb8..044fd16 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
@@ -941,6 +941,25 @@ public final class ObjectInspectorUtils {
     return 0;
   }
 
+  public static int compare(Object[] o1, ObjectInspector[] oi1, Object[] o2,
+                            ObjectInspector[] oi2, boolean[] columnSortOrderIsDesc) {
+    assert (o1.length == oi1.length);
+    assert (o2.length == oi2.length);
+    assert (o1.length == o2.length);
+
+    for (int i = 0; i < o1.length; i++) {
+      int r = compare(o1[i], oi1[i], o2[i], oi2[i]);
+      if (r != 0) {
+        if (columnSortOrderIsDesc[i]) {
+          return r;
+        } else {
+          return -r;
+        }
+      }
+    }
+    return 0;
+  }
+
   /**
    * Whether comparison is supported for this type.
   * Currently all types that reference any map are not comparable.

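For context on how the new array-based compare() overload above might be used, here is a minimal, hypothetical Java sketch (not part of the patch). The class name and the literal row values are invented for illustration; ObjectInspectorUtils.compare and PrimitiveObjectInspectorFactory.javaStringObjectInspector are existing serde2 APIs.

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class TopNKeyCompareSketch {
  public static void main(String[] args) {
    // Two key tuples, each with one ascending and one descending string column.
    ObjectInspector[] ois = {
        PrimitiveObjectInspectorFactory.javaStringObjectInspector,
        PrimitiveObjectInspectorFactory.javaStringObjectInspector };
    Object[] left = { "100", "val_100" };
    Object[] right = { "103", "val_103" };
    boolean[] columnSortOrderIsDesc = { false, true };

    // The overload negates the per-column comparison for ascending columns,
    // so the sign of the result reflects the configured sort direction of
    // the first column on which the two tuples differ.
    int cmp = ObjectInspectorUtils.compare(left, ois, right, ois, columnSortOrderIsDesc);
    System.out.println("compare result: " + cmp);
  }
}

Note that the comparison direction is supplied per column via columnSortOrderIsDesc, mirroring the "+"/"-" sort-order notation that appears in the reduce sink plans above.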

[45/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 0000000,8ff056f..9bee0db
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@@ -1,0 -1,2532 +1,2532 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.cache;
+ 
+ 
+ import java.nio.ByteBuffer;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.EmptyStackException;
+ import java.util.HashMap;
+ import java.util.LinkedList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Stack;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.ScheduledExecutorService;
+ import java.util.concurrent.ThreadFactory;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicBoolean;
+ import java.util.regex.Matcher;
+ import java.util.regex.Pattern;
+ 
+ import org.apache.hadoop.conf.Configurable;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.DatabaseName;
+ import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.common.TableName;
+ import org.apache.hadoop.hive.metastore.Deadline;
+ import org.apache.hadoop.hive.metastore.FileMetadataHandler;
+ import org.apache.hadoop.hive.metastore.ObjectStore;
+ import org.apache.hadoop.hive.metastore.PartFilterExprUtil;
+ import org.apache.hadoop.hive.metastore.PartitionExpressionProxy;
+ import org.apache.hadoop.hive.metastore.RawStore;
+ import org.apache.hadoop.hive.metastore.TableType;
+ import org.apache.hadoop.hive.metastore.Warehouse;
 -import org.apache.hadoop.hive.metastore.api.AggrStats;
 -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 -import org.apache.hadoop.hive.metastore.api.Catalog;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 -import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 -import org.apache.hadoop.hive.metastore.api.Database;
 -import org.apache.hadoop.hive.metastore.api.FieldSchema;
 -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 -import org.apache.hadoop.hive.metastore.api.Function;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 -import org.apache.hadoop.hive.metastore.api.ISchema;
 -import org.apache.hadoop.hive.metastore.api.ISchemaName;
 -import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 -import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 -import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
 -import org.apache.hadoop.hive.metastore.api.MetaException;
 -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 -import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
 -import org.apache.hadoop.hive.metastore.api.Partition;
 -import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 -import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 -import org.apache.hadoop.hive.metastore.api.PrincipalType;
 -import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 -import org.apache.hadoop.hive.metastore.api.WMNullablePool;
 -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMTrigger;
 -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
++import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.cache.SharedCache.StatsType;
+ import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator;
+ import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory;
 -import org.apache.hadoop.hive.metastore.api.Role;
 -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 -import org.apache.hadoop.hive.metastore.api.RuntimeStat;
 -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersion;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
 -import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 -import org.apache.hadoop.hive.metastore.api.Table;
 -import org.apache.hadoop.hive.metastore.api.TableMeta;
 -import org.apache.hadoop.hive.metastore.api.Type;
 -import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 -import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMMapping;
 -import org.apache.hadoop.hive.metastore.api.WMPool;
 -import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.utils.FileUtils;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
+ import org.apache.hadoop.hive.metastore.utils.StringUtils;
+ import org.apache.thrift.TException;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
+ 
+ // TODO filter->expr
+ // TODO functionCache
+ // TODO constraintCache
+ // TODO need sd nested copy?
+ // TODO String intern
+ // TODO monitor event queue
+ // TODO initial load slow?
+ // TODO size estimation
+ 
+ public class CachedStore implements RawStore, Configurable {
+   private static ScheduledExecutorService cacheUpdateMaster = null;
+   private static List<Pattern> whitelistPatterns = null;
+   private static List<Pattern> blacklistPatterns = null;
+   // Default value set to 100 milliseconds for test purposes
+   private static long DEFAULT_CACHE_REFRESH_PERIOD = 100;
+   // Time after which metastore cache is updated from metastore DB by the background update thread
+   private static long cacheRefreshPeriodMS = DEFAULT_CACHE_REFRESH_PERIOD;
+   private static AtomicBoolean isCachePrewarmed = new AtomicBoolean(false);
+   private static TablesPendingPrewarm tblsPendingPrewarm = new TablesPendingPrewarm();
+   private RawStore rawStore = null;
+   private Configuration conf;
+   private PartitionExpressionProxy expressionProxy = null;
+   private static final SharedCache sharedCache = new SharedCache();
+ 
+   static final private Logger LOG = LoggerFactory.getLogger(CachedStore.class.getName());
+ 
+   @Override
+   public void setConf(Configuration conf) {
+     setConfInternal(conf);
+     initBlackListWhiteList(conf);
+     initSharedCache(conf);
+     startCacheUpdateService(conf, false, true);
+   }
+ 
+   /**
+    * Similar to setConf but used from within the tests
+    * This does start the background thread for prewarm and update
+    * @param conf
+    */
+   void setConfForTest(Configuration conf) {
+     setConfInternal(conf);
+     initBlackListWhiteList(conf);
+     initSharedCache(conf);
+   }
+ 
+   private void setConfInternal(Configuration conf) {
+     String rawStoreClassName =
+         MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, ObjectStore.class.getName());
+     if (rawStore == null) {
+       try {
+         rawStore = (JavaUtils.getClass(rawStoreClassName, RawStore.class)).newInstance();
+       } catch (Exception e) {
+         throw new RuntimeException("Cannot instantiate " + rawStoreClassName, e);
+       }
+     }
+     rawStore.setConf(conf);
+     Configuration oldConf = this.conf;
+     this.conf = conf;
+     if (expressionProxy != null && conf != oldConf) {
+       LOG.warn("Unexpected setConf when we were already configured");
+     } else {
+       expressionProxy = PartFilterExprUtil.createExpressionProxy(conf);
+     }
+   }
+ 
+   private void initSharedCache(Configuration conf) {
+     long maxSharedCacheSizeInBytes =
+         MetastoreConf.getSizeVar(conf, ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY);
+     sharedCache.initialize(maxSharedCacheSizeInBytes);
+     if (maxSharedCacheSizeInBytes > 0) {
+       LOG.info("Maximum memory that the cache will use: {} GB",
+           maxSharedCacheSizeInBytes / (1024 * 1024 * 1024));
+     }
+   }
+ 
+   @VisibleForTesting
+   /**
+    * This initializes the caches in SharedCache by getting the objects from Metastore DB via
+    * ObjectStore and populating the respective caches
+    */
+   static void prewarm(RawStore rawStore) {
+     if (isCachePrewarmed.get()) {
+       return;
+     }
+     long startTime = System.nanoTime();
+     LOG.info("Prewarming CachedStore");
+     while (!isCachePrewarmed.get()) {
+       // Prevents throwing exceptions in our raw store calls since we're not using RawStoreProxy
+       Deadline.registerIfNot(1000000);
+       Collection<String> catalogsToCache;
+       try {
+         catalogsToCache = catalogsToCache(rawStore);
+         LOG.info("Going to cache catalogs: "
+             + org.apache.commons.lang.StringUtils.join(catalogsToCache, ", "));
+         List<Catalog> catalogs = new ArrayList<>(catalogsToCache.size());
+         for (String catName : catalogsToCache) {
+           catalogs.add(rawStore.getCatalog(catName));
+         }
+         sharedCache.populateCatalogsInCache(catalogs);
+       } catch (MetaException | NoSuchObjectException e) {
+         LOG.warn("Failed to populate catalogs in cache, going to try again", e);
+         // try again
+         continue;
+       }
+       LOG.info("Finished prewarming catalogs, starting on databases");
+       List<Database> databases = new ArrayList<>();
+       for (String catName : catalogsToCache) {
+         try {
+           List<String> dbNames = rawStore.getAllDatabases(catName);
+           LOG.info("Number of databases to prewarm in catalog {}: {}", catName, dbNames.size());
+           for (String dbName : dbNames) {
+             try {
+               databases.add(rawStore.getDatabase(catName, dbName));
+             } catch (NoSuchObjectException e) {
+               // Continue with next database
+               LOG.warn("Failed to cache database "
+                   + DatabaseName.getQualified(catName, dbName) + ", moving on", e);
+             }
+           }
+         } catch (MetaException e) {
+           LOG.warn("Failed to cache databases in catalog " + catName + ", moving on", e);
+         }
+       }
+       sharedCache.populateDatabasesInCache(databases);
+       LOG.info(
+           "Databases cache is now prewarmed. Now adding tables, partitions and statistics to the cache");
+       int numberOfDatabasesCachedSoFar = 0;
+       for (Database db : databases) {
+         String catName = StringUtils.normalizeIdentifier(db.getCatalogName());
+         String dbName = StringUtils.normalizeIdentifier(db.getName());
+         List<String> tblNames;
+         try {
+           tblNames = rawStore.getAllTables(catName, dbName);
+         } catch (MetaException e) {
+           LOG.warn("Failed to cache tables for database "
+               + DatabaseName.getQualified(catName, dbName) + ", moving on");
+           // Continue with next database
+           continue;
+         }
+         tblsPendingPrewarm.addTableNamesForPrewarming(tblNames);
+         int totalTablesToCache = tblNames.size();
+         int numberOfTablesCachedSoFar = 0;
+         while (tblsPendingPrewarm.hasMoreTablesToPrewarm()) {
+           try {
+             String tblName =
+                 StringUtils.normalizeIdentifier(tblsPendingPrewarm.getNextTableNameToPrewarm());
+             if (!shouldCacheTable(catName, dbName, tblName)) {
+               continue;
+             }
+             Table table;
+             try {
+               table = rawStore.getTable(catName, dbName, tblName);
+             } catch (MetaException e) {
+               // It is possible the table was deleted while fetching the tables of the
+               // database; in that case, continue with the next table
+               continue;
+             }
+             List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
+             try {
+               ColumnStatistics tableColStats = null;
+               List<Partition> partitions = null;
+               List<ColumnStatistics> partitionColStats = null;
+               AggrStats aggrStatsAllPartitions = null;
+               AggrStats aggrStatsAllButDefaultPartition = null;
+               if (table.isSetPartitionKeys()) {
+                 Deadline.startTimer("getPartitions");
+                 partitions = rawStore.getPartitions(catName, dbName, tblName, Integer.MAX_VALUE);
+                 Deadline.stopTimer();
+                 List<String> partNames = new ArrayList<>(partitions.size());
+                 for (Partition p : partitions) {
+                   partNames.add(Warehouse.makePartName(table.getPartitionKeys(), p.getValues()));
+                 }
+                 if (!partNames.isEmpty()) {
+                   // Get partition column stats for this table
+                   Deadline.startTimer("getPartitionColumnStatistics");
+                   partitionColStats = rawStore.getPartitionColumnStatistics(catName, dbName,
+                       tblName, partNames, colNames);
+                   Deadline.stopTimer();
+                   // Get aggregate stats for all partitions of a table and for all but default
+                   // partition
+                   Deadline.startTimer("getAggrPartitionColumnStatistics");
+                   aggrStatsAllPartitions =
+                       rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+                   Deadline.stopTimer();
+                   // Remove default partition from partition names and get aggregate
+                   // stats again
+                   List<FieldSchema> partKeys = table.getPartitionKeys();
+                   String defaultPartitionValue =
+                       MetastoreConf.getVar(rawStore.getConf(), ConfVars.DEFAULTPARTITIONNAME);
+                   List<String> partCols = new ArrayList<>();
+                   List<String> partVals = new ArrayList<>();
+                   for (FieldSchema fs : partKeys) {
+                     partCols.add(fs.getName());
+                     partVals.add(defaultPartitionValue);
+                   }
+                   String defaultPartitionName = FileUtils.makePartName(partCols, partVals);
+                   partNames.remove(defaultPartitionName);
+                   Deadline.startTimer("getAggrPartitionColumnStatistics");
+                   aggrStatsAllButDefaultPartition =
+                       rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+                   Deadline.stopTimer();
+                 }
+               } else {
+                 Deadline.startTimer("getTableColumnStatistics");
+                 tableColStats =
+                     rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
+                 Deadline.stopTimer();
+               }
++              // TODO## should this take write ID into account? or at least cache write ID to verify?
+               // If the table could not be cached due to the memory limit, stop prewarm
+               boolean isSuccess = sharedCache.populateTableInCache(table, tableColStats, partitions,
+                   partitionColStats, aggrStatsAllPartitions, aggrStatsAllButDefaultPartition);
+               if (isSuccess) {
+                 LOG.trace("Cached Database: {}'s Table: {}.", dbName, tblName);
+               } else {
+                 LOG.info(
+                     "Unable to cache Database: {}'s Table: {}, since the cache memory is full. "
+                         + "Will stop attempting to cache any more tables.",
+                     dbName, tblName);
+                 completePrewarm(startTime);
+                 return;
+               }
+             } catch (MetaException | NoSuchObjectException e) {
+               // Continue with next table
+               continue;
+             }
+             LOG.debug("Processed database: {}'s table: {}. Cached {} / {}  tables so far.", dbName,
+                 tblName, ++numberOfTablesCachedSoFar, totalTablesToCache);
+           } catch (EmptyStackException e) {
+             // We've prewarmed this database, continue with the next one
+             continue;
+           }
+         }
+         LOG.debug("Processed database: {}. Cached {} / {} databases so far.", dbName,
+             ++numberOfDatabasesCachedSoFar, databases.size());
+       }
+       completePrewarm(startTime);
+     }
+   }
+ 
+   private static void completePrewarm(long startTime) {
+     isCachePrewarmed.set(true);
+     LOG.info("CachedStore initialized");
+     long endTime = System.nanoTime();
+     LOG.info("Time taken in prewarming = " + (endTime - startTime) / 1000000 + "ms");
+     sharedCache.completeTableCachePrewarm();
+   }
+ 
+   static class TablesPendingPrewarm {
+     private Stack<String> tableNames = new Stack<>();
+ 
+     private synchronized void addTableNamesForPrewarming(List<String> tblNames) {
+       tableNames.clear();
+       if (tblNames != null) {
+         tableNames.addAll(tblNames);
+       }
+     }
+ 
+     private synchronized boolean hasMoreTablesToPrewarm() {
+       return !tableNames.empty();
+     }
+ 
+     private synchronized String getNextTableNameToPrewarm() {
+       return tableNames.pop();
+     }
+ 
+     private synchronized void prioritizeTableForPrewarm(String tblName) {
+       // If the table is in the pending prewarm list, move it to the top
+       if (tableNames.remove(tblName)) {
+         tableNames.push(tblName);
+       }
+     }
+   }
+ 
+   @VisibleForTesting
+   static void setCachePrewarmedState(boolean state) {
+     isCachePrewarmed.set(state);
+   }
+ 
+   private static void initBlackListWhiteList(Configuration conf) {
+     if (whitelistPatterns == null || blacklistPatterns == null) {
+       whitelistPatterns = createPatterns(MetastoreConf.getAsString(conf,
+           MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_WHITELIST));
+       blacklistPatterns = createPatterns(MetastoreConf.getAsString(conf,
+           MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_BLACKLIST));
+     }
+   }
+ 
+   private static Collection<String> catalogsToCache(RawStore rs) throws MetaException {
+     Collection<String> confValue =
+         MetastoreConf.getStringCollection(rs.getConf(), ConfVars.CATALOGS_TO_CACHE);
+     if (confValue == null || confValue.isEmpty() ||
+         (confValue.size() == 1 && confValue.contains(""))) {
+       return rs.getCatalogs();
+     } else {
+       return confValue;
+     }
+   }
+ 
+   @VisibleForTesting
+   /**
+    * This starts a background thread, which initially populates the SharedCache and later
+    * periodically gets updates from the metastore db
+    *
+    * @param conf
+    * @param runOnlyOnce
+    * @param shouldRunPrewarm
+    */
+   static synchronized void startCacheUpdateService(Configuration conf, boolean runOnlyOnce,
+       boolean shouldRunPrewarm) {
+     if (cacheUpdateMaster == null) {
+       initBlackListWhiteList(conf);
+       if (!MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) {
+         cacheRefreshPeriodMS = MetastoreConf.getTimeVar(conf,
+             ConfVars.CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY, TimeUnit.MILLISECONDS);
+       }
+       LOG.info("CachedStore: starting cache update service (run every {} ms", cacheRefreshPeriodMS);
+       cacheUpdateMaster = Executors.newScheduledThreadPool(1, new ThreadFactory() {
+         @Override
+         public Thread newThread(Runnable r) {
+           Thread t = Executors.defaultThreadFactory().newThread(r);
+           t.setName("CachedStore-CacheUpdateService: Thread-" + t.getId());
+           t.setDaemon(true);
+           return t;
+         }
+       });
+       if (!runOnlyOnce) {
+         cacheUpdateMaster.scheduleAtFixedRate(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0,
+             cacheRefreshPeriodMS, TimeUnit.MILLISECONDS);
+       }
+     }
+     if (runOnlyOnce) {
+       // Some tests control the execution of the background update thread
+       cacheUpdateMaster.schedule(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0,
+           TimeUnit.MILLISECONDS);
+     }
+   }
+ 
+   @VisibleForTesting
+   static synchronized boolean stopCacheUpdateService(long timeout) {
+     boolean tasksStoppedBeforeShutdown = false;
+     if (cacheUpdateMaster != null) {
+       LOG.info("CachedStore: shutting down cache update service");
+       try {
+         tasksStoppedBeforeShutdown =
+             cacheUpdateMaster.awaitTermination(timeout, TimeUnit.MILLISECONDS);
+       } catch (InterruptedException e) {
+         LOG.info("CachedStore: cache update service was interrupted while waiting for tasks to "
+             + "complete before shutting down. Will make a hard stop now.");
+       }
+       cacheUpdateMaster.shutdownNow();
+       cacheUpdateMaster = null;
+     }
+     return tasksStoppedBeforeShutdown;
+   }
+ 
+   @VisibleForTesting
+   static void setCacheRefreshPeriod(long time) {
+     cacheRefreshPeriodMS = time;
+   }
+ 
+   static class CacheUpdateMasterWork implements Runnable {
+     private boolean shouldRunPrewarm = true;
+     private final RawStore rawStore;
+ 
+     CacheUpdateMasterWork(Configuration conf, boolean shouldRunPrewarm) {
+       this.shouldRunPrewarm = shouldRunPrewarm;
+       String rawStoreClassName =
+           MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, ObjectStore.class.getName());
+       try {
+         rawStore = JavaUtils.getClass(rawStoreClassName, RawStore.class).newInstance();
+         rawStore.setConf(conf);
+       } catch (InstantiationException | IllegalAccessException | MetaException e) {
+         // MetaException here really means ClassNotFound (see the utility method).
+         // So, if any of these happen, that means we can never succeed.
+         throw new RuntimeException("Cannot instantiate " + rawStoreClassName, e);
+       }
+     }
+ 
+     @Override
+     public void run() {
+       if (!shouldRunPrewarm) {
+         // TODO: prewarm and update can probably be merged.
+         update();
+       } else {
+         try {
+           prewarm(rawStore);
+         } catch (Exception e) {
+           LOG.error("Prewarm failure", e);
+           return;
+         }
+       }
+     }
+ 
+     void update() {
+       Deadline.registerIfNot(1000000);
+       LOG.debug("CachedStore: updating cached objects");
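+       // Walk every cached catalog and refresh its databases, tables, partitions and
+       // column statistics from the underlying raw store.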
+       try {
+         for (String catName : catalogsToCache(rawStore)) {
+           List<String> dbNames = rawStore.getAllDatabases(catName);
+           // Update the database in cache
+           updateDatabases(rawStore, catName, dbNames);
+           for (String dbName : dbNames) {
+             // Update the tables in cache
+             updateTables(rawStore, catName, dbName);
+             List<String> tblNames;
+             try {
+               tblNames = rawStore.getAllTables(catName, dbName);
+             } catch (MetaException e) {
+               // Continue with next database
+               continue;
+             }
+             for (String tblName : tblNames) {
+               if (!shouldCacheTable(catName, dbName, tblName)) {
+                 continue;
+               }
+               // Update the table column stats for a table in cache
+               updateTableColStats(rawStore, catName, dbName, tblName);
+               // Update the partitions for a table in cache
+               updateTablePartitions(rawStore, catName, dbName, tblName);
+               // Update the partition col stats for a table in cache
+               updateTablePartitionColStats(rawStore, catName, dbName, tblName);
+               // Update aggregate partition column stats for a table in cache
+               updateTableAggregatePartitionColStats(rawStore, catName, dbName, tblName);
+             }
+           }
+         }
+         sharedCache.incrementUpdateCount();
+       } catch (MetaException e) {
+         LOG.error("Updating CachedStore: error while refreshing cached objects; skipping this iteration", e);
+       }
+     }
+ 
+ 
+     private void updateDatabases(RawStore rawStore, String catName, List<String> dbNames) {
+       // Prepare the list of databases
+       List<Database> databases = new ArrayList<>();
+       for (String dbName : dbNames) {
+         Database db;
+         try {
+           db = rawStore.getDatabase(catName, dbName);
+           databases.add(db);
+         } catch (NoSuchObjectException e) {
+           LOG.info("Updating CachedStore: database - " + catName + "." + dbName
+               + " does not exist.", e);
+         }
+       }
+       sharedCache.refreshDatabasesInCache(databases);
+     }
+ 
+     private void updateTables(RawStore rawStore, String catName, String dbName) {
+       List<Table> tables = new ArrayList<>();
+       try {
+         List<String> tblNames = rawStore.getAllTables(catName, dbName);
+         for (String tblName : tblNames) {
+           if (!shouldCacheTable(catName, dbName, tblName)) {
+             continue;
+           }
+           Table table = rawStore.getTable(StringUtils.normalizeIdentifier(catName),
+               StringUtils.normalizeIdentifier(dbName),
+               StringUtils.normalizeIdentifier(tblName));
+           tables.add(table);
+         }
+         sharedCache.refreshTablesInCache(catName, dbName, tables);
+       } catch (MetaException e) {
+         LOG.debug("Unable to refresh cached tables for database: " + dbName, e);
+       }
+     }
+ 
+ 
+     private void updateTableColStats(RawStore rawStore, String catName, String dbName, String tblName) {
+       try {
+         Table table = rawStore.getTable(catName, dbName, tblName);
+         if (!table.isSetPartitionKeys()) {
+           List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
+           Deadline.startTimer("getTableColumnStatistics");
++          // TODO## should this take write ID into account? or at least cache write ID to verify?
+           ColumnStatistics tableColStats =
+               rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
+           Deadline.stopTimer();
+           if (tableColStats != null) {
++            // TODO## should this take write ID into account? or at least cache write ID to verify?
+             sharedCache.refreshTableColStatsInCache(StringUtils.normalizeIdentifier(catName),
+                 StringUtils.normalizeIdentifier(dbName),
+                 StringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj());
+           }
+         }
+       } catch (MetaException | NoSuchObjectException e) {
+         LOG.info("Unable to refresh table column stats for table: " + tblName, e);
+       }
+     }
+ 
+     private void updateTablePartitions(RawStore rawStore, String catName, String dbName, String tblName) {
+       try {
+         Deadline.startTimer("getPartitions");
+         List<Partition> partitions = rawStore.getPartitions(catName, dbName, tblName, Integer.MAX_VALUE);
+         Deadline.stopTimer();
+         sharedCache.refreshPartitionsInCache(StringUtils.normalizeIdentifier(catName),
+             StringUtils.normalizeIdentifier(dbName),
+             StringUtils.normalizeIdentifier(tblName), partitions);
+       } catch (MetaException | NoSuchObjectException e) {
+         LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e);
+       }
+     }
+ 
+     private void updateTablePartitionColStats(RawStore rawStore, String catName, String dbName, String tblName) {
+       try {
+         Table table = rawStore.getTable(catName, dbName, tblName);
+         List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
+         List<String> partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
+         // Get partition column stats for this table
+         Deadline.startTimer("getPartitionColumnStatistics");
++        // TODO## should this take write ID into account? or at least cache write ID to verify?
+         List<ColumnStatistics> partitionColStats =
+             rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames);
+         Deadline.stopTimer();
+         sharedCache.refreshPartitionColStatsInCache(catName, dbName, tblName, partitionColStats);
+       } catch (MetaException | NoSuchObjectException e) {
+         LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e);
+       }
+     }
+ 
+     // Update the cached aggregate stats for a table: one aggregate over all partitions and
+     // one over all partitions except the default partition.
+     private void updateTableAggregatePartitionColStats(RawStore rawStore, String catName, String dbName,
+                                                        String tblName) {
+       try {
+         Table table = rawStore.getTable(catName, dbName, tblName);
+         List<String> partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
+         List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
+         if ((partNames != null) && (partNames.size() > 0)) {
+           Deadline.startTimer("getAggregareStatsForAllPartitions");
+           AggrStats aggrStatsAllPartitions =
+               rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+           Deadline.stopTimer();
+           // Remove default partition from partition names and get aggregate stats again
+           List<FieldSchema> partKeys = table.getPartitionKeys();
+           String defaultPartitionValue =
+               MetastoreConf.getVar(rawStore.getConf(), ConfVars.DEFAULTPARTITIONNAME);
+           List<String> partCols = new ArrayList<String>();
+           List<String> partVals = new ArrayList<String>();
+           for (FieldSchema fs : partKeys) {
+             partCols.add(fs.getName());
+             partVals.add(defaultPartitionValue);
+           }
+           String defaultPartitionName = FileUtils.makePartName(partCols, partVals);
+           partNames.remove(defaultPartitionName);
+           Deadline.startTimer("getAggregareStatsForAllPartitionsExceptDefault");
+           AggrStats aggrStatsAllButDefaultPartition =
+               rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+           Deadline.stopTimer();
+           sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName),
+               StringUtils.normalizeIdentifier(dbName),
+               StringUtils.normalizeIdentifier(tblName), aggrStatsAllPartitions,
+               aggrStatsAllButDefaultPartition);
+         }
+       } catch (MetaException | NoSuchObjectException e) {
+         LOG.info("Updating CachedStore: unable to read aggregate column stats of table: " + tblName,
+             e);
+       }
+     }
+   }
+ 
+   @Override
+   public Configuration getConf() {
+     return rawStore.getConf();
+   }
+ 
+   @Override
+   public void shutdown() {
+     rawStore.shutdown();
+   }
+ 
+   @Override
+   public boolean openTransaction() {
+     return rawStore.openTransaction();
+   }
+ 
+   @Override
+   public boolean commitTransaction() {
+     return rawStore.commitTransaction();
+   }
+ 
+   @Override
+   public boolean isActiveTransaction() {
+     return rawStore.isActiveTransaction();
+   }
+ 
+   @Override
+   public void rollbackTransaction() {
+     rawStore.rollbackTransaction();
+   }
+ 
+   @Override
+   public void createCatalog(Catalog cat) throws MetaException {
+     rawStore.createCatalog(cat);
+     sharedCache.addCatalogToCache(cat);
+   }
+ 
+   @Override
+   public void alterCatalog(String catName, Catalog cat) throws MetaException,
+       InvalidOperationException {
+     rawStore.alterCatalog(catName, cat);
+     sharedCache.alterCatalogInCache(StringUtils.normalizeIdentifier(catName), cat);
+   }
+ 
+   @Override
+   public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     if (!sharedCache.isCatalogCachePrewarmed()) {
+       return rawStore.getCatalog(catalogName);
+     }
+     Catalog cat = sharedCache.getCatalogFromCache(normalizeIdentifier(catalogName));
+     if (cat == null) {
+       throw new NoSuchObjectException();
+     }
+     return cat;
+   }
+ 
+   @Override
+   public List<String> getCatalogs() throws MetaException {
+     if (!sharedCache.isCatalogCachePrewarmed()) {
+       return rawStore.getCatalogs();
+     }
+     return sharedCache.listCachedCatalogs();
+   }
+ 
+   @Override
+   public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     rawStore.dropCatalog(catalogName);
+     catalogName = catalogName.toLowerCase();
+     sharedCache.removeCatalogFromCache(catalogName);
+   }
+ 
+   @Override
+   public void createDatabase(Database db) throws InvalidObjectException, MetaException {
+     rawStore.createDatabase(db);
+     sharedCache.addDatabaseToCache(db);
+   }
+ 
+   @Override
+   public Database getDatabase(String catName, String dbName) throws NoSuchObjectException {
+     if (!sharedCache.isDatabaseCachePrewarmed()) {
+       return rawStore.getDatabase(catName, dbName);
+     }
+     dbName = dbName.toLowerCase();
+     Database db = sharedCache.getDatabaseFromCache(StringUtils.normalizeIdentifier(catName),
+             StringUtils.normalizeIdentifier(dbName));
+     if (db == null) {
+       throw new NoSuchObjectException();
+     }
+     return db;
+   }
+ 
+   @Override
+   public boolean dropDatabase(String catName, String dbName) throws NoSuchObjectException, MetaException {
+     boolean succ = rawStore.dropDatabase(catName, dbName);
+     if (succ) {
+       sharedCache.removeDatabaseFromCache(StringUtils.normalizeIdentifier(catName),
+           StringUtils.normalizeIdentifier(dbName));
+     }
+     return succ;
+   }
+ 
+   @Override
+   public boolean alterDatabase(String catName, String dbName, Database db)
+       throws NoSuchObjectException, MetaException {
+     boolean succ = rawStore.alterDatabase(catName, dbName, db);
+     if (succ) {
+       sharedCache.alterDatabaseInCache(StringUtils.normalizeIdentifier(catName),
+           StringUtils.normalizeIdentifier(dbName), db);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public List<String> getDatabases(String catName, String pattern) throws MetaException {
+     if (!sharedCache.isDatabaseCachePrewarmed()) {
+       return rawStore.getDatabases(catName, pattern);
+     }
+     return sharedCache.listCachedDatabases(catName, pattern);
+   }
+ 
+   @Override
+   public List<String> getAllDatabases(String catName) throws MetaException {
+     if (!sharedCache.isDatabaseCachePrewarmed()) {
+       return rawStore.getAllDatabases(catName);
+     }
+     return sharedCache.listCachedDatabases(catName);
+   }
+ 
+   @Override
+   public boolean createType(Type type) {
+     return rawStore.createType(type);
+   }
+ 
+   @Override
+   public Type getType(String typeName) {
+     return rawStore.getType(typeName);
+   }
+ 
+   @Override
+   public boolean dropType(String typeName) {
+     return rawStore.dropType(typeName);
+   }
+ 
+   private void validateTableType(Table tbl) {
+     // If the table has property EXTERNAL set, update table type
+     // accordingly
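+     // For example, a MANAGED_TABLE with parameter EXTERNAL=TRUE is normalized to
+     // EXTERNAL_TABLE, and vice versa.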
+     String tableType = tbl.getTableType();
+     boolean isExternal = Boolean.parseBoolean(tbl.getParameters().get("EXTERNAL"));
+     if (TableType.MANAGED_TABLE.toString().equals(tableType)) {
+       if (isExternal) {
+         tableType = TableType.EXTERNAL_TABLE.toString();
+       }
+     }
+     if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) {
+       if (!isExternal) {
+         tableType = TableType.MANAGED_TABLE.toString();
+       }
+     }
+     tbl.setTableType(tableType);
+   }
+ 
+   @Override
+   public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+     rawStore.createTable(tbl);
+     String catName = normalizeIdentifier(tbl.getCatName());
+     String dbName = normalizeIdentifier(tbl.getDbName());
+     String tblName = normalizeIdentifier(tbl.getTableName());
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return;
+     }
+     validateTableType(tbl);
+     sharedCache.addTableToCache(catName, dbName, tblName, tbl);
+   }
+ 
+   @Override
+   public boolean dropTable(String catName, String dbName, String tblName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
+     boolean succ = rawStore.dropTable(catName, dbName, tblName);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.removeTableFromCache(catName, dbName, tblName);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public Table getTable(String catName, String dbName, String tblName) throws MetaException {
++    return getTable(catName, dbName, tblName, -1, null);
++  }
++
++  // TODO: if writeIdList is not null, check isolation level compliance for SVS,
++  // possibly with getTableFromCache() with table snapshot in cache.
++  @Override
++  public Table getTable(String catName, String dbName, String tblName,
++                        long txnId, String writeIdList)
++      throws MetaException {
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
 -      return rawStore.getTable(catName, dbName, tblName);
++      return rawStore.getTable(catName, dbName, tblName, txnId, writeIdList);
+     }
+     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
 -    if (tbl == null) {
++    if (tbl == null || writeIdList != null) {
+       // Either the table is not yet loaded in cache, or a writeIdList was supplied and the
+       // raw store must serve it so the requested snapshot is honored.
+       // If the prewarm thread is working on this table's database,
+       // let's move this table to the top of tblNamesBeingPrewarmed stack,
+       // so that it gets loaded to the cache faster and is available for subsequent requests
+       tblsPendingPrewarm.prioritizeTableForPrewarm(tblName);
 -      return rawStore.getTable(catName, dbName, tblName);
++      return rawStore.getTable(catName, dbName, tblName, txnId, writeIdList);
+     }
+     if (tbl != null) {
+       tbl.unsetPrivileges();
+       tbl.setRewriteEnabled(tbl.isRewriteEnabled());
+     }
+     return tbl;
+   }
+ 
+   @Override
+   public boolean addPartition(Partition part) throws InvalidObjectException, MetaException {
+     boolean succ = rawStore.addPartition(part);
+     if (succ) {
+       String dbName = normalizeIdentifier(part.getDbName());
+       String tblName = normalizeIdentifier(part.getTableName());
+       String catName = part.isSetCatName() ? normalizeIdentifier(part.getCatName()) : DEFAULT_CATALOG_NAME;
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.addPartitionToCache(catName, dbName, tblName, part);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, List<Partition> parts)
+       throws InvalidObjectException, MetaException {
+     boolean succ = rawStore.addPartitions(catName, dbName, tblName, parts);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.addPartitionsToCache(catName, dbName, tblName, parts);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec,
+       boolean ifNotExists) throws InvalidObjectException, MetaException {
+     boolean succ = rawStore.addPartitions(catName, dbName, tblName, partitionSpec, ifNotExists);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator();
+       while (iterator.hasNext()) {
+         Partition part = iterator.next();
+         sharedCache.addPartitionToCache(catName, dbName, tblName, part);
+       }
+     }
+     return succ;
+   }
+ 
+   @Override
+   public Partition getPartition(String catName, String dbName, String tblName, List<String> part_vals)
+       throws MetaException, NoSuchObjectException {
++    return getPartition(catName, dbName, tblName, part_vals, -1, null);
++  }
++
++  // TODO: the same as getTable()
++  @Override
++  public Partition getPartition(String catName, String dbName, String tblName,
++                                List<String> part_vals, long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
 -      return rawStore.getPartition(catName, dbName, tblName, part_vals);
++      return rawStore.getPartition(
++          catName, dbName, tblName, part_vals, txnId, writeIdList);
+     }
+     Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, part_vals);
 -    if (part == null) {
++    if (part == null || writeIdList != null) {
+       // Either the partition is not yet in the cache, or a writeIdList was supplied and the
+       // raw store must serve it so the requested snapshot is honored.
 -      return rawStore.getPartition(catName, dbName, tblName, part_vals);
++      return rawStore.getPartition(
++          catName, dbName, tblName, part_vals, txnId, writeIdList);
+     }
+     return part;
+   }
+ 
+   @Override
+   public boolean doesPartitionExist(String catName, String dbName, String tblName,
+       List<FieldSchema> partKeys, List<String> part_vals)
+       throws MetaException, NoSuchObjectException {
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, part_vals);
+     }
+     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (tbl == null) {
+       // The table containing the partition is not yet loaded in cache
+       return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, part_vals);
+     }
+     return sharedCache.existPartitionFromCache(catName, dbName, tblName, part_vals);
+   }
+ 
+   @Override
+   public boolean dropPartition(String catName, String dbName, String tblName, List<String> part_vals)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
+     boolean succ = rawStore.dropPartition(catName, dbName, tblName, part_vals);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.removePartitionFromCache(catName, dbName, tblName, part_vals);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public void dropPartitions(String catName, String dbName, String tblName, List<String> partNames)
+       throws MetaException, NoSuchObjectException {
+     rawStore.dropPartitions(catName, dbName, tblName, partNames);
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return;
+     }
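+     // Convert the partition names back into value lists so the matching entries can be
+     // removed from the cache.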
+     List<List<String>> partVals = new ArrayList<>();
+     for (String partName : partNames) {
+       partVals.add(partNameToVals(partName));
+     }
+     sharedCache.removePartitionsFromCache(catName, dbName, tblName, partVals);
+   }
+ 
+   @Override
+   public List<Partition> getPartitions(String catName, String dbName, String tblName, int max)
+       throws MetaException, NoSuchObjectException {
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getPartitions(catName, dbName, tblName, max);
+     }
+     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (tbl == null) {
+       // The table containing the partitions is not yet loaded in cache
+       return rawStore.getPartitions(catName, dbName, tblName, max);
+     }
+     List<Partition> parts = sharedCache.listCachedPartitions(catName, dbName, tblName, max);
+     return parts;
+   }
+ 
+   @Override
+   public Map<String, String> getPartitionLocations(String catName, String dbName, String tblName,
+       String baseLocationToNotShow, int max) {
+     return rawStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max);
+   }
+ 
+   @Override
 -  public void alterTable(String catName, String dbName, String tblName, Table newTable)
 -      throws InvalidObjectException, MetaException {
 -    rawStore.alterTable(catName, dbName, tblName, newTable);
++  public void alterTable(String catName, String dbName, String tblName, Table newTable,
++      long txnId, String validWriteIds) throws InvalidObjectException, MetaException {
++    rawStore.alterTable(catName, dbName, tblName, newTable, txnId, validWriteIds);
+     catName = normalizeIdentifier(catName);
+     dbName = normalizeIdentifier(dbName);
+     tblName = normalizeIdentifier(tblName);
+     String newTblName = normalizeIdentifier(newTable.getTableName());
+     if (!shouldCacheTable(catName, dbName, tblName) &&
+         !shouldCacheTable(catName, dbName, newTblName)) {
+       return;
+     }
+     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (tbl == null) {
+       // The table is not yet loaded in cache
+       return;
+     }
+     if (shouldCacheTable(catName, dbName, tblName) && shouldCacheTable(catName, dbName, newTblName)) {
+       // If old table is in the cache and the new table can also be cached
+       sharedCache.alterTableInCache(catName, dbName, tblName, newTable);
+     } else if (!shouldCacheTable(catName, dbName, tblName) && shouldCacheTable(catName, dbName, newTblName)) {
+       // If old table is *not* in the cache but the new table can be cached
+       sharedCache.addTableToCache(catName, dbName, newTblName, newTable);
+     } else if (shouldCacheTable(catName, dbName, tblName) && !shouldCacheTable(catName, dbName, newTblName)) {
+       // If old table is in the cache but the new table *cannot* be cached
+       sharedCache.removeTableFromCache(catName, dbName, tblName);
+     }
+   }
+ 
+   @Override
+   public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm)
+       throws MetaException {
+     rawStore.updateCreationMetadata(catName, dbname, tablename, cm);
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern) throws MetaException {
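+     // If a table blacklist/whitelist is configured, or prewarm has not finished, the cache may
+     // not contain every table, so fall back to the raw store for listings.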
+     if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) {
+       return rawStore.getTables(catName, dbName, pattern);
+     }
+     return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName),
+         StringUtils.normalizeIdentifier(dbName), pattern, (short) -1);
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern, TableType tableType)
+       throws MetaException {
+     if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) {
+       return rawStore.getTables(catName, dbName, pattern, tableType);
+     }
+     return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName),
+         StringUtils.normalizeIdentifier(dbName), pattern, tableType);
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.getMaterializedViewsForRewriting(catName, dbName);
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames,
+                                       List<String> tableTypes) throws MetaException {
+     // TODO Check if all required tables are allowed, if so, get it from cache
+     if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) {
+       return rawStore.getTableMeta(catName, dbNames, tableNames, tableTypes);
+     }
+     return sharedCache.getTableMeta(StringUtils.normalizeIdentifier(catName),
+         StringUtils.normalizeIdentifier(dbNames),
+         StringUtils.normalizeIdentifier(tableNames), tableTypes);
+   }
+ 
+   @Override
+   public List<Table> getTableObjectsByName(String catName, String dbName, List<String> tblNames)
+       throws MetaException, UnknownDBException {
+     dbName = normalizeIdentifier(dbName);
+     catName = normalizeIdentifier(catName);
+     boolean missSomeInCache = false;
+     for (String tblName : tblNames) {
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         missSomeInCache = true;
+         break;
+       }
+     }
+     if (!isCachePrewarmed.get() || missSomeInCache) {
+       return rawStore.getTableObjectsByName(catName, dbName, tblNames);
+     }
+     List<Table> tables = new ArrayList<>();
+     for (String tblName : tblNames) {
+       tblName = normalizeIdentifier(tblName);
+       Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+       if (tbl == null) {
+         tbl = rawStore.getTable(catName, dbName, tblName);
+       }
+       tables.add(tbl);
+     }
+     return tables;
+   }
+ 
+   @Override
+   public List<String> getAllTables(String catName, String dbName) throws MetaException {
+     if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) {
+       return rawStore.getAllTables(catName, dbName);
+     }
+     return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName),
+         StringUtils.normalizeIdentifier(dbName));
+   }
+ 
+   @Override
+   public List<String> listTableNamesByFilter(String catName, String dbName, String filter,
+                                              short max_tables)
+       throws MetaException, UnknownDBException {
+     if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) {
+       return rawStore.listTableNamesByFilter(catName, dbName, filter, max_tables);
+     }
+     return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName),
+         StringUtils.normalizeIdentifier(dbName), filter, max_tables);
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String dbName, String tblName,
+       short max_parts) throws MetaException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.listPartitionNames(catName, dbName, tblName, max_parts);
+     }
+     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (tbl == null) {
+       // The table is not yet loaded in cache
+       return rawStore.listPartitionNames(catName, dbName, tblName, max_parts);
+     }
+     List<String> partitionNames = new ArrayList<>();
+     int count = 0;
+     for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, max_parts)) {
+       if (max_parts == -1 || count < max_parts) {
+         partitionNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues()));
+         count++;
+       }
+     }
+     return partitionNames;
+   }
+ 
+   @Override
+   public PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name,
+       List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending,
+       List<FieldSchema> order, long maxParts) throws MetaException {
+     throw new UnsupportedOperationException();
+   }
+ 
+   @Override
+   public void alterPartition(String catName, String dbName, String tblName, List<String> partVals,
 -                             Partition newPart) throws InvalidObjectException, MetaException {
 -    rawStore.alterPartition(catName, dbName, tblName, partVals, newPart);
++                             Partition newPart, long queryTxnId, String queryValidWriteIds)
++                                 throws InvalidObjectException, MetaException {
++    rawStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryTxnId, queryValidWriteIds);
+     catName = normalizeIdentifier(catName);
+     dbName = normalizeIdentifier(dbName);
+     tblName = normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return;
+     }
+     sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, newPart);
+   }
+ 
+   @Override
+   public void alterPartitions(String catName, String dbName, String tblName,
 -                              List<List<String>> partValsList, List<Partition> newParts)
++                              List<List<String>> partValsList, List<Partition> newParts,
++                              long writeId, long txnId, String validWriteIds)
+       throws InvalidObjectException, MetaException {
 -    rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts);
++    rawStore.alterPartitions(
++        catName, dbName, tblName, partValsList, newParts, writeId, txnId, validWriteIds);
+     catName = normalizeIdentifier(catName);
+     dbName = normalizeIdentifier(dbName);
+     tblName = normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return;
+     }
++    // TODO: modify the following method for the case when writeIdList != null.
+     sharedCache.alterPartitionsInCache(catName, dbName, tblName, partValsList, newParts);
+   }
+ 
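+   // Lists the cached partitions of a table, converts them to partition names, and prunes the
+   // names with the given partition expression via the expression proxy (no transaction used).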
+   private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr,
+       String defaultPartName, short maxParts, List<String> result, SharedCache sharedCache)
+       throws MetaException, NoSuchObjectException {
+     List<Partition> parts =
+         sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(table.getCatName()),
+             StringUtils.normalizeIdentifier(table.getDbName()),
+             StringUtils.normalizeIdentifier(table.getTableName()), maxParts);
+     for (Partition part : parts) {
+       result.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues()));
+     }
+     if (defaultPartName == null || defaultPartName.isEmpty()) {
+       defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME);
+     }
+     return expressionProxy.filterPartitionsByExpr(table.getPartitionKeys(), expr, defaultPartName,
+         result);
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByFilter(String catName, String dbName, String tblName,
+       String filter, short maxParts)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts);
+   }
+ 
+   @Override
+   public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr,
+       String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts,
+           result);
+     }
+     List<String> partNames = new LinkedList<>();
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts,
+           result);
+     }
+     boolean hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn(table, expr,
+         defaultPartitionName, maxParts, partNames, sharedCache);
+     // Materialize the pruned names into Partition objects from the cache before returning.
+     for (String partName : partNames) {
+       Partition part =
+           sharedCache.getPartitionFromCache(catName, dbName, tblName, partNameToVals(partName));
+       if (part != null) {
+         result.add(part);
+       }
+     }
+     return hasUnknownPartitions;
+   }
+ 
+   @Override
+   public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.getNumPartitionsByFilter(catName, dbName, tblName, filter);
+   }
+ 
+   @Override
+   public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr)
+       throws MetaException, NoSuchObjectException {
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr);
+     }
+     String defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME);
+     List<String> partNames = new LinkedList<>();
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr);
+     }
+     getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartName, Short.MAX_VALUE, partNames,
+         sharedCache);
+     return partNames.size();
+   }
+ 
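+   // Converts a partition name such as "ds=2018-07-19/hr=12" into its value list,
+   // e.g. ["2018-07-19", "12"], unescaping each path component.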
+   private static List<String> partNameToVals(String name) {
+     if (name == null) {
+       return null;
+     }
+     List<String> vals = new ArrayList<>();
+     String[] kvp = name.split("/");
+     for (String kv : kvp) {
+       vals.add(FileUtils.unescapePathName(kv.substring(kv.indexOf('=') + 1)));
+     }
+     return vals;
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
+       List<String> partNames) throws MetaException, NoSuchObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames);
+     }
+     List<Partition> partitions = new ArrayList<>();
+     for (String partName : partNames) {
+       Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, partNameToVals(partName));
+       if (part != null) {
+         partitions.add(part);
+       }
+     }
+     return partitions;
+   }
+ 
+   @Override
+   public Table markPartitionForEvent(String catName, String dbName, String tblName,
+       Map<String, String> partVals, PartitionEventType evtType)
+       throws MetaException, UnknownTableException, InvalidPartitionException,
+       UnknownPartitionException {
+     return rawStore.markPartitionForEvent(catName, dbName, tblName, partVals, evtType);
+   }
+ 
+   @Override
+   public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName,
+       Map<String, String> partName, PartitionEventType evtType)
+       throws MetaException, UnknownTableException, InvalidPartitionException,
+       UnknownPartitionException {
+     return rawStore.isPartitionMarkedForEvent(catName, dbName, tblName, partName, evtType);
+   }
+ 
+   @Override
+   public boolean addRole(String rowName, String ownerName)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return rawStore.addRole(rowName, ownerName);
+   }
+ 
+   @Override
+   public boolean removeRole(String roleName)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.removeRole(roleName);
+   }
+ 
+   @Override
+   public boolean grantRole(Role role, String userName,
+       PrincipalType principalType, String grantor, PrincipalType grantorType,
+       boolean grantOption)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     return rawStore.grantRole(role, userName, principalType, grantor, grantorType, grantOption);
+   }
+ 
+   @Override
+   public boolean revokeRole(Role role, String userName,
+       PrincipalType principalType, boolean grantOption)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.revokeRole(role, userName, principalType, grantOption);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return rawStore.getUserPrivilegeSet(userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return rawStore.getDBPrivilegeSet(catName, dbName, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName,
+       String tableName, String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+     return rawStore.getTablePrivilegeSet(catName, dbName, tableName, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName,
+       String tableName, String partition, String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return rawStore.getPartitionPrivilegeSet(catName, dbName, tableName, partition, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName,
+       String tableName, String partitionName, String columnName,
+       String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+     return rawStore.getColumnPrivilegeSet(catName, dbName, tableName, partitionName, columnName, userName, groupNames);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalGlobalGrants(
+       String principalName, PrincipalType principalType) {
+     return rawStore.listPrincipalGlobalGrants(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName) {
+     return rawStore.listPrincipalDBGrants(principalName, principalType, catName, dbName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName) {
+     return rawStore.listAllTableGrants(principalName, principalType, catName, dbName, tableName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, List<String> partValues, String partName) {
+     return rawStore.listPrincipalPartitionGrants(principalName, principalType, catName, dbName, tableName, partValues, partName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, String columnName) {
+     return rawStore.listPrincipalTableColumnGrants(principalName, principalType, catName, dbName, tableName, columnName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, List<String> partValues, String partName,
+       String columnName) {
+     return rawStore.listPrincipalPartitionColumnGrants(principalName, principalType, catName, dbName, tableName, partValues, partName, columnName);
+   }
+ 
+   @Override
+   public boolean grantPrivileges(PrivilegeBag privileges)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return rawStore.grantPrivileges(privileges);
+   }
+ 
+   @Override
+   public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return rawStore.revokePrivileges(privileges, grantOption);
+   }
+ 
+   @Override
+   public boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return rawStore.refreshPrivileges(objToRefresh, authorizer, grantPrivileges);
+   }
+ 
+   @Override
+   public Role getRole(String roleName) throws NoSuchObjectException {
+     return rawStore.getRole(roleName);
+   }
+ 
+   @Override
+   public List<String> listRoleNames() {
+     return rawStore.listRoleNames();
+   }
+ 
+   @Override
+   public List<Role> listRoles(String principalName,
+       PrincipalType principalType) {
+     return rawStore.listRoles(principalName, principalType);
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRolesWithGrants(String principalName,
+       PrincipalType principalType) {
+     return rawStore.listRolesWithGrants(principalName, principalType);
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRoleMembers(String roleName) {
+     return rawStore.listRoleMembers(roleName);
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuth(String catName, String dbName, String tblName,
+       List<String> partVals, String userName, List<String> groupNames)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames);
+     }
+     Partition p = sharedCache.getPartitionFromCache(catName, dbName, tblName, partVals);
+     if (p != null) {
+       String partName = Warehouse.makePartName(table.getPartitionKeys(), partVals);
+       PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, tblName, partName,
+           userName, groupNames);
+       p.setPrivileges(privs);
+     }
+     return p;
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsWithAuth(String catName, String dbName, String tblName,
+       short maxParts, String userName, List<String> groupNames)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames);
+     }
+     List<Partition> partitions = new ArrayList<>();
+     int count = 0;
+     for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) {
+       if (maxParts == -1 || count < maxParts) {
+         String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues());
+         PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, tblName, partName,
+             userName, groupNames);
+         part.setPrivileges(privs);
+         partitions.add(part);
+         count++;
+       }
+     }
+     return partitions;
+   }
+ 
+   @Override
+   public List<String> listPartitionNamesPs(String catName, String dbName, String tblName,
+       List<String> partVals, short maxParts)
+       throws MetaException, NoSuchObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts);
+     }
+     List<String> partNames = new ArrayList<>();
+     int count = 0;
+     for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) {
+       boolean psMatch = true;
+       for (int i = 0; i < partVals.size(); i++) {
+         String psVal = partVals.get(i);
+         String partVal = part.getValues().get(i);
+         if (psVal != null && !psVal.isEmpty() && !psVal.equals(partVal)) {
+           psMatch = false;
+           break;
+         }
+       }
+       if (!psMatch) {
+         continue;
+       }
+       if (maxParts == -1 || count < maxParts) {
+         partNames.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues()));
+         count++;
+       }
+     }
+     return partNames;
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsPsWithAuth(String catName, String dbName, String tblName,
+       List<String> partVals, short maxParts, String userName, List<String> groupNames)
+       throws MetaException, InvalidObjectException, NoSuchObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts, userName,
+           groupNames);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts, userName,
+           groupNames);
+     }
+     List<Partition> partitions = new ArrayList<>();
+     int count = 0;
+     for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) {
+       boolean psMatch = true;
+       for (int i = 0; i < partVals.size(); i++) {
+         String psVal = partVals.get(i);
+         String partVal = part.getValues().get(i);
+         if (psVal != null && !psVal.isEmpty() && !psVal.equals(partVal)) {
+           psMatch = false;
+           break;
+         }
+       }
+       if (!psMatch) {
+         continue;
+       }
+       if (maxParts == -1 || count < maxParts) {
+         String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues());
+         PrincipalPrivilegeSet privs =
+             getPartitionPrivilegeSet(catName, dbName, tblName, partName, userName, groupNames);
+         part.setPrivileges(privs);
+         partitions.add(part);
+       }
+     }
+     return partitions;
+   }
+ 
+   @Override
 -  public boolean updateTableColumnStatistics(ColumnStatistics colStats)
++  public boolean updateTableColumnStatistics(ColumnStatistics colStats, long txnId, String validWriteIds, long writeId)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
 -    boolean succ = rawStore.updateTableColumnStatistics(colStats);
++    boolean succ = rawStore.updateTableColumnStatistics(colStats, txnId, validWriteIds, writeId);
+     if (succ) {
+       String catName = colStats.getStatsDesc().isSetCatName() ?
+           normalizeIdentifier(colStats.getStatsDesc().getCatName()) :
+           getDefaultCatalog(conf);
+       String dbName = normalizeIdentifier(colStats.getStatsDesc().getDbName());
+       String tblName = normalizeIdentifier(colStats.getStatsDesc().getTableName());
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+       if (table == null) {
+         // The table is not yet loaded in cache
+         return succ;
+       }
+       List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
+       List<String> colNames = new ArrayList<>();
+       for (ColumnStatisticsObj statsObj : statsObjs) {
+         colNames.add(statsObj.getColName());
+       }
+       StatsSetupConst.setColumnStatsState(table.getParameters(), colNames);
+       sharedCache.alterTableInCache(catName, dbName, tblName, table);
+       sharedCache.updateTableColStatsInCache(catName, dbName, tblName, statsObjs);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName,
+       List<String> colNames) throws MetaException, NoSuchObjectException {
++    return getTableColumnStatistics(catName, dbName, tblName, colNames, -1, null);
++  }
++
++  // TODO: the same as getTable()
++  @Override
++  public ColumnStatistics getTableColumnStatistics(
++      String catName, String dbName, String tblName, List<String> colNames,
++      long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
 -      return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
++      return rawStore.getTableColumnStatistics(
++          catName, dbName, tblName, colNames, txnId, writeIdList);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
 -    if (table == null) {
++    if (table == null || writeIdList != null) {
+       // Either the table is not yet loaded in cache, or a writeIdList was supplied and the
+       // raw store must serve it so the requested snapshot is honored.
 -      return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
++      return rawStore.getTableColumnStatistics(
++          catName, dbName, tblName, colNames, txnId, writeIdList);
+     }
+     ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tblName);
+     List<ColumnStatisticsObj> colStatObjs =
+         sharedCache.getTableColStatsFromCache(catName, dbName, tblName, colNames);
+     return new ColumnStatistics(csd, colStatObjs);
+   }
+ 
+   @Override
+   public boolean deleteTableColumnStatistics(String catName, String dbName, String tblName,
+                                              String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+     boolean succ = rawStore.deleteTableColumnStatistics(catName, dbName, tblName, colName);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.removeTableColStatsFromCache(catName, dbName, tblName, colName);
+     }
+     return succ;
+   }
+ 
+   @Override
 -  public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List<String> partVals)
++  public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List<String> partVals,
++      long txnId, String validWriteIds, long writeId)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
 -    boolean succ = rawStore.updatePartitionColumnStatistics(colStats, partVals);
++    boolean succ = rawStore.updatePartitionColumnStatistics(colStats, partVals, txnId, validWriteIds, writeId);
+     if (succ) {
+       String catName = colStats.getStatsDesc().isSetCatName() ?
+           normalizeIdentifier(colStats.getStatsDesc().getCatName()) : DEFAULT_CATALOG_NAME;
+       String dbName = normalizeIdentifier(colStats.getStatsDesc().getDbName());
+       String tblName = normalizeIdentifier(colStats.getStatsDesc().getTableName());
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
+       Partition part = getPartition(catName, dbName, tblName, partVals);
+       List<String> colNames = new ArrayList<>();
+       for (ColumnStatisticsObj statsObj : statsObjs) {
+         colNames.add(statsObj.getColName());
+       }
+       StatsSetupConst.setColumnStatsState(part.getParameters(), colNames);
+       sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, part);
+       sharedCache.updatePartitionColStatsInCache(catName, dbName, tblName, partVals, colStats.getStatsObj());
+     }
+     return succ;
+   }
+ 
+   @Override
+   // TODO: calculate from cached values.
+   public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName, String tblName,
+       List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException {
+     return rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames);
+   }
+ 
+   @Override
++  public List<ColumnStatistics> getPartitionColumnStatistics(
++      String catName, String dbName, String tblName, List<String> partNames,
++      List<String> colNames, long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return rawStore.getPartitionColumnStatistics(
++        catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
++  }
++
++  @Override
+   public boolean deletePartitionColumnStatistics(String catName, String dbName, String tblName, String partName,
+       List<String> partVals, String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+     boolean succ =
+         rawStore.deletePartitionColumnStatistics(catName, dbName, tblName, partName, partVals, colName);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.removePartitionColStatsFromCache(catName, dbName, tblName, partVals, colName);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List<String> partNames,
+       List<String> colNames) throws MetaException, NoSuchObjectException {
++    return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, -1, null);
++  }
++
++  @Override
++  // TODO: the same as getTable() for transactional stats.
++  public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
++                                      List<String> partNames, List<String> colNames,
++                                      long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
+     List<ColumnStatisticsObj> colStats;
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
 -      rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
++      return rawStore.get_aggr_stats_for(
++          catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
 -    if (table == null) {
++    if (table == null || writeIdList != null) {
+       // The table is not yet loaded in cache
 -      return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
++      return rawStore.get_aggr_stats_for(
++          catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
+     }
+     List<String> allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
+     if (partNames.size() == allPartNames.size()) {
+       colStats = sharedCache.getAggrStatsFromCache(catName, dbName, tblName, colNames, StatsType.ALL);
+       if (colStats != null) {
+         return new AggrStats(colStats, partNames.size());
+       }
+     } else if (partNames.size() == (allPartNames.size() - 1)) {
+       String defaultPartitionName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME);
+       if (!partNames.contains(defaultPartitionName)) {
+         colStats =
+             sharedCache.getAggrStatsFromCache(catName, dbName, tblName, colNames, StatsType.ALLBUTDEFAULT);
+         if (colStats != null) {
+           return new AggrStats(colStats, partNames.size());
+         }
+       }
+     }
+     LOG.debug("Didn't find aggr stats in cache. Merging them. tblName= {}, parts= {}, cols= {}",
+         tblName, partNames, colNames);
+     MergedColumnStatsForPartitions mergedColStats =
+         mergeColStatsForPartitions(catName, dbName, tblName, partNames, colNames, sharedCache);
+     return new AggrStats(mergedColStats.getColStats(), mergedColStats.getPartsFound());
+   }
+ 
+   private MergedColumnStatsForPartitions mergeColStatsForPartitions(
+       String catName, String dbName, String tblName, List<String> partNames, List<String> colNames,
+       SharedCache sharedCache) throws MetaException {
+     final boolean useDensityFunctionForNDVEstimation =
+         MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION);
+     final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER);
+     Map<ColumnStatsAggregator, List<ColStatsObjWithSourceInfo>> colStatsMap = new HashMap<>();
+     boolean areAllPartsFound = true;
+     long partsFound = 0;
+     for (String colName : colNames) {
+       long partsFoundForColumn = 0;
+       ColumnStatsAggregator colStatsAggregator = null;
+       List<ColStatsObjWithSourceInfo> colStatsWithPartInfoList = new ArrayList<>();
+       for (String partName : partNames) {
+         ColumnStatisticsObj colStatsForPart =
+             sharedCache.getPartitionColStatsFromCache(catName, dbName, tblName, partNameToVals(partName), colName);
+         if (colStatsForPart != null) {
+           ColStatsObjWithSourceInfo colStatsWithPartInfo =
+               new ColStatsObjWithSourceInfo(colStatsForPart, catName, dbName, tblName, partName);
+           colStatsWithPartInfoList.add(colStatsWithPartInfo);
+           if (colStatsAggregator == null) {
+             colStatsAggregator = ColumnStatsAggregatorFactory.getColumnStatsAggregator(
+                 colStatsForPart.getStatsData().getSetField(), useDensityFunctionForNDVEstimation,
+                 ndvTuner);
+           }
+           partsFoundForColumn++;
+         } else {
+           LOG.debug(
+               "Stats not found in CachedStore for: dbName={} tblName={} partName={} colName={}",
+               dbName, tblName, partName, colName);
+         }
+       }
+       if (colStatsWithPartInfoList.size() > 0) {
+         colStatsMap.put(colStatsAggregator, colStatsWithPartInfoList);
+       }
+       if (partsFoundForColumn == partNames.size()) {
+         partsFound++;
+       }
+       if (colStatsMap.size() < 1) {
+         LOG.debug("No stats data found for: dbName={} tblName={} partNames={} colNames={}", dbName,
+             tblName, partNames, colNames);
+         return new MergedColumnStatsForPartitions(new ArrayList<ColumnStatisticsObj>(), 0);
+       }
+     }
+     // Note that enableBitVector does not apply here because ColumnStatisticsObj
+     // itself will tell whether bitvector is null or not and aggr logic can automatically apply.
+     return new MergedColumnStatsForPartitions(MetaStoreUtils.aggrPartitionStats(colStatsMap,
+         partNames, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner), partsFound);
+   }
+ 
+   class MergedColumnStatsForPartitions {
+     List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>();
+     long partsFound;
+ 
+     MergedColumnStatsForPartitions(List<ColumnStatisticsObj> colStats, long partsFound) {
+       this.colStats = colStats;
+       this.partsFound = partsFound;
+     }
+ 
+     List<ColumnStatisticsObj> getColStats() {
+       return colStats;
+     }
+ 
+     long getPartsFound() {
+       return partsFound;
+     }
+   }
+ 
+   @Override
+   public long cleanupEvents() {
+     return rawStore.cleanupEvents();
+   }
+ 
+   @Override
+   public boolean addToken(String tokenIdentifier, String delegationToken) {
+     return rawStore.addToken(tokenIdentifier, delegationToken);
+   }
+ 
+   @Override
+   public boolean removeToken(String tokenIdentifier) {
+     return rawStore.removeToken(tokenIdentifier);
+   }
+ 
+   @Override
+   public String getToken(String tokenIdentifier) {
+     return rawStore.getToken(tokenIdentifier);
+   }
+ 
+   @Override
+   public List<String> getAllTokenIdentifiers() {
+    

<TRUNCATED>
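
The CachedStore statistics methods in the (truncated) diff above all share one read pattern: serve from the SharedCache when the table is already cached, and fall back to the underlying RawStore when the table is not cached or when the caller passes a txnId/writeIdList that the cache cannot yet validate (hence the TODO comments about transactional stats). The sketch below illustrates only that control flow; StatsStore, StatsCache, CachedStatsReader and their methods are invented stand-ins for this illustration, not Hive classes.

    import java.util.List;

    // Invented stand-in for the backing RawStore used in the diff above.
    interface StatsStore {
      List<String> getTableColumnStatistics(String db, String tbl, List<String> cols,
                                            long txnId, String writeIdList);
    }

    // Invented stand-in for the SharedCache.
    interface StatsCache {
      boolean hasTable(String db, String tbl);
      List<String> getCachedColumnStatistics(String db, String tbl, List<String> cols);
    }

    final class CachedStatsReader {
      private final StatsStore rawStore;
      private final StatsCache cache;

      CachedStatsReader(StatsStore rawStore, StatsCache cache) {
        this.rawStore = rawStore;
        this.cache = cache;
      }

      List<String> getTableColumnStatistics(String db, String tbl, List<String> cols,
                                            long txnId, String writeIdList) {
        // A non-null writeIdList means the caller wants snapshot-consistent stats; the
        // cache is bypassed because its entries are not validated against that snapshot.
        // An uncached table also goes straight to the raw store.
        if (writeIdList != null || !cache.hasTable(db, tbl)) {
          return rawStore.getTableColumnStatistics(db, tbl, cols, txnId, writeIdList);
        }
        return cache.getCachedColumnStatistics(db, tbl, cols);
      }
    }

The same shape appears in getTableColumnStatistics and get_aggr_stats_for above, where the new "table == null || writeIdList != null" checks route transactional readers back to the RawStore.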

[13/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query15.q.out b/ql/src/test/results/clientpositive/perf/tez/query15.q.out
index b593a2e..3c7ae66 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query15.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query15.q.out
@@ -52,81 +52,83 @@ Stage-0
     limit:100
     Stage-1
       Reducer 5 vectorized
-      File Output Operator [FS_98]
-        Limit [LIM_97] (rows=100 width=135)
+      File Output Operator [FS_99]
+        Limit [LIM_98] (rows=100 width=135)
           Number of rows:100
-          Select Operator [SEL_96] (rows=174233858 width=135)
+          Select Operator [SEL_97] (rows=174233858 width=135)
             Output:["_col0","_col1"]
           <-Reducer 4 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_95]
-              Group By Operator [GBY_94] (rows=174233858 width=135)
+            SHUFFLE [RS_96]
+              Group By Operator [GBY_95] (rows=174233858 width=135)
                 Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
               <-Reducer 3 [SIMPLE_EDGE]
                 SHUFFLE [RS_25]
                   PartitionCols:_col0
                   Group By Operator [GBY_24] (rows=348467716 width=135)
                     Output:["_col0","_col1"],aggregations:["sum(_col7)"],keys:_col4
-                    Select Operator [SEL_23] (rows=348467716 width=135)
-                      Output:["_col4","_col7"]
-                      Filter Operator [FIL_22] (rows=348467716 width=135)
-                        predicate:((_col3) IN ('CA', 'WA', 'GA') or (_col7 > 500) or (substr(_col4, 1, 5)) IN ('85669', '86197', '88274', '83405', '86475', '85392', '85460', '80348', '81792'))
-                        Merge Join Operator [MERGEJOIN_76] (rows=348467716 width=135)
-                          Conds:RS_19._col0=RS_20._col1(Inner),Output:["_col3","_col4","_col7"]
-                        <-Reducer 2 [SIMPLE_EDGE]
-                          SHUFFLE [RS_19]
-                            PartitionCols:_col0
-                            Merge Join Operator [MERGEJOIN_74] (rows=88000001 width=860)
-                              Conds:RS_79._col1=RS_82._col0(Inner),Output:["_col0","_col3","_col4"]
-                            <-Map 1 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_79]
-                                PartitionCols:_col1
-                                Select Operator [SEL_78] (rows=80000000 width=860)
-                                  Output:["_col0","_col1"]
-                                  Filter Operator [FIL_77] (rows=80000000 width=860)
-                                    predicate:(c_current_addr_sk is not null and c_customer_sk is not null)
-                                    TableScan [TS_0] (rows=80000000 width=860)
-                                      default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_addr_sk"]
-                            <-Map 6 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_82]
-                                PartitionCols:_col0
-                                Select Operator [SEL_81] (rows=40000000 width=1014)
-                                  Output:["_col0","_col1","_col2"]
-                                  Filter Operator [FIL_80] (rows=40000000 width=1014)
-                                    predicate:ca_address_sk is not null
-                                    TableScan [TS_3] (rows=40000000 width=1014)
-                                      default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state","ca_zip"]
-                        <-Reducer 8 [SIMPLE_EDGE]
-                          SHUFFLE [RS_20]
-                            PartitionCols:_col1
-                            Merge Join Operator [MERGEJOIN_75] (rows=316788826 width=135)
-                              Conds:RS_93._col0=RS_85._col0(Inner),Output:["_col1","_col2"]
-                            <-Map 9 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_85]
-                                PartitionCols:_col0
-                                Select Operator [SEL_84] (rows=18262 width=1119)
-                                  Output:["_col0"]
-                                  Filter Operator [FIL_83] (rows=18262 width=1119)
-                                    predicate:((d_qoy = 2) and (d_year = 2000) and d_date_sk is not null)
-                                    TableScan [TS_9] (rows=73049 width=1119)
-                                      default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
-                            <-Map 7 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_93]
-                                PartitionCols:_col0
-                                Select Operator [SEL_92] (rows=287989836 width=135)
-                                  Output:["_col0","_col1","_col2"]
-                                  Filter Operator [FIL_91] (rows=287989836 width=135)
-                                    predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_13_date_dim_d_date_sk_min) AND DynamicValue(RS_13_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_13_date_dim_d_date_sk_bloom_filter))) and cs_bill_customer_sk is not null and cs_sold_date_sk is not null)
-                                    TableScan [TS_6] (rows=287989836 width=135)
-                                      default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_sales_price"]
-                                    <-Reducer 10 [BROADCAST_EDGE] vectorized
-                                      BROADCAST [RS_90]
-                                        Group By Operator [GBY_89] (rows=1 width=12)
-                                          Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                        <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_88]
-                                            Group By Operator [GBY_87] (rows=1 width=12)
-                                              Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                              Select Operator [SEL_86] (rows=18262 width=1119)
-                                                Output:["_col0"]
-                                                 Please refer to the previous Select Operator [SEL_84]
+                    Top N Key Operator [TNK_44] (rows=348467716 width=135)
+                      keys:_col4,sort order:+,top n:100
+                      Select Operator [SEL_23] (rows=348467716 width=135)
+                        Output:["_col4","_col7"]
+                        Filter Operator [FIL_22] (rows=348467716 width=135)
+                          predicate:((_col3) IN ('CA', 'WA', 'GA') or (_col7 > 500) or (substr(_col4, 1, 5)) IN ('85669', '86197', '88274', '83405', '86475', '85392', '85460', '80348', '81792'))
+                          Merge Join Operator [MERGEJOIN_77] (rows=348467716 width=135)
+                            Conds:RS_19._col0=RS_20._col1(Inner),Output:["_col3","_col4","_col7"]
+                          <-Reducer 2 [SIMPLE_EDGE]
+                            SHUFFLE [RS_19]
+                              PartitionCols:_col0
+                              Merge Join Operator [MERGEJOIN_75] (rows=88000001 width=860)
+                                Conds:RS_80._col1=RS_83._col0(Inner),Output:["_col0","_col3","_col4"]
+                              <-Map 1 [SIMPLE_EDGE] vectorized
+                                SHUFFLE [RS_80]
+                                  PartitionCols:_col1
+                                  Select Operator [SEL_79] (rows=80000000 width=860)
+                                    Output:["_col0","_col1"]
+                                    Filter Operator [FIL_78] (rows=80000000 width=860)
+                                      predicate:(c_current_addr_sk is not null and c_customer_sk is not null)
+                                      TableScan [TS_0] (rows=80000000 width=860)
+                                        default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_addr_sk"]
+                              <-Map 6 [SIMPLE_EDGE] vectorized
+                                SHUFFLE [RS_83]
+                                  PartitionCols:_col0
+                                  Select Operator [SEL_82] (rows=40000000 width=1014)
+                                    Output:["_col0","_col1","_col2"]
+                                    Filter Operator [FIL_81] (rows=40000000 width=1014)
+                                      predicate:ca_address_sk is not null
+                                      TableScan [TS_3] (rows=40000000 width=1014)
+                                        default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state","ca_zip"]
+                          <-Reducer 8 [SIMPLE_EDGE]
+                            SHUFFLE [RS_20]
+                              PartitionCols:_col1
+                              Merge Join Operator [MERGEJOIN_76] (rows=316788826 width=135)
+                                Conds:RS_94._col0=RS_86._col0(Inner),Output:["_col1","_col2"]
+                              <-Map 9 [SIMPLE_EDGE] vectorized
+                                SHUFFLE [RS_86]
+                                  PartitionCols:_col0
+                                  Select Operator [SEL_85] (rows=18262 width=1119)
+                                    Output:["_col0"]
+                                    Filter Operator [FIL_84] (rows=18262 width=1119)
+                                      predicate:((d_qoy = 2) and (d_year = 2000) and d_date_sk is not null)
+                                      TableScan [TS_9] (rows=73049 width=1119)
+                                        default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
+                              <-Map 7 [SIMPLE_EDGE] vectorized
+                                SHUFFLE [RS_94]
+                                  PartitionCols:_col0
+                                  Select Operator [SEL_93] (rows=287989836 width=135)
+                                    Output:["_col0","_col1","_col2"]
+                                    Filter Operator [FIL_92] (rows=287989836 width=135)
+                                      predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_13_date_dim_d_date_sk_min) AND DynamicValue(RS_13_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_13_date_dim_d_date_sk_bloom_filter))) and cs_bill_customer_sk is not null and cs_sold_date_sk is not null)
+                                      TableScan [TS_6] (rows=287989836 width=135)
+                                        default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_sales_price"]
+                                      <-Reducer 10 [BROADCAST_EDGE] vectorized
+                                        BROADCAST [RS_91]
+                                          Group By Operator [GBY_90] (rows=1 width=12)
+                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                          <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_89]
+                                              Group By Operator [GBY_88] (rows=1 width=12)
+                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                Select Operator [SEL_87] (rows=18262 width=1119)
+                                                  Output:["_col0"]
+                                                   Please refer to the previous Select Operator [SEL_85]
 

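The query15 plan diff above is representative of what this commit changes in these .q.out files: a new "Top N Key Operator" (TNK_44, keys:_col4, sort order:+, top n:100) is inserted between the reduce-side Group By and its input, so rows whose grouping key can no longer reach the final LIMIT 100 are filtered early, and the remaining operator IDs shift by one. As a rough, editorial illustration of the idea only (class and method names below are invented; this is not the HIVE-17896 implementation), a bounded top-n key filter can be sketched like this:

    import java.util.Comparator;
    import java.util.TreeSet;

    // Illustrative sketch of a top-n key filter; names are invented for this example.
    final class TopNKeyFilter<K> {
      private final int topN;
      private final Comparator<? super K> cmp;
      private final TreeSet<K> smallestKeys;   // holds at most topN distinct keys seen so far

      TopNKeyFilter(int topN, Comparator<? super K> cmp) {
        this.topN = topN;
        this.cmp = cmp;
        this.smallestKeys = new TreeSet<>(cmp);
      }

      // Returns true if a row with this key may still contribute to the final top-n result.
      boolean canForward(K key) {
        if (smallestKeys.size() < topN || cmp.compare(key, smallestKeys.last()) <= 0) {
          smallestKeys.add(key);
          if (smallestKeys.size() > topN) {
            smallestKeys.pollLast();           // evict the current largest retained key
          }
          return true;
        }
        return false;                          // key already falls outside the top n; drop the row
      }
    }

Applied to the plan above, the key would be _col4 with n = 100 under ascending order: duplicates of an already-accepted key still pass (they belong to the same group), while keys provably outside the smallest 100 are dropped before the shuffle into GBY_95.
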
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query17.q.out b/ql/src/test/results/clientpositive/perf/tez/query17.q.out
index 620d88a..e185775 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query17.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query17.q.out
@@ -112,199 +112,201 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_269]
-        Limit [LIM_268] (rows=100 width=88)
+      File Output Operator [FS_270]
+        Limit [LIM_269] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_267] (rows=421657640 width=88)
+          Select Operator [SEL_268] (rows=421657640 width=88)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"]
           <-Reducer 6 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_266]
-              Select Operator [SEL_265] (rows=421657640 width=88)
+            SHUFFLE [RS_267]
+              Select Operator [SEL_266] (rows=421657640 width=88)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13"]
-                Group By Operator [GBY_264] (rows=421657640 width=88)
+                Group By Operator [GBY_265] (rows=421657640 width=88)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"],aggregations:["count(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","count(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","count(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)"],keys:KEY._col0, KEY._col1, KEY._col2
                 <-Reducer 5 [SIMPLE_EDGE]
                   SHUFFLE [RS_50]
                     PartitionCols:_col0, _col1, _col2
                     Group By Operator [GBY_49] (rows=843315281 width=88)
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"],aggregations:["count(_col3)","sum(_col3)","sum(_col7)","sum(_col6)","count(_col4)","sum(_col4)","sum(_col9)","sum(_col8)","count(_col5)","sum(_col5)","sum(_col11)","sum(_col10)"],keys:_col0, _col1, _col2
-                      Select Operator [SEL_47] (rows=843315281 width=88)
-                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"]
-                        Merge Join Operator [MERGEJOIN_212] (rows=843315281 width=88)
-                          Conds:RS_44._col3=RS_250._col0(Inner),Output:["_col5","_col9","_col10","_col14","_col21","_col25"]
-                        <-Map 20 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_250]
-                            PartitionCols:_col0
-                            Select Operator [SEL_249] (rows=1704 width=1910)
-                              Output:["_col0","_col1"]
-                              Filter Operator [FIL_248] (rows=1704 width=1910)
-                                predicate:s_store_sk is not null
-                                TableScan [TS_32] (rows=1704 width=1910)
-                                  default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_state"]
-                        <-Reducer 4 [SIMPLE_EDGE]
-                          SHUFFLE [RS_44]
-                            PartitionCols:_col3
-                            Merge Join Operator [MERGEJOIN_211] (rows=766650239 width=88)
-                              Conds:RS_41._col1, _col2, _col4=RS_42._col7, _col8, _col9(Inner),Output:["_col3","_col5","_col9","_col10","_col14","_col21"]
-                            <-Reducer 11 [SIMPLE_EDGE]
-                              SHUFFLE [RS_42]
-                                PartitionCols:_col7, _col8, _col9
-                                Merge Join Operator [MERGEJOIN_210] (rows=348467716 width=135)
-                                  Conds:RS_28._col2, _col1=RS_29._col1, _col2(Inner),Output:["_col3","_col7","_col8","_col9","_col10"]
-                                <-Reducer 13 [SIMPLE_EDGE]
-                                  PARTITION_ONLY_SHUFFLE [RS_29]
-                                    PartitionCols:_col1, _col2
-                                    Merge Join Operator [MERGEJOIN_209] (rows=63350266 width=77)
-                                      Conds:RS_241._col0=RS_223._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
-                                    <-Map 8 [SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_223]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_218] (rows=36525 width=1119)
-                                          Output:["_col0"]
-                                          Filter Operator [FIL_215] (rows=36525 width=1119)
-                                            predicate:((d_quarter_name) IN ('2000Q1', '2000Q2', '2000Q3') and d_date_sk is not null)
-                                            TableScan [TS_3] (rows=73049 width=1119)
-                                              default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_quarter_name"]
-                                    <-Map 19 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_241]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_240] (rows=57591150 width=77)
-                                          Output:["_col0","_col1","_col2","_col3","_col4"]
-                                          Filter Operator [FIL_239] (rows=57591150 width=77)
-                                            predicate:(sr_customer_sk is not null and sr_item_sk is not null and sr_returned_date_sk is not null and sr_ticket_number is not null)
-                                            TableScan [TS_15] (rows=57591150 width=77)
-                                              default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_returned_date_sk","sr_item_sk","sr_customer_sk","sr_ticket_number","sr_return_quantity"]
-                                <-Reducer 10 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_28]
-                                    PartitionCols:_col2, _col1
-                                    Merge Join Operator [MERGEJOIN_208] (rows=316788826 width=135)
-                                      Conds:RS_263._col0=RS_221._col0(Inner),Output:["_col1","_col2","_col3"]
-                                    <-Map 8 [SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_221]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_217] (rows=36525 width=1119)
-                                          Output:["_col0"]
-                                          Filter Operator [FIL_214] (rows=36525 width=1119)
-                                            predicate:((d_quarter_name) IN ('2000Q1', '2000Q2', '2000Q3') and d_date_sk is not null)
-                                             Please refer to the previous TableScan [TS_3]
-                                    <-Map 18 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_263]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_262] (rows=287989836 width=135)
-                                          Output:["_col0","_col1","_col2","_col3"]
-                                          Filter Operator [FIL_261] (rows=287989836 width=135)
-                                            predicate:((cs_bill_customer_sk BETWEEN DynamicValue(RS_29_store_returns_sr_customer_sk_min) AND DynamicValue(RS_29_store_returns_sr_customer_sk_max) and in_bloom_filter(cs_bill_customer_sk, DynamicValue(RS_29_store_returns_sr_customer_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_29_store_returns_sr_item_sk_min) AND DynamicValue(RS_29_store_returns_sr_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_29_store_returns_sr_item_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_39_item_i_item_sk_min) AND DynamicValue(RS_39_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_39_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_26_d3_d_date_sk_min) AND DynamicValue(RS_26_d3_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_26_d3_d_date_sk_bloom_filter))) and cs_bill_customer_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
-                                            TableScan [TS_9] (rows=287989836 width=135)
-                                              default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_item_sk","cs_quantity"]
-                                            <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_244]
-                                                Group By Operator [GBY_242] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
-                                                <-Reducer 13 [CUSTOM_SIMPLE_EDGE]
-                                                  PARTITION_ONLY_SHUFFLE [RS_110]
-                                                    Group By Operator [GBY_109] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
-                                                      Select Operator [SEL_108] (rows=63350266 width=77)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Merge Join Operator [MERGEJOIN_209]
-                                            <-Reducer 15 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_247]
-                                                Group By Operator [GBY_245] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
-                                                <-Reducer 13 [CUSTOM_SIMPLE_EDGE]
-                                                  PARTITION_ONLY_SHUFFLE [RS_125]
-                                                    Group By Operator [GBY_124] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
-                                                      Select Operator [SEL_123] (rows=63350266 width=77)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Merge Join Operator [MERGEJOIN_209]
-                                            <-Reducer 17 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_238]
-                                                Group By Operator [GBY_236] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 16 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_235]
-                                                    Group By Operator [GBY_234] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_233] (rows=462000 width=1436)
-                                                        Output:["_col0"]
-                                                        Select Operator [SEL_231] (rows=462000 width=1436)
-                                                          Output:["_col0","_col1","_col2"]
-                                                          Filter Operator [FIL_230] (rows=462000 width=1436)
-                                                            predicate:i_item_sk is not null
-                                                            TableScan [TS_6] (rows=462000 width=1436)
-                                                              default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_item_desc"]
-                                            <-Reducer 12 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_260]
-                                                Group By Operator [GBY_259] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  PARTITION_ONLY_SHUFFLE [RS_227]
-                                                    Group By Operator [GBY_225] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_222] (rows=36525 width=1119)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_217]
-                            <-Reducer 3 [SIMPLE_EDGE]
-                              SHUFFLE [RS_41]
-                                PartitionCols:_col1, _col2, _col4
-                                Merge Join Operator [MERGEJOIN_207] (rows=696954748 width=88)
-                                  Conds:RS_38._col1=RS_232._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col9","_col10"]
-                                <-Map 16 [SIMPLE_EDGE] vectorized
-                                  SHUFFLE [RS_232]
-                                    PartitionCols:_col0
-                                     Please refer to the previous Select Operator [SEL_231]
-                                <-Reducer 2 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_38]
-                                    PartitionCols:_col1
-                                    Merge Join Operator [MERGEJOIN_206] (rows=633595212 width=88)
-                                      Conds:RS_258._col0=RS_219._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5"]
-                                    <-Map 8 [SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_219]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_216] (rows=36524 width=1119)
-                                          Output:["_col0"]
-                                          Filter Operator [FIL_213] (rows=36524 width=1119)
-                                            predicate:((d_quarter_name = '2000Q1') and d_date_sk is not null)
-                                             Please refer to the previous TableScan [TS_3]
-                                    <-Map 1 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_258]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_257] (rows=575995635 width=88)
-                                          Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                          Filter Operator [FIL_256] (rows=575995635 width=88)
-                                            predicate:((ss_customer_sk BETWEEN DynamicValue(RS_29_store_returns_sr_customer_sk_min) AND DynamicValue(RS_29_store_returns_sr_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_29_store_returns_sr_customer_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_29_store_returns_sr_item_sk_min) AND DynamicValue(RS_29_store_returns_sr_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_29_store_returns_sr_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_39_item_i_item_sk_min) AND DynamicValue(RS_39_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_39_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_36_d1_d_date_sk_min) AND DynamicValue(RS_36_d1_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_36_d1_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_45_store_s_store_sk_min) AND DynamicValue(RS_45_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_45_store_s_store_sk_bloom_filter))) and ss_customer_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_ticket_number is not null)
-                                            TableScan [TS_0] (rows=575995635 width=88)
-                                              default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_store_sk","ss_ticket_number","ss_quantity"]
-                                            <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_243]
-                                                 Please refer to the previous Group By Operator [GBY_242]
-                                            <-Reducer 15 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_246]
-                                                 Please refer to the previous Group By Operator [GBY_245]
-                                            <-Reducer 17 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_237]
-                                                 Please refer to the previous Group By Operator [GBY_236]
-                                            <-Reducer 21 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_255]
-                                                Group By Operator [GBY_254] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_253]
-                                                    Group By Operator [GBY_252] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_251] (rows=1704 width=1910)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_249]
-                                            <-Reducer 9 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_229]
-                                                Group By Operator [GBY_228] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  PARTITION_ONLY_SHUFFLE [RS_226]
-                                                    Group By Operator [GBY_224] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_220] (rows=36524 width=1119)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_216]
+                      Top N Key Operator [TNK_93] (rows=843315281 width=88)
+                        keys:_col0, _col1, _col2,sort order:+++,top n:100
+                        Select Operator [SEL_47] (rows=843315281 width=88)
+                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"]
+                          Merge Join Operator [MERGEJOIN_213] (rows=843315281 width=88)
+                            Conds:RS_44._col3=RS_251._col0(Inner),Output:["_col5","_col9","_col10","_col14","_col21","_col25"]
+                          <-Map 20 [SIMPLE_EDGE] vectorized
+                            SHUFFLE [RS_251]
+                              PartitionCols:_col0
+                              Select Operator [SEL_250] (rows=1704 width=1910)
+                                Output:["_col0","_col1"]
+                                Filter Operator [FIL_249] (rows=1704 width=1910)
+                                  predicate:s_store_sk is not null
+                                  TableScan [TS_32] (rows=1704 width=1910)
+                                    default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_state"]
+                          <-Reducer 4 [SIMPLE_EDGE]
+                            SHUFFLE [RS_44]
+                              PartitionCols:_col3
+                              Merge Join Operator [MERGEJOIN_212] (rows=766650239 width=88)
+                                Conds:RS_41._col1, _col2, _col4=RS_42._col7, _col8, _col9(Inner),Output:["_col3","_col5","_col9","_col10","_col14","_col21"]
+                              <-Reducer 11 [SIMPLE_EDGE]
+                                SHUFFLE [RS_42]
+                                  PartitionCols:_col7, _col8, _col9
+                                  Merge Join Operator [MERGEJOIN_211] (rows=348467716 width=135)
+                                    Conds:RS_28._col2, _col1=RS_29._col1, _col2(Inner),Output:["_col3","_col7","_col8","_col9","_col10"]
+                                  <-Reducer 13 [SIMPLE_EDGE]
+                                    PARTITION_ONLY_SHUFFLE [RS_29]
+                                      PartitionCols:_col1, _col2
+                                      Merge Join Operator [MERGEJOIN_210] (rows=63350266 width=77)
+                                        Conds:RS_242._col0=RS_224._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
+                                      <-Map 8 [SIMPLE_EDGE] vectorized
+                                        PARTITION_ONLY_SHUFFLE [RS_224]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_219] (rows=36525 width=1119)
+                                            Output:["_col0"]
+                                            Filter Operator [FIL_216] (rows=36525 width=1119)
+                                              predicate:((d_quarter_name) IN ('2000Q1', '2000Q2', '2000Q3') and d_date_sk is not null)
+                                              TableScan [TS_3] (rows=73049 width=1119)
+                                                default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_quarter_name"]
+                                      <-Map 19 [SIMPLE_EDGE] vectorized
+                                        SHUFFLE [RS_242]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_241] (rows=57591150 width=77)
+                                            Output:["_col0","_col1","_col2","_col3","_col4"]
+                                            Filter Operator [FIL_240] (rows=57591150 width=77)
+                                              predicate:(sr_customer_sk is not null and sr_item_sk is not null and sr_returned_date_sk is not null and sr_ticket_number is not null)
+                                              TableScan [TS_15] (rows=57591150 width=77)
+                                                default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_returned_date_sk","sr_item_sk","sr_customer_sk","sr_ticket_number","sr_return_quantity"]
+                                  <-Reducer 10 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_28]
+                                      PartitionCols:_col2, _col1
+                                      Merge Join Operator [MERGEJOIN_209] (rows=316788826 width=135)
+                                        Conds:RS_264._col0=RS_222._col0(Inner),Output:["_col1","_col2","_col3"]
+                                      <-Map 8 [SIMPLE_EDGE] vectorized
+                                        PARTITION_ONLY_SHUFFLE [RS_222]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_218] (rows=36525 width=1119)
+                                            Output:["_col0"]
+                                            Filter Operator [FIL_215] (rows=36525 width=1119)
+                                              predicate:((d_quarter_name) IN ('2000Q1', '2000Q2', '2000Q3') and d_date_sk is not null)
+                                               Please refer to the previous TableScan [TS_3]
+                                      <-Map 18 [SIMPLE_EDGE] vectorized
+                                        SHUFFLE [RS_264]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_263] (rows=287989836 width=135)
+                                            Output:["_col0","_col1","_col2","_col3"]
+                                            Filter Operator [FIL_262] (rows=287989836 width=135)
+                                              predicate:((cs_bill_customer_sk BETWEEN DynamicValue(RS_29_store_returns_sr_customer_sk_min) AND DynamicValue(RS_29_store_returns_sr_customer_sk_max) and in_bloom_filter(cs_bill_customer_sk, DynamicValue(RS_29_store_returns_sr_customer_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_29_store_returns_sr_item_sk_min) AND DynamicValue(RS_29_store_returns_sr_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_29_store_returns_sr_item_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_39_item_i_item_sk_min) AND DynamicValue(RS_39_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_39_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_26_d3_d_date_sk_min) AND DynamicValue(RS_26_d3_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_26_d3_d_date_sk_bloom_filter))) and cs_bill_customer_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
+                                              TableScan [TS_9] (rows=287989836 width=135)
+                                                default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_item_sk","cs_quantity"]
+                                              <-Reducer 14 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_245]
+                                                  Group By Operator [GBY_243] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
+                                                  <-Reducer 13 [CUSTOM_SIMPLE_EDGE]
+                                                    PARTITION_ONLY_SHUFFLE [RS_111]
+                                                      Group By Operator [GBY_110] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
+                                                        Select Operator [SEL_109] (rows=63350266 width=77)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_210]
+                                              <-Reducer 15 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_248]
+                                                  Group By Operator [GBY_246] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
+                                                  <-Reducer 13 [CUSTOM_SIMPLE_EDGE]
+                                                    PARTITION_ONLY_SHUFFLE [RS_126]
+                                                      Group By Operator [GBY_125] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
+                                                        Select Operator [SEL_124] (rows=63350266 width=77)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_210]
+                                              <-Reducer 17 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_239]
+                                                  Group By Operator [GBY_237] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                  <-Map 16 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                    SHUFFLE [RS_236]
+                                                      Group By Operator [GBY_235] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                        Select Operator [SEL_234] (rows=462000 width=1436)
+                                                          Output:["_col0"]
+                                                          Select Operator [SEL_232] (rows=462000 width=1436)
+                                                            Output:["_col0","_col1","_col2"]
+                                                            Filter Operator [FIL_231] (rows=462000 width=1436)
+                                                              predicate:i_item_sk is not null
+                                                              TableScan [TS_6] (rows=462000 width=1436)
+                                                                default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_item_desc"]
+                                              <-Reducer 12 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_261]
+                                                  Group By Operator [GBY_260] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                  <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                    PARTITION_ONLY_SHUFFLE [RS_228]
+                                                      Group By Operator [GBY_226] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                        Select Operator [SEL_223] (rows=36525 width=1119)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Select Operator [SEL_218]
+                              <-Reducer 3 [SIMPLE_EDGE]
+                                SHUFFLE [RS_41]
+                                  PartitionCols:_col1, _col2, _col4
+                                  Merge Join Operator [MERGEJOIN_208] (rows=696954748 width=88)
+                                    Conds:RS_38._col1=RS_233._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col9","_col10"]
+                                  <-Map 16 [SIMPLE_EDGE] vectorized
+                                    SHUFFLE [RS_233]
+                                      PartitionCols:_col0
+                                       Please refer to the previous Select Operator [SEL_232]
+                                  <-Reducer 2 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_38]
+                                      PartitionCols:_col1
+                                      Merge Join Operator [MERGEJOIN_207] (rows=633595212 width=88)
+                                        Conds:RS_259._col0=RS_220._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5"]
+                                      <-Map 8 [SIMPLE_EDGE] vectorized
+                                        PARTITION_ONLY_SHUFFLE [RS_220]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_217] (rows=36524 width=1119)
+                                            Output:["_col0"]
+                                            Filter Operator [FIL_214] (rows=36524 width=1119)
+                                              predicate:((d_quarter_name = '2000Q1') and d_date_sk is not null)
+                                               Please refer to the previous TableScan [TS_3]
+                                      <-Map 1 [SIMPLE_EDGE] vectorized
+                                        SHUFFLE [RS_259]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_258] (rows=575995635 width=88)
+                                            Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                            Filter Operator [FIL_257] (rows=575995635 width=88)
+                                              predicate:((ss_customer_sk BETWEEN DynamicValue(RS_29_store_returns_sr_customer_sk_min) AND DynamicValue(RS_29_store_returns_sr_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_29_store_returns_sr_customer_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_29_store_returns_sr_item_sk_min) AND DynamicValue(RS_29_store_returns_sr_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_29_store_returns_sr_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_39_item_i_item_sk_min) AND DynamicValue(RS_39_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_39_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_36_d1_d_date_sk_min) AND DynamicValue(RS_36_d1_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_36_d1_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_45_store_s_store_sk_min) AND DynamicValue(RS_45_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_45_store_s_store_sk_bloom_filter))) and ss_customer_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_ticket_number is not null)
+                                              TableScan [TS_0] (rows=575995635 width=88)
+                                                default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_store_sk","ss_ticket_number","ss_quantity"]
+                                              <-Reducer 14 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_244]
+                                                   Please refer to the previous Group By Operator [GBY_243]
+                                              <-Reducer 15 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_247]
+                                                   Please refer to the previous Group By Operator [GBY_246]
+                                              <-Reducer 17 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_238]
+                                                   Please refer to the previous Group By Operator [GBY_237]
+                                              <-Reducer 21 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_256]
+                                                  Group By Operator [GBY_255] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                  <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                    SHUFFLE [RS_254]
+                                                      Group By Operator [GBY_253] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                        Select Operator [SEL_252] (rows=1704 width=1910)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Select Operator [SEL_250]
+                                              <-Reducer 9 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_230]
+                                                  Group By Operator [GBY_229] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                  <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                    PARTITION_ONLY_SHUFFLE [RS_227]
+                                                      Group By Operator [GBY_225] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                        Select Operator [SEL_221] (rows=36524 width=1119)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Select Operator [SEL_217]
 


[51/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 0000000,92e2805..70edb96
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@@ -1,0 -1,3422 +1,3597 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName;
+ 
+ import java.io.IOException;
+ import java.lang.reflect.Constructor;
+ import java.lang.reflect.InvocationHandler;
+ import java.lang.reflect.InvocationTargetException;
+ import java.lang.reflect.Method;
+ import java.lang.reflect.Proxy;
+ import java.net.InetAddress;
+ import java.net.URI;
+ import java.net.UnknownHostException;
+ import java.nio.ByteBuffer;
+ import java.security.PrivilegedExceptionAction;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.Iterator;
+ import java.util.LinkedHashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ import java.util.NoSuchElementException;
+ import java.util.Random;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicInteger;
+ 
+ import javax.security.auth.login.LoginException;
+ 
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.common.ValidTxnList;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.hooks.URIResolverHook;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+ import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.ObjectPair;
+ import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+ import org.apache.hadoop.security.UserGroupInformation;
+ import org.apache.hadoop.util.ReflectionUtils;
+ import org.apache.hadoop.util.StringUtils;
+ import org.apache.thrift.TApplicationException;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.protocol.TBinaryProtocol;
+ import org.apache.thrift.protocol.TCompactProtocol;
+ import org.apache.thrift.protocol.TProtocol;
+ import org.apache.thrift.transport.TFramedTransport;
+ import org.apache.thrift.transport.TSocket;
+ import org.apache.thrift.transport.TTransport;
+ import org.apache.thrift.transport.TTransportException;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.collect.Lists;
+ 
+ /**
+  * Hive Metastore Client.
+  * The public implementation of IMetaStoreClient. Methods not inherited from IMetaStoreClient
+  * are not public and can change. Hence this is marked as unstable.
+  * For users who require a retry mechanism when the connection between the metastore and the
+  * client is broken, the RetryingMetaStoreClient class should be used.
+  */
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
+ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
+   /**
+    * Capabilities of the current client. If this client talks to a MetaStore server in a manner
+    * implying the usage of some expanded features that require client-side support that this client
+    * doesn't have (e.g. getting a table of a new type), it will get back failures when the
+    * capability checking is enabled (the default).
+    */
+   public final static ClientCapabilities VERSION = new ClientCapabilities(
+       Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES));
+   // Test capability for tests.
+   public final static ClientCapabilities TEST_VERSION = new ClientCapabilities(
+       Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES, ClientCapability.TEST_CAPABILITY));
+ 
+   ThriftHiveMetastore.Iface client = null;
+   private TTransport transport = null;
+   private boolean isConnected = false;
+   private URI metastoreUris[];
+   private final HiveMetaHookLoader hookLoader;
+   protected final Configuration conf;  // Keep a copy of the Configuration; if the session conf changes, we may need to get a new HMS client.
+   private String tokenStrForm;
+   private final boolean localMetaStore;
+   private final MetaStoreFilterHook filterHook;
+   private final URIResolverHook uriResolverHook;
+   private final int fileMetadataBatchSize;
+ 
+   private Map<String, String> currentMetaVars;
+ 
+   private static final AtomicInteger connCount = new AtomicInteger(0);
+ 
+   // for thrift connects
+   private int retries = 5;
+   private long retryDelaySeconds = 0;
+   private final ClientCapabilities version;
+ 
+   //copied from ErrorMsg.java
+   private static final String REPL_EVENTS_MISSING_IN_METASTORE = "Notification events are missing in the meta store.";
 -  
++
+   static final protected Logger LOG = LoggerFactory.getLogger(HiveMetaStoreClient.class);
+ 
+   public HiveMetaStoreClient(Configuration conf) throws MetaException {
+     this(conf, null, true);
+   }
+ 
+   public HiveMetaStoreClient(Configuration conf, HiveMetaHookLoader hookLoader) throws MetaException {
+     this(conf, hookLoader, true);
+   }
+ 
+   public HiveMetaStoreClient(Configuration conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded)
+     throws MetaException {
+ 
+     this.hookLoader = hookLoader;
+     if (conf == null) {
+       conf = MetastoreConf.newMetastoreConf();
+       this.conf = conf;
+     } else {
+       this.conf = new Configuration(conf);
+     }
+     version = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ? TEST_VERSION : VERSION;
+     filterHook = loadFilterHooks();
+     uriResolverHook = loadUriResolverHook();
+     fileMetadataBatchSize = MetastoreConf.getIntVar(
+         conf, ConfVars.BATCH_RETRIEVE_OBJECTS_MAX);
+ 
+     String msUri = MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS);
+     localMetaStore = MetastoreConf.isEmbeddedMetaStore(msUri);
+     if (localMetaStore) {
+       if (!allowEmbedded) {
+         throw new MetaException("Embedded metastore is not allowed here. Please configure "
+             + ConfVars.THRIFT_URIS.toString() + "; it is currently set to [" + msUri + "]");
+       }
+       // instantiate the metastore server handler directly instead of connecting
+       // through the network
+       client = HiveMetaStore.newRetryingHMSHandler("hive client", this.conf, true);
+       isConnected = true;
+       snapshotActiveConf();
+       return;
+     }
+ 
+     // get the number retries
+     retries = MetastoreConf.getIntVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES);
+     retryDelaySeconds = MetastoreConf.getTimeVar(conf,
+         ConfVars.CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);
+ 
+     // user wants file store based configuration
+     if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS) != null) {
+       resolveUris();
+     } else {
+       LOG.error("NOT getting uris from conf");
+       throw new MetaException("MetaStoreURIs not found in conf file");
+     }
+ 
+     //If HADOOP_PROXY_USER is set in env or property,
+     //then need to create metastore client that proxies as that user.
+     String HADOOP_PROXY_USER = "HADOOP_PROXY_USER";
+     String proxyUser = System.getenv(HADOOP_PROXY_USER);
+     if (proxyUser == null) {
+       proxyUser = System.getProperty(HADOOP_PROXY_USER);
+     }
+     //if HADOOP_PROXY_USER is set, create DelegationToken using real user
+     if(proxyUser != null) {
+       LOG.info(HADOOP_PROXY_USER + " is set. Using delegation "
+           + "token for HiveMetaStore connection.");
+       try {
+         UserGroupInformation.getLoginUser().getRealUser().doAs(
+             new PrivilegedExceptionAction<Void>() {
+               @Override
+               public Void run() throws Exception {
+                 open();
+                 return null;
+               }
+             });
+         String delegationTokenPropString = "DelegationTokenForHiveMetaStoreServer";
+         String delegationTokenStr = getDelegationToken(proxyUser, proxyUser);
+         SecurityUtils.setTokenStr(UserGroupInformation.getCurrentUser(), delegationTokenStr,
+             delegationTokenPropString);
+         MetastoreConf.setVar(this.conf, ConfVars.TOKEN_SIGNATURE, delegationTokenPropString);
+         close();
+       } catch (Exception e) {
+         LOG.error("Error while setting delegation token for " + proxyUser, e);
+         if(e instanceof MetaException) {
+           throw (MetaException)e;
+         } else {
+           throw new MetaException(e.getMessage());
+         }
+       }
+     }
+     // finally open the store
+     open();
+   }
+ 
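For orientation, here is a minimal, self-contained usage sketch of the constructors above. The endpoint thrift://localhost:9083 and the class name MetaStoreClientExample are illustrative assumptions, not part of this patch; only APIs that appear in this file are used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.thrift.TException;

public class MetaStoreClientExample {
  public static void main(String[] args) throws TException {
    Configuration conf = MetastoreConf.newMetastoreConf();
    // Placeholder endpoint; replace with a real metastore URI. Without THRIFT_URIS the
    // constructor takes the embedded-metastore path instead of opening a Thrift connection.
    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:9083");
    try (HiveMetaStoreClient client = new HiveMetaStoreClient(conf)) {
      // List catalogs just to verify the connection works.
      System.out.println(client.getCatalogs());
    }
  }
}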
+   private void resolveUris() throws MetaException {
+     String[] metastoreUrisString = MetastoreConf.getVar(conf,
+             ConfVars.THRIFT_URIS).split(",");
+ 
+     List<URI> metastoreURIArray = new ArrayList<URI>();
+     try {
+       for (String s : metastoreUrisString) {
+         URI tmpUri = new URI(s);
+         if (tmpUri.getScheme() == null) {
+           throw new IllegalArgumentException("URI: " + s
+                   + " does not have a scheme");
+         }
+         if (uriResolverHook != null) {
+           metastoreURIArray.addAll(uriResolverHook.resolveURI(tmpUri));
+         } else {
+           metastoreURIArray.add(new URI(
+                   tmpUri.getScheme(),
+                   tmpUri.getUserInfo(),
+                   HadoopThriftAuthBridge.getBridge().getCanonicalHostName(tmpUri.getHost()),
+                   tmpUri.getPort(),
+                   tmpUri.getPath(),
+                   tmpUri.getQuery(),
+                   tmpUri.getFragment()
+           ));
+         }
+       }
+       metastoreUris = metastoreURIArray.toArray(new URI[0]);
+ 
+       if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) {
+         // Arrays.asList returns a fixed-size list backed by the array itself, so shuffling the
+         // list shuffles metastoreUris in place; no reassignment or unchecked cast is needed.
+         Collections.shuffle(Arrays.asList(metastoreUris));
+       }
+     } catch (IllegalArgumentException e) {
+       throw (e);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+   }
+ 
+ 
+   private MetaStoreFilterHook loadFilterHooks() throws IllegalStateException {
+     Class<? extends MetaStoreFilterHook> authProviderClass = MetastoreConf.
+         getClass(conf, ConfVars.FILTER_HOOK, DefaultMetaStoreFilterHookImpl.class,
+             MetaStoreFilterHook.class);
+     String msg = "Unable to create instance of " + authProviderClass.getName() + ": ";
+     try {
+       Constructor<? extends MetaStoreFilterHook> constructor =
+           authProviderClass.getConstructor(Configuration.class);
+       return constructor.newInstance(conf);
+     } catch (NoSuchMethodException | SecurityException | IllegalAccessException | InstantiationException | IllegalArgumentException | InvocationTargetException e) {
+       throw new IllegalStateException(msg + e.getMessage(), e);
+     }
+   }
+ 
+   //multiple clients may initialize the hook at the same time
+   synchronized private URIResolverHook loadUriResolverHook() throws IllegalStateException {
+ 
+     String uriResolverClassName =
+             MetastoreConf.getAsString(conf, ConfVars.URI_RESOLVER);
+     if (uriResolverClassName.equals("")) {
+       return null;
+     } else {
+       LOG.info("Loading uri resolver " + uriResolverClassName);
+       try {
+         Class<?> uriResolverClass = Class.forName(uriResolverClassName, true,
+                 JavaUtils.getClassLoader());
+         return (URIResolverHook) ReflectionUtils.newInstance(uriResolverClass, null);
+       } catch (Exception e) {
+         LOG.error("Exception loading uri resolver hook", e);
+         return null;
+       }
+     }
+   }
+ 
+   /**
+    * Swaps the first element of the metastoreUris array with a random element from the
+    * remainder of the array.
+    */
+   private void promoteRandomMetaStoreURI() {
+     if (metastoreUris.length <= 1) {
+       return;
+     }
+     Random rng = new Random();
+     int index = rng.nextInt(metastoreUris.length - 1) + 1;
+     URI tmp = metastoreUris[0];
+     metastoreUris[0] = metastoreUris[index];
+     metastoreUris[index] = tmp;
+   }
+ 
+   @VisibleForTesting
+   public TTransport getTTransport() {
+     return transport;
+   }
+ 
+   @Override
+   public boolean isLocalMetaStore() {
+     return localMetaStore;
+   }
+ 
+   @Override
+   public boolean isCompatibleWith(Configuration conf) {
+     // Make a local copy of the currentMetaVars reference; there is a race condition where
+     // currentMetaVars might be changed during the execution of this method.
+     Map<String, String> currentMetaVarsCopy = currentMetaVars;
+     if (currentMetaVarsCopy == null) {
+       return false; // recreate
+     }
+     boolean compatible = true;
+     for (ConfVars oneVar : MetastoreConf.metaVars) {
+       // Since metaVars are all of different types, use string for comparison
+       String oldVar = currentMetaVarsCopy.get(oneVar.getVarname());
+       String newVar = MetastoreConf.getAsString(conf, oneVar);
+       if (oldVar == null ||
+           (oneVar.isCaseSensitive() ? !oldVar.equals(newVar) : !oldVar.equalsIgnoreCase(newVar))) {
+         LOG.info("Metastore configuration " + oneVar.toString() +
+             " changed from " + oldVar + " to " + newVar);
+         compatible = false;
+       }
+     }
+     return compatible;
+   }
+ 
+   @Override
+   public void setHiveAddedJars(String addedJars) {
+     MetastoreConf.setVar(conf, ConfVars.ADDED_JARS, addedJars);
+   }
+ 
+   @Override
+   public void reconnect() throws MetaException {
+     if (localMetaStore) {
+       // For direct DB connections we don't yet support reestablishing connections.
+       throw new MetaException("For direct MetaStore DB connections, we don't support retries" +
+           " at the client level.");
+     } else {
+       close();
+ 
+       if (uriResolverHook != null) {
+         //for dynamic uris, re-lookup if there are new metastore locations
+         resolveUris();
+       }
+ 
+       if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) {
+         // Swap the first element of the metastoreUris[] with a random element from the rest
+         // of the array. Rationale being that this method will generally be called when the default
+         // connection has died and the default connection is likely to be the first array element.
+         promoteRandomMetaStoreURI();
+       }
+       open();
+     }
+   }
+ 
+   @Override
+   public void alter_table(String dbname, String tbl_name, Table new_tbl) throws TException {
+     alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null);
+   }
+ 
+   @Override
+   public void alter_table(String defaultDatabaseName, String tblName, Table table,
+                           boolean cascade) throws TException {
+     EnvironmentContext environmentContext = new EnvironmentContext();
+     if (cascade) {
+       environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
+     }
+     alter_table_with_environmentContext(defaultDatabaseName, tblName, table, environmentContext);
+   }
+ 
+   @Override
+   public void alter_table_with_environmentContext(String dbname, String tbl_name, Table new_tbl,
+       EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException {
+     HiveMetaHook hook = getHook(new_tbl);
+     if (hook != null) {
+       hook.preAlterTable(new_tbl, envContext);
+     }
 -    client.alter_table_with_environment_context(prependCatalogToDbName(dbname, conf),
 -        tbl_name, new_tbl, envContext);
++    AlterTableRequest req = new AlterTableRequest(dbname, tbl_name, new_tbl);
++    req.setCatName(MetaStoreUtils.getDefaultCatalog(conf));
++    req.setEnvironmentContext(envContext);
++    client.alter_table_req(req);
+   }
+ 
+   @Override
+   public void alter_table(String catName, String dbName, String tblName, Table newTable,
+                          EnvironmentContext envContext) throws TException {
 -    client.alter_table_with_environment_context(prependCatalogToDbName(catName,
 -        dbName, conf), tblName, newTable, envContext);
++    // This never used to call the hook. Why? There's overload madness in metastore...
++    AlterTableRequest req = new AlterTableRequest(dbName, tblName, newTable);
++    req.setCatName(catName);
++    req.setEnvironmentContext(envContext);
++    client.alter_table_req(req);
++  }
++
++  @Override
++  public void alter_table(String catName, String dbName, String tbl_name, Table new_tbl,
++      EnvironmentContext envContext, long txnId, String validWriteIds)
++          throws InvalidOperationException, MetaException, TException {
++    HiveMetaHook hook = getHook(new_tbl);
++    if (hook != null) {
++      hook.preAlterTable(new_tbl, envContext);
++    }
++    AlterTableRequest req = new AlterTableRequest(dbName, tbl_name, new_tbl);
++    req.setCatName(catName);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(validWriteIds);
++    req.setEnvironmentContext(envContext);
++    client.alter_table_req(req);
+   }
+ 
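The overload above threads a transaction id and a valid write-id list through AlterTableRequest. A rough, hypothetical sketch of a call site follows; the catalog/database/table names and the transaction values are placeholders, not taken from this patch.

// Fragment: assumes an already-opened IMetaStoreClient `client` (see the earlier constructor
// sketch) and that txnId / validWriteIds were obtained from the transaction subsystem.
Table newTable = client.getTable("default", "t");   // hypothetical db/table
newTable.putToParameters("last_modified_by", "example");
long txnId = 42L;                                   // placeholder value
String validWriteIds = null;                        // placeholder; a serialized ValidWriteIdList in practice
client.alter_table("hive", "default", "t", newTable, null /* EnvironmentContext */, txnId, validWriteIds);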
++  @Deprecated
+   @Override
+   public void renamePartition(final String dbname, final String tableName, final List<String> part_vals,
+                               final Partition newPart) throws TException {
 -    renamePartition(getDefaultCatalog(conf), dbname, tableName, part_vals, newPart);
++    renamePartition(getDefaultCatalog(conf), dbname, tableName, part_vals, newPart, -1, null);
+   }
+ 
+   @Override
+   public void renamePartition(String catName, String dbname, String tableName, List<String> part_vals,
 -                              Partition newPart) throws TException {
 -    client.rename_partition(prependCatalogToDbName(catName, dbname, conf), tableName, part_vals, newPart);
 -
++                              Partition newPart, long txnId, String validWriteIds) throws TException {
++    RenamePartitionRequest req = new RenamePartitionRequest(dbname, tableName, part_vals, newPart);
++    req.setCatName(catName);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(validWriteIds);
++    client.rename_partition_req(req);
+   }
+ 
+   private void open() throws MetaException {
+     isConnected = false;
+     TTransportException tte = null;
+     boolean useSSL = MetastoreConf.getBoolVar(conf, ConfVars.USE_SSL);
+     boolean useSasl = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_SASL);
+     boolean useFramedTransport = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_FRAMED_TRANSPORT);
+     boolean useCompactProtocol = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_COMPACT_PROTOCOL);
+     int clientSocketTimeout = (int) MetastoreConf.getTimeVar(conf,
+         ConfVars.CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
+ 
+     for (int attempt = 0; !isConnected && attempt < retries; ++attempt) {
+       for (URI store : metastoreUris) {
+         LOG.info("Trying to connect to metastore with URI " + store);
+ 
+         try {
+           if (useSSL) {
+             try {
+               String trustStorePath = MetastoreConf.getVar(conf, ConfVars.SSL_TRUSTSTORE_PATH).trim();
+               if (trustStorePath.isEmpty()) {
+                 throw new IllegalArgumentException(ConfVars.SSL_TRUSTSTORE_PATH.toString()
+                     + " Not configured for SSL connection");
+               }
+               String trustStorePassword =
+                   MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.SSL_TRUSTSTORE_PASSWORD);
+ 
+               // Create an SSL socket and connect
+               transport = SecurityUtils.getSSLSocket(store.getHost(), store.getPort(), clientSocketTimeout,
+                   trustStorePath, trustStorePassword );
+               LOG.info("Opened an SSL connection to metastore, current connections: " + connCount.incrementAndGet());
+             } catch(IOException e) {
+               throw new IllegalArgumentException(e);
+             } catch(TTransportException e) {
+               tte = e;
+               throw new MetaException(e.toString());
+             }
+           } else {
+             transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout);
+           }
+ 
+           if (useSasl) {
+             // Wrap thrift connection with SASL for secure connection.
+             try {
+               HadoopThriftAuthBridge.Client authBridge =
+                 HadoopThriftAuthBridge.getBridge().createClient();
+ 
+               // check if we should use delegation tokens to authenticate
+               // the call below gets hold of the tokens if they are set up by hadoop
+               // this should happen on the map/reduce tasks if the client added the
+               // tokens into hadoop's credential store in the front end during job
+               // submission.
+               String tokenSig = MetastoreConf.getVar(conf, ConfVars.TOKEN_SIGNATURE);
+               // tokenSig could be null
+               tokenStrForm = SecurityUtils.getTokenStrForm(tokenSig);
+ 
+               if(tokenStrForm != null) {
+                 LOG.info("HMSC::open(): Found delegation token. Creating DIGEST-based thrift connection.");
+                 // authenticate using delegation tokens via the "DIGEST" mechanism
+                 transport = authBridge.createClientTransport(null, store.getHost(),
+                     "DIGEST", tokenStrForm, transport,
+                         MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
+               } else {
+                 LOG.info("HMSC::open(): Could not find delegation token. Creating KERBEROS-based thrift connection.");
+                 String principalConfig =
+                     MetastoreConf.getVar(conf, ConfVars.KERBEROS_PRINCIPAL);
+                 transport = authBridge.createClientTransport(
+                     principalConfig, store.getHost(), "KERBEROS", null,
+                     transport, MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
+               }
+             } catch (IOException ioe) {
+               LOG.error("Couldn't create client transport", ioe);
+               throw new MetaException(ioe.toString());
+             }
+           } else {
+             if (useFramedTransport) {
+               transport = new TFramedTransport(transport);
+             }
+           }
+ 
+           final TProtocol protocol;
+           if (useCompactProtocol) {
+             protocol = new TCompactProtocol(transport);
+           } else {
+             protocol = new TBinaryProtocol(transport);
+           }
+           client = new ThriftHiveMetastore.Client(protocol);
+           try {
+             if (!transport.isOpen()) {
+               transport.open();
+               LOG.info("Opened a connection to metastore, current connections: " + connCount.incrementAndGet());
+             }
+             isConnected = true;
+           } catch (TTransportException e) {
+             tte = e;
+             if (LOG.isDebugEnabled()) {
+               LOG.warn("Failed to connect to the MetaStore Server...", e);
+             } else {
+               // Don't print full exception trace if DEBUG is not on.
+               LOG.warn("Failed to connect to the MetaStore Server...");
+             }
+           }
+ 
+           if (isConnected && !useSasl && MetastoreConf.getBoolVar(conf, ConfVars.EXECUTE_SET_UGI)){
+             // Call set_ugi, only in unsecure mode.
+             try {
+               UserGroupInformation ugi = SecurityUtils.getUGI();
+               client.set_ugi(ugi.getUserName(), Arrays.asList(ugi.getGroupNames()));
+             } catch (LoginException e) {
+               LOG.warn("Failed to do login. set_ugi() is not successful. " +
+                        "Continuing without it.", e);
+             } catch (IOException e) {
+               LOG.warn("Failed to find ugi of client; set_ugi() is not successful. " +
+                   "Continuing without it.", e);
+             } catch (TException e) {
+               LOG.warn("set_ugi() is not successful. Likely cause: new client talking to old server. "
+                   + "Continuing without it.", e);
+             }
+           }
+         } catch (MetaException e) {
+           LOG.error("Unable to connect to metastore with URI " + store
+                     + " in attempt " + attempt, e);
+         }
+         if (isConnected) {
+           break;
+         }
+       }
+       // Wait before launching the next round of connection retries.
+       if (!isConnected && retryDelaySeconds > 0) {
+         try {
+           LOG.info("Waiting " + retryDelaySeconds + " seconds before next connection attempt.");
+           Thread.sleep(retryDelaySeconds * 1000);
+         } catch (InterruptedException ignore) {}
+       }
+     }
+ 
+     if (!isConnected) {
+       throw new MetaException("Could not connect to meta store using any of the URIs provided." +
+         " Most recent failure: " + StringUtils.stringifyException(tte));
+     }
+ 
+     snapshotActiveConf();
+ 
+     LOG.info("Connected to metastore.");
+   }
+ 
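The branches in open() are driven purely by configuration. A hedged sketch of the relevant knobs, set through the plain Configuration API; the literal values are assumed to match the defaults and are shown only to make the switches visible.

// Fragment: toggles for the transport/protocol paths taken by open(); values are assumed defaults.
Configuration conf = MetastoreConf.newMetastoreConf();
conf.set(ConfVars.USE_SSL.getVarname(), "false");                      // SSL socket vs plain TSocket
conf.set(ConfVars.USE_THRIFT_SASL.getVarname(), "false");              // SASL (Kerberos or DIGEST) wrapping
conf.set(ConfVars.USE_THRIFT_FRAMED_TRANSPORT.getVarname(), "false");  // TFramedTransport wrapping
conf.set(ConfVars.USE_THRIFT_COMPACT_PROTOCOL.getVarname(), "false");  // TCompactProtocol vs TBinaryProtocol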
+   private void snapshotActiveConf() {
+     currentMetaVars = new HashMap<>(MetastoreConf.metaVars.length);
+     for (ConfVars oneVar : MetastoreConf.metaVars) {
+       currentMetaVars.put(oneVar.getVarname(), MetastoreConf.getAsString(conf, oneVar));
+     }
+   }
+ 
+   @Override
+   public String getTokenStrForm() throws IOException {
+     return tokenStrForm;
+    }
+ 
+   @Override
+   public void close() {
+     isConnected = false;
+     currentMetaVars = null;
+     try {
+       if (null != client) {
+         client.shutdown();
+       }
+     } catch (TException e) {
+       LOG.debug("Unable to shutdown metastore client. Will try closing transport directly.", e);
+     }
+     // The transport should already have been closed via client.shutdown(), so this is not
+     // strictly required, but we make the call just in case.
+     if ((transport != null) && transport.isOpen()) {
+       transport.close();
+       LOG.info("Closed a connection to metastore, current connections: " + connCount.decrementAndGet());
+     }
+   }
+ 
+   @Override
+   public void setMetaConf(String key, String value) throws TException {
+     client.setMetaConf(key, value);
+   }
+ 
+   @Override
+   public String getMetaConf(String key) throws TException {
+     return client.getMetaConf(key);
+   }
+ 
+   @Override
+   public void createCatalog(Catalog catalog) throws TException {
+     client.create_catalog(new CreateCatalogRequest(catalog));
+   }
+ 
+   @Override
+   public void alterCatalog(String catalogName, Catalog newCatalog) throws TException {
+     client.alter_catalog(new AlterCatalogRequest(catalogName, newCatalog));
+   }
+ 
+   @Override
+   public Catalog getCatalog(String catName) throws TException {
+     GetCatalogResponse rsp = client.get_catalog(new GetCatalogRequest(catName));
+     return rsp == null ? null : filterHook.filterCatalog(rsp.getCatalog());
+   }
+ 
+   @Override
+   public List<String> getCatalogs() throws TException {
+     GetCatalogsResponse rsp = client.get_catalogs();
+     return rsp == null ? null : filterHook.filterCatalogs(rsp.getNames());
+   }
+ 
+   @Override
+   public void dropCatalog(String catName) throws TException {
+     client.drop_catalog(new DropCatalogRequest(catName));
+   }
+ 
+   /**
+    * @param new_part
+    * @return the added partition
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partition(org.apache.hadoop.hive.metastore.api.Partition)
+    */
+   @Override
+   public Partition add_partition(Partition new_part) throws TException {
+     return add_partition(new_part, null);
+   }
+ 
+   public Partition add_partition(Partition new_part, EnvironmentContext envContext)
+       throws TException {
+     if (new_part != null && !new_part.isSetCatName()) {
+       new_part.setCatName(getDefaultCatalog(conf));
+     }
+     Partition p = client.add_partition_with_environment_context(new_part, envContext);
+     return deepCopy(p);
+   }
+ 
+   /**
+    * @param new_parts
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partitions(List)
+    */
+   @Override
+   public int add_partitions(List<Partition> new_parts) throws TException {
+     if (new_parts == null || new_parts.contains(null)) {
+       throw new MetaException("Partitions cannot be null.");
+     }
+     if (new_parts != null && !new_parts.isEmpty() && !new_parts.get(0).isSetCatName()) {
+       final String defaultCat = getDefaultCatalog(conf);
+       new_parts.forEach(p -> p.setCatName(defaultCat));
+     }
+     return client.add_partitions(new_parts);
+   }
+ 
+   @Override
+   public List<Partition> add_partitions(
+       List<Partition> parts, boolean ifNotExists, boolean needResults) throws TException {
+     if (parts == null || parts.contains(null)) {
+       throw new MetaException("Partitions cannot be null.");
+     }
+     if (parts.isEmpty()) {
+       return needResults ? new ArrayList<>() : null;
+     }
+     Partition part = parts.get(0);
+     // Have to set it for each partition too
+     if (!part.isSetCatName()) {
+       final String defaultCat = getDefaultCatalog(conf);
+       parts.forEach(p -> p.setCatName(defaultCat));
+     }
+     AddPartitionsRequest req = new AddPartitionsRequest(
+         part.getDbName(), part.getTableName(), parts, ifNotExists);
+     req.setCatName(part.isSetCatName() ? part.getCatName() : getDefaultCatalog(conf));
+     req.setNeedResult(needResults);
+     AddPartitionsResult result = client.add_partitions_req(req);
+     return needResults ? filterHook.filterPartitions(result.getPartitions()) : null;
+   }
+ 
+   @Override
+   public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TException {
+     if (partitionSpec == null) {
+       throw new MetaException("PartitionSpec cannot be null.");
+     }
+     if (partitionSpec.getCatName() == null) {
+       partitionSpec.setCatName(getDefaultCatalog(conf));
+     }
+     return client.add_partitions_pspec(partitionSpec.toPartitionSpec());
+   }
+ 
+   @Override
+   public Partition appendPartition(String db_name, String table_name,
+       List<String> part_vals) throws TException {
+     return appendPartition(getDefaultCatalog(conf), db_name, table_name, part_vals);
+   }
+ 
+   @Override
+   public Partition appendPartition(String dbName, String tableName, String partName)
+       throws TException {
+     return appendPartition(getDefaultCatalog(conf), dbName, tableName, partName);
+   }
+ 
+   @Override
+   public Partition appendPartition(String catName, String dbName, String tableName,
+                                    String name) throws TException {
+     Partition p = client.append_partition_by_name(prependCatalogToDbName(
+         catName, dbName, conf), tableName, name);
+     return deepCopy(p);
+   }
+ 
+   @Override
+   public Partition appendPartition(String catName, String dbName, String tableName,
+                                    List<String> partVals) throws TException {
+     Partition p = client.append_partition(prependCatalogToDbName(
+         catName, dbName, conf), tableName, partVals);
+     return deepCopy(p);
+   }
+ 
+   @Deprecated
+   public Partition appendPartition(String dbName, String tableName, List<String> partVals,
+                                    EnvironmentContext ec) throws TException {
+     return client.append_partition_with_environment_context(prependCatalogToDbName(dbName, conf),
+         tableName, partVals, ec).deepCopy();
+   }
+ 
+   /**
+    * Exchange the partition between two tables
+    * @param partitionSpecs partition specs of the parent partition to be exchanged
+    * @param destDb the db of the destination table
+    * @param destinationTableName the destination table name
+    * @return new partition after exchanging
+    */
+   @Override
+   public Partition exchange_partition(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destDb,
+       String destinationTableName) throws TException {
+     return exchange_partition(partitionSpecs, getDefaultCatalog(conf), sourceDb, sourceTable,
+         getDefaultCatalog(conf), destDb, destinationTableName);
+   }
+ 
+   @Override
+   public Partition exchange_partition(Map<String, String> partitionSpecs, String sourceCat,
+                                       String sourceDb, String sourceTable, String destCat,
+                                       String destDb, String destTableName) throws TException {
+     return client.exchange_partition(partitionSpecs, prependCatalogToDbName(sourceCat, sourceDb, conf),
+         sourceTable, prependCatalogToDbName(destCat, destDb, conf), destTableName);
+   }
+ 
+   /**
+    * Exchange the partitions between two tables
+    * @param partitionSpecs partition specs of the parent partition to be exchanged
+    * @param destDb the db of the destination table
+    * @param destinationTableName the destination table name
+    * @return new partitions after exchanging
+    */
+   @Override
+   public List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destDb,
+       String destinationTableName) throws TException {
+     return exchange_partitions(partitionSpecs, getDefaultCatalog(conf), sourceDb, sourceTable,
+         getDefaultCatalog(conf), destDb, destinationTableName);
+   }
+ 
+   @Override
++  public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
++      String dbName, String tableName, List<String> partNames, List<String> colNames,
++      long txnId, String validWriteIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    return getPartitionColumnStatistics(getDefaultCatalog(conf), dbName, tableName,
++        partNames, colNames, txnId, validWriteIdList);
++  }
++
++  @Override
++  public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
++      String catName, String dbName, String tableName, List<String> partNames,
++      List<String> colNames, long txnId, String validWriteIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName, tableName, colNames,
++        partNames);
++    rqst.setCatName(catName);
++    rqst.setTxnId(txnId);
++    rqst.setValidWriteIdList(validWriteIdList);
++    return client.get_partitions_statistics_req(rqst).getPartStats();
++  }
++
++  @Override
++  public AggrStats getAggrColStatsFor(String dbName, String tblName, List<String> colNames,
++      List<String> partNames, long txnId, String writeIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    return getAggrColStatsFor(getDefaultCatalog(conf), dbName, tblName, colNames,
++        partNames, txnId, writeIdList);
++  }
++
++  @Override
++  public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, List<String> colNames,
++      List<String> partNames, long txnId, String writeIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    if (colNames.isEmpty() || partNames.isEmpty()) {
++      LOG.debug("colNames or partNames is empty: short-circuiting stats eval on the client side.");
++      return new AggrStats(new ArrayList<>(), 0); // Nothing to aggregate
++    }
++    PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames);
++    req.setCatName(catName);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(writeIdList);
++    return client.get_aggr_stats_for(req);
++  }
++
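The stats overloads above add the transaction snapshot (txnId plus a valid write-id list) to the partition-stats and aggregate-stats requests. A hypothetical fragment of a call site, with invented table, column, and partition names:

// Fragment: assumes an open IMetaStoreClient `client` and txnId / validWriteIdList obtained elsewhere.
// Both lists must be non-empty, otherwise the aggregate call short-circuits on the client side (see above).
List<String> cols = Arrays.asList("ss_quantity");                                          // hypothetical column
List<String> parts = Arrays.asList("ss_sold_date=2000-01-01", "ss_sold_date=2000-01-02");  // hypothetical partitions
AggrStats aggr = client.getAggrColStatsFor("default", "store_sales", cols, parts, txnId, validWriteIdList);
System.out.println(aggr.getPartsFound() + " partition(s) had stats for " + aggr.getColStatsSize() + " column(s)");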
++  @Override
+   public List<Partition> exchange_partitions(Map<String, String> partitionSpecs, String sourceCat,
+                                              String sourceDb, String sourceTable, String destCat,
+                                              String destDb, String destTableName) throws TException {
+     return client.exchange_partitions(partitionSpecs, prependCatalogToDbName(sourceCat, sourceDb, conf),
+         sourceTable, prependCatalogToDbName(destCat, destDb, conf), destTableName);
+   }
+ 
+   @Override
+   public void validatePartitionNameCharacters(List<String> partVals)
+       throws TException, MetaException {
+     client.partition_name_has_valid_characters(partVals, true);
+   }
+ 
+   /**
+    * Create a new Database
+    * @param db
+    * @throws AlreadyExistsException
+    * @throws InvalidObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(Database)
+    */
+   @Override
+   public void createDatabase(Database db)
+       throws AlreadyExistsException, InvalidObjectException, MetaException, TException {
+     if (!db.isSetCatalogName()) {
+       db.setCatalogName(getDefaultCatalog(conf));
+     }
+     client.create_database(db);
+   }
+ 
+   /**
+    * @param tbl
+    * @throws MetaException
+    * @throws NoSuchObjectException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table)
+    */
+   @Override
+   public void createTable(Table tbl) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, NoSuchObjectException, TException {
+     createTable(tbl, null);
+   }
+ 
+   public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, NoSuchObjectException, TException {
+     if (!tbl.isSetCatName()) {
+       tbl.setCatName(getDefaultCatalog(conf));
+     }
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preCreateTable(tbl);
+     }
+     boolean success = false;
+     try {
+       // Subclasses can override this step (for example, for temporary tables)
+       create_table_with_environment_context(tbl, envContext);
+       if (hook != null) {
+         hook.commitCreateTable(tbl);
+       }
+       success = true;
+     }
+     finally {
+       if (!success && (hook != null)) {
+         try {
+           hook.rollbackCreateTable(tbl);
+         } catch (Exception e){
+           LOG.error("Create rollback failed with", e);
+         }
+       }
+     }
+   }
+ 
+   @Override
+   public void createTableWithConstraints(Table tbl,
+     List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+     List<SQLUniqueConstraint> uniqueConstraints,
+     List<SQLNotNullConstraint> notNullConstraints,
+     List<SQLDefaultConstraint> defaultConstraints,
+     List<SQLCheckConstraint> checkConstraints)
+         throws AlreadyExistsException, InvalidObjectException,
+         MetaException, NoSuchObjectException, TException {
+ 
+     if (!tbl.isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       tbl.setCatName(defaultCat);
+       if (primaryKeys != null) {
+         primaryKeys.forEach(pk -> pk.setCatName(defaultCat));
+       }
+       if (foreignKeys != null) {
+         foreignKeys.forEach(fk -> fk.setCatName(defaultCat));
+       }
+       if (uniqueConstraints != null) {
+         uniqueConstraints.forEach(uc -> uc.setCatName(defaultCat));
+       }
+       if (notNullConstraints != null) {
+         notNullConstraints.forEach(nn -> nn.setCatName(defaultCat));
+       }
+       if (defaultConstraints != null) {
+         defaultConstraints.forEach(def -> def.setCatName(defaultCat));
+       }
+       if (checkConstraints != null) {
+         checkConstraints.forEach(cc -> cc.setCatName(defaultCat));
+       }
+     }
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preCreateTable(tbl);
+     }
+     boolean success = false;
+     try {
+       // Subclasses can override this step (for example, for temporary tables)
+       client.create_table_with_constraints(tbl, primaryKeys, foreignKeys,
+           uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
+       if (hook != null) {
+         hook.commitCreateTable(tbl);
+       }
+       success = true;
+     } finally {
+       if (!success && (hook != null)) {
+         hook.rollbackCreateTable(tbl);
+       }
+     }
+   }
+ 
+   @Override
+   public void dropConstraint(String dbName, String tableName, String constraintName)
+       throws TException {
+     dropConstraint(getDefaultCatalog(conf), dbName, tableName, constraintName);
+   }
+ 
+   @Override
+   public void dropConstraint(String catName, String dbName, String tableName, String constraintName)
+       throws TException {
+     DropConstraintRequest rqst = new DropConstraintRequest(dbName, tableName, constraintName);
+     rqst.setCatName(catName);
+     client.drop_constraint(rqst);
+   }
+ 
+   @Override
+   public void addPrimaryKey(List<SQLPrimaryKey> primaryKeyCols) throws TException {
+     if (!primaryKeyCols.isEmpty() && !primaryKeyCols.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       primaryKeyCols.forEach(pk -> pk.setCatName(defaultCat));
+     }
+     client.add_primary_key(new AddPrimaryKeyRequest(primaryKeyCols));
+   }
+ 
+   @Override
+   public void addForeignKey(List<SQLForeignKey> foreignKeyCols) throws TException {
+     if (!foreignKeyCols.isEmpty() && !foreignKeyCols.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       foreignKeyCols.forEach(fk -> fk.setCatName(defaultCat));
+     }
+     client.add_foreign_key(new AddForeignKeyRequest(foreignKeyCols));
+   }
+ 
+   @Override
+   public void addUniqueConstraint(List<SQLUniqueConstraint> uniqueConstraintCols) throws
+     NoSuchObjectException, MetaException, TException {
+     if (!uniqueConstraintCols.isEmpty() && !uniqueConstraintCols.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       uniqueConstraintCols.forEach(uc -> uc.setCatName(defaultCat));
+     }
+     client.add_unique_constraint(new AddUniqueConstraintRequest(uniqueConstraintCols));
+   }
+ 
+   @Override
+   public void addNotNullConstraint(List<SQLNotNullConstraint> notNullConstraintCols) throws
+     NoSuchObjectException, MetaException, TException {
+     if (!notNullConstraintCols.isEmpty() && !notNullConstraintCols.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       notNullConstraintCols.forEach(nn -> nn.setCatName(defaultCat));
+     }
+     client.add_not_null_constraint(new AddNotNullConstraintRequest(notNullConstraintCols));
+   }
+ 
+   @Override
+   public void addDefaultConstraint(List<SQLDefaultConstraint> defaultConstraints) throws
+       NoSuchObjectException, MetaException, TException {
+     if (!defaultConstraints.isEmpty() && !defaultConstraints.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       defaultConstraints.forEach(def -> def.setCatName(defaultCat));
+     }
+     client.add_default_constraint(new AddDefaultConstraintRequest(defaultConstraints));
+   }
+ 
+   @Override
+   public void addCheckConstraint(List<SQLCheckConstraint> checkConstraints) throws
+       NoSuchObjectException, MetaException, TException {
+     if (!checkConstraints.isEmpty() && !checkConstraints.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       checkConstraints.forEach(cc -> cc.setCatName(defaultCat));
+     }
+     client.add_check_constraint(new AddCheckConstraintRequest(checkConstraints));
+   }
+ 
+   /**
+    * @param type
+    * @return true or false
+    * @throws AlreadyExistsException
+    * @throws InvalidObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_type(org.apache.hadoop.hive.metastore.api.Type)
+    */
+   public boolean createType(Type type) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, TException {
+     return client.create_type(type);
+   }
+ 
+   /**
+    * @param name
+    * @throws NoSuchObjectException
+    * @throws InvalidOperationException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_database(java.lang.String, boolean, boolean)
+    */
+   @Override
+   public void dropDatabase(String name)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(getDefaultCatalog(conf), name, true, false, false);
+   }
+ 
+   @Override
+   public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(getDefaultCatalog(conf), name, deleteData, ignoreUnknownDb, false);
+   }
+ 
+   @Override
+   public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(getDefaultCatalog(conf), name, deleteData, ignoreUnknownDb, cascade);
+   }
+ 
+   @Override
+   public void dropDatabase(String catalogName, String dbName, boolean deleteData,
+                            boolean ignoreUnknownDb, boolean cascade)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     try {
+       getDatabase(catalogName, dbName);
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownDb) {
+         throw e;
+       }
+       return;
+     }
+ 
+     String dbNameWithCatalog = prependCatalogToDbName(catalogName, dbName, conf);
+ 
+     if (cascade) {
+       // Note that this logic may drop some of the tables of the database
+       // even if the drop database fail for any reason
+       // TODO: Fix this
+       List<String> materializedViews = getTables(dbName, ".*", TableType.MATERIALIZED_VIEW);
+       for (String table : materializedViews) {
+         // First we delete the materialized views
+         dropTable(dbName, table, deleteData, true);
+       }
+ 
+       /**
+        * When dropping a database with cascade, client-side hooks have to be called for each table
+        * removal. If {@link org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars#BATCH_RETRIEVE_MAX
+        * BATCH_RETRIEVE_MAX} is less than the number of tables in the DB, the hooks are called one by
+        * one, each alongside a
+        * {@link #dropTable(String, String, boolean, boolean, EnvironmentContext) dropTable} call, to
+        * ensure transactionality.
+        */
+       List<String> tableNameList = getAllTables(dbName);
+       int tableCount = tableNameList.size();
+       int maxBatchSize = MetastoreConf.getIntVar(conf, ConfVars.BATCH_RETRIEVE_MAX);
+       LOG.debug("Selecting dropDatabase method for " + dbName + " (" + tableCount + " tables), " +
+              ConfVars.BATCH_RETRIEVE_MAX.getVarname() + "=" + maxBatchSize);
+ 
+       if (tableCount > maxBatchSize) {
+         LOG.debug("Dropping database in a per table batch manner.");
+         dropDatabaseCascadePerTable(catalogName, dbName, tableNameList, deleteData, maxBatchSize);
+       } else {
+         LOG.debug("Dropping database in a per DB manner.");
+         dropDatabaseCascadePerDb(catalogName, dbName, tableNameList, deleteData);
+       }
+ 
+     } else {
+       client.drop_database(dbNameWithCatalog, deleteData, cascade);
+     }
+   }
+ 
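A small, hypothetical example of the cascade path discussed in the comment above; the database name is invented for illustration.

// Fragment: assumes an open IMetaStoreClient `client`.
// Drops the hypothetical database "scratch_db", all of its tables, and their data.
client.dropDatabase("scratch_db", true /* deleteData */, true /* ignoreUnknownDb */, true /* cascade */);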
+   /**
+    * Handles dropDatabase by invoking drop_table in HMS for each table.
+    * Useful when the table list of the DB is too large to fit in memory. It will retrieve tables in
+    * chunks and, for each table with a drop_table hook, invoke drop_table on both HMS and
+    * the hook. This is a time-consuming operation, so hookless tables are skipped and will be dropped on
+    * the server side when the client invokes drop_database.
+    * Note that this is 'less transactional' than dropDatabaseCascadePerDb since we're dropping
+    * table-level objects, so the overall outcome of this method might be a partially dropped DB.
+    * @param catName
+    * @param dbName
+    * @param tableList
+    * @param deleteData
+    * @param maxBatchSize
+    * @throws TException
+    */
+   private void dropDatabaseCascadePerTable(String catName, String dbName, List<String> tableList,
+                                            boolean deleteData, int maxBatchSize) throws TException {
+     String dbNameWithCatalog = prependCatalogToDbName(catName, dbName, conf);
+     for (Table table : new TableIterable(this, catName, dbName, tableList, maxBatchSize)) {
+       boolean success = false;
+       HiveMetaHook hook = getHook(table);
+       if (hook == null) {
+         continue;
+       }
+       try {
+         hook.preDropTable(table);
+         client.drop_table_with_environment_context(dbNameWithCatalog, table.getTableName(), deleteData, null);
+         hook.commitDropTable(table, deleteData);
+         success = true;
+       } finally {
+         if (!success) {
+           hook.rollbackDropTable(table);
+         }
+       }
+     }
+     client.drop_database(dbNameWithCatalog, deleteData, true);
+   }
+ 
+   /**
+    * Handles dropDatabase by invoking drop_database in HMS.
+    * Useful when the table list of the DB can fit in memory; it will retrieve all tables at once and
+    * call drop_database once. Also handles drop_table hooks.
+    * @param catName
+    * @param dbName
+    * @param tableList
+    * @param deleteData
+    * @throws TException
+    */
+   private void dropDatabaseCascadePerDb(String catName, String dbName, List<String> tableList,
+                                         boolean deleteData) throws TException {
+     String dbNameWithCatalog = prependCatalogToDbName(catName, dbName, conf);
+     List<Table> tables = getTableObjectsByName(catName, dbName, tableList);
+     boolean success = false;
+     try {
+       for (Table table : tables) {
+         HiveMetaHook hook = getHook(table);
+         if (hook == null) {
+           continue;
+         }
+         hook.preDropTable(table);
+       }
+       client.drop_database(dbNameWithCatalog, deleteData, true);
+       for (Table table : tables) {
+         HiveMetaHook hook = getHook(table);
+         if (hook == null) {
+           continue;
+         }
+         hook.commitDropTable(table, deleteData);
+       }
+       success = true;
+     } finally {
+       if (!success) {
+         for (Table table : tables) {
+           HiveMetaHook hook = getHook(table);
+           if (hook == null) {
+             continue;
+           }
+           hook.rollbackDropTable(table);
+         }
+       }
+     }
+   }
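For illustration only (not part of this patch): a minimal client-side sketch of the call that exercises the two strategies above. The database name and the msClient variable are assumptions.

    // Minimal sketch; msClient is assumed to be an already-connected HiveMetaStoreClient.
    // With cascade == true, dropDatabase takes the per-table path when the database holds
    // more tables than the configured batch size, and the per-DB path otherwise.
    static void dropDatabaseExample(HiveMetaStoreClient msClient) throws TException {
      msClient.dropDatabase("staging_db",
          /* deleteData */ true,
          /* ignoreUnknownDb */ true,
          /* cascade */ true);
    }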
+ 
+   @Override
+   public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData)
+       throws TException {
+     return dropPartition(getDefaultCatalog(conf), dbName, tableName, partName, deleteData);
+   }
+ 
+   @Override
+   public boolean dropPartition(String catName, String db_name, String tbl_name, String name,
+                                boolean deleteData) throws TException {
+     return client.drop_partition_by_name_with_environment_context(prependCatalogToDbName(
+         catName, db_name, conf), tbl_name, name, deleteData, null);
+   }
+ 
+   private static EnvironmentContext getEnvironmentContextWithIfPurgeSet() {
+     Map<String, String> warehouseOptions = new HashMap<>();
+     warehouseOptions.put("ifPurge", "TRUE");
+     return new EnvironmentContext(warehouseOptions);
+   }
+ 
+   // A bunch of these are in HiveMetaStoreClient but not IMetaStoreClient.  I have marked these
+   // as deprecated and not updated them for the catalogs.  If we really want to support them we
+   // should add them to IMetaStoreClient.
+ 
+   @Deprecated
+   public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals,
+       EnvironmentContext env_context) throws TException {
+     return client.drop_partition_with_environment_context(prependCatalogToDbName(db_name, conf),
+         tbl_name, part_vals, true, env_context);
+   }
+ 
+   @Deprecated
+   public boolean dropPartition(String dbName, String tableName, String partName, boolean dropData,
+                                EnvironmentContext ec) throws TException {
+     return client.drop_partition_by_name_with_environment_context(prependCatalogToDbName(dbName, conf),
+         tableName, partName, dropData, ec);
+   }
+ 
+   @Deprecated
+   public boolean dropPartition(String dbName, String tableName, List<String> partVals)
+       throws TException {
+     return client.drop_partition(prependCatalogToDbName(dbName, conf), tableName, partVals, true);
+   }
+ 
+   @Override
+   public boolean dropPartition(String db_name, String tbl_name,
+       List<String> part_vals, boolean deleteData) throws TException {
+     return dropPartition(getDefaultCatalog(conf), db_name, tbl_name, part_vals,
+         PartitionDropOptions.instance().deleteData(deleteData));
+   }
+ 
+   @Override
+   public boolean dropPartition(String catName, String db_name, String tbl_name,
+                                List<String> part_vals, boolean deleteData) throws TException {
+     return dropPartition(catName, db_name, tbl_name, part_vals, PartitionDropOptions.instance()
+             .deleteData(deleteData));
+   }
+ 
+   @Override
+   public boolean dropPartition(String db_name, String tbl_name,
+                                List<String> part_vals, PartitionDropOptions options) throws TException {
+     return dropPartition(getDefaultCatalog(conf), db_name, tbl_name, part_vals, options);
+   }
+ 
+   @Override
+   public boolean dropPartition(String catName, String db_name, String tbl_name,
+                                List<String> part_vals, PartitionDropOptions options)
+       throws TException {
+     if (options == null) {
+       options = PartitionDropOptions.instance();
+     }
+     if (part_vals != null) {
+       for (String partVal : part_vals) {
+         if (partVal == null) {
+           throw new MetaException("The partition value must not be null.");
+         }
+       }
+     }
+     return client.drop_partition_with_environment_context(prependCatalogToDbName(
+         catName, db_name, conf), tbl_name, part_vals, options.deleteData,
+         options.purgeData ? getEnvironmentContextWithIfPurgeSet() : null);
+   }
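A usage sketch for the options-based overload above (illustrative only; the catalog, database, table and partition values are assumptions): dropping a single partition and purging its data instead of moving it to the trash.

    // Minimal sketch; assumes a partitioned table default.page_views with one partition column.
    // purgeData(true) makes the client attach the ifPurge environment context, so the data
    // skips the trash.
    static boolean dropAndPurgeExample(HiveMetaStoreClient msClient) throws TException {
      return msClient.dropPartition("hive", "default", "page_views",
          java.util.Collections.singletonList("2018-07-19"),
          PartitionDropOptions.instance().deleteData(true).purgeData(true));
    }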
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+                                         List<ObjectPair<Integer, byte[]>> partExprs,
+                                         PartitionDropOptions options)
+       throws TException {
+     return dropPartitions(getDefaultCatalog(conf), dbName, tblName, partExprs, options);
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+       List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
+       boolean ifExists, boolean needResult) throws NoSuchObjectException, MetaException, TException {
+ 
+     return dropPartitions(getDefaultCatalog(conf), dbName, tblName, partExprs,
+                           PartitionDropOptions.instance()
+                                               .deleteData(deleteData)
+                                               .ifExists(ifExists)
+                                               .returnResults(needResult));
+ 
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+       List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
+       boolean ifExists) throws NoSuchObjectException, MetaException, TException {
+     // By default, we need the results from dropPartitions();
+     return dropPartitions(getDefaultCatalog(conf), dbName, tblName, partExprs,
+                           PartitionDropOptions.instance()
+                                               .deleteData(deleteData)
+                                               .ifExists(ifExists));
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String catName, String dbName, String tblName,
+                                         List<ObjectPair<Integer, byte[]>> partExprs,
+                                         PartitionDropOptions options) throws TException {
+     RequestPartsSpec rps = new RequestPartsSpec();
+     List<DropPartitionsExpr> exprs = new ArrayList<>(partExprs.size());
+     for (ObjectPair<Integer, byte[]> partExpr : partExprs) {
+       DropPartitionsExpr dpe = new DropPartitionsExpr();
+       dpe.setExpr(partExpr.getSecond());
+       dpe.setPartArchiveLevel(partExpr.getFirst());
+       exprs.add(dpe);
+     }
+     rps.setExprs(exprs);
+     DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps);
+     req.setCatName(catName);
+     req.setDeleteData(options.deleteData);
+     req.setNeedResult(options.returnResults);
+     req.setIfExists(options.ifExists);
+     if (options.purgeData) {
+       LOG.info("Dropped partitions will be purged!");
+       req.setEnvironmentContext(getEnvironmentContextWithIfPurgeSet());
+     }
+     return client.drop_partitions_req(req).getPartitions();
+   }
+ 
+   @Override
+   public void dropTable(String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab) throws MetaException, TException,
+       NoSuchObjectException, UnsupportedOperationException {
+     dropTable(getDefaultCatalog(conf), dbname, name, deleteData, ignoreUnknownTab, null);
+   }
+ 
+   @Override
+   public void dropTable(String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab, boolean ifPurge) throws TException {
+     dropTable(getDefaultCatalog(conf), dbname, name, deleteData, ignoreUnknownTab, ifPurge);
+   }
+ 
+   @Override
+   public void dropTable(String dbname, String name) throws TException {
+     dropTable(getDefaultCatalog(conf), dbname, name, true, true, null);
+   }
+ 
+   @Override
+   public void dropTable(String catName, String dbName, String tableName, boolean deleteData,
+                         boolean ignoreUnknownTable, boolean ifPurge) throws TException {
+     // Build a new EnvironmentContext with ifPurge set.
+     EnvironmentContext envContext = null;
+     if (ifPurge) {
+       Map<String, String> warehouseOptions = new HashMap<>();
+       warehouseOptions.put("ifPurge", "TRUE");
+       envContext = new EnvironmentContext(warehouseOptions);
+     }
+     dropTable(catName, dbName, tableName, deleteData, ignoreUnknownTable, envContext);
+ 
+   }
+ 
+   /**
+    * Drop the table and choose whether to: delete the underlying table data;
+    * throw if the table doesn't exist; save the data in the trash.
+    *
+    * @param catName catalog name
+    * @param dbname database name
+    * @param name table name
+    * @param deleteData
+    *          delete the underlying data or just delete the table in metadata
+    * @param ignoreUnknownTab
+    *          don't throw if the requested table doesn't exist
+    * @param envContext
+    *          for communicating with thrift
+    * @throws MetaException
+    *           could not drop table properly
+    * @throws NoSuchObjectException
+    *           the table wasn't found
+    * @throws TException
+    *           a thrift communication error occurred
+    * @throws UnsupportedOperationException
+    *           dropping an index table is not allowed
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
+    *      java.lang.String, boolean)
+    */
+   public void dropTable(String catName, String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab, EnvironmentContext envContext) throws MetaException, TException,
+       NoSuchObjectException, UnsupportedOperationException {
+     Table tbl;
+     try {
+       tbl = getTable(catName, dbname, name);
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownTab) {
+         throw e;
+       }
+       return;
+     }
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preDropTable(tbl);
+     }
+     boolean success = false;
+     try {
+       drop_table_with_environment_context(catName, dbname, name, deleteData, envContext);
+       if (hook != null) {
+         hook.commitDropTable(tbl, deleteData
+             || (envContext != null && "TRUE".equals(envContext.getProperties().get("ifPurge"))));
+       }
+       success = true;
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownTab) {
+         throw e;
+       }
+     } finally {
+       if (!success && (hook != null)) {
+         hook.rollbackDropTable(tbl);
+       }
+     }
+   }
+ 
+   @Override
++  public void truncateTable(String dbName, String tableName, List<String> partNames,
++      long txnId, String validWriteIds, long writeId) throws TException {
++    truncateTableInternal(getDefaultCatalog(conf),
++        dbName, tableName, partNames, txnId, validWriteIds, writeId);
++  }
++
++  @Override
+   public void truncateTable(String dbName, String tableName, List<String> partNames) throws TException {
 -    truncateTable(getDefaultCatalog(conf), dbName, tableName, partNames);
++    truncateTableInternal(getDefaultCatalog(conf), dbName, tableName, partNames, -1, null, -1);
+   }
+ 
+   @Override
+   public void truncateTable(String catName, String dbName, String tableName, List<String> partNames)
+       throws TException {
 -    client.truncate_table(prependCatalogToDbName(catName, dbName, conf), tableName, partNames);
++    truncateTableInternal(catName, dbName, tableName, partNames, -1, null, -1);
++  }
++
++  private void truncateTableInternal(String catName, String dbName, String tableName,
++      List<String> partNames, long txnId, String validWriteIds, long writeId)
++          throws MetaException, TException {
++    TruncateTableRequest req = new TruncateTableRequest(
++        prependCatalogToDbName(catName, dbName, conf), tableName);
++    req.setPartNames(partNames);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(validWriteIds);
++    req.setWriteId(writeId);
++    client.truncate_table_req(req);
+   }
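A brief usage sketch of the two truncate entry points above (illustrative only; the txn id, ValidWriteIdList string and write id are placeholders, not real transaction state).

    // Minimal sketch; msClient is assumed to be a connected HiveMetaStoreClient.
    static void truncateExamples(HiveMetaStoreClient msClient) throws TException {
      // Legacy call: maps to txnId = -1, validWriteIds = null, writeId = -1 internally.
      msClient.truncateTable("default", "page_views", null);
      // Transactional variant with placeholder snapshot values.
      msClient.truncateTable("default", "page_views",
          java.util.Collections.singletonList("ds=2018-07-19"),
          42L, "default.page_views:7:9223372036854775807::", 8L);
    }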
+ 
+   /**
+    * Recycles the files under the input path into the cmroot directory, recursively, either by
+    * copying or moving them.
+    *
+    * @param request carries the path of the data files to be recycled to cmroot and the isPurge
+    *                flag; when isPurge is true, the recycled files are not moved to the Trash
+    * @return Response which is currently void
+    */
+   @Override
+   public CmRecycleResponse recycleDirToCmPath(CmRecycleRequest request) throws MetaException, TException {
+     return client.cm_recycle(request);
+   }
+ 
+   /**
+    * @param type
+    * @return true if the type is dropped
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_type(java.lang.String)
+    */
+   public boolean dropType(String type) throws NoSuchObjectException, MetaException, TException {
+     return client.drop_type(type);
+   }
+ 
+   /**
+    * @param name
+    * @return map of types
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type_all(java.lang.String)
+    */
+   public Map<String, Type> getTypeAll(String name) throws MetaException,
+       TException {
+     Map<String, Type> result = null;
+     Map<String, Type> fromClient = client.get_type_all(name);
+     if (fromClient != null) {
+       result = new LinkedHashMap<>();
+       for (String key : fromClient.keySet()) {
+         result.put(key, deepCopy(fromClient.get(key)));
+       }
+     }
+     return result;
+   }
+ 
+   @Override
+   public List<String> getDatabases(String databasePattern) throws TException {
+     return getDatabases(getDefaultCatalog(conf), databasePattern);
+   }
+ 
+   @Override
+   public List<String> getDatabases(String catName, String databasePattern) throws TException {
+     return filterHook.filterDatabases(client.get_databases(prependCatalogToDbName(
+         catName, databasePattern, conf)));
+   }
+ 
+   @Override
+   public List<String> getAllDatabases() throws TException {
+     return getAllDatabases(getDefaultCatalog(conf));
+   }
+ 
+   @Override
+   public List<String> getAllDatabases(String catName) throws TException {
+     return filterHook.filterDatabases(client.get_databases(prependCatalogToDbName(catName, null, conf)));
+   }
+ 
+   @Override
+   public List<Partition> listPartitions(String db_name, String tbl_name, short max_parts)
+       throws TException {
+     return listPartitions(getDefaultCatalog(conf), db_name, tbl_name, max_parts);
+   }
+ 
+   @Override
+   public List<Partition> listPartitions(String catName, String db_name, String tbl_name,
+                                         int max_parts) throws TException {
+     List<Partition> parts = client.get_partitions(prependCatalogToDbName(catName, db_name, conf),
+         tbl_name, shrinkMaxtoShort(max_parts));
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException {
+     return listPartitionSpecs(getDefaultCatalog(conf), dbName, tableName, maxParts);
+   }
+ 
+   @Override
+   public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tableName,
+                                                int maxParts) throws TException {
+     return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
+         client.get_partitions_pspec(prependCatalogToDbName(catName, dbName, conf), tableName, maxParts)));
+   }
+ 
+   @Override
+   public List<Partition> listPartitions(String db_name, String tbl_name,
+                                         List<String> part_vals, short max_parts) throws TException {
+     return listPartitions(getDefaultCatalog(conf), db_name, tbl_name, part_vals, max_parts);
+   }
+ 
+   @Override
+   public List<Partition> listPartitions(String catName, String db_name, String tbl_name,
+                                         List<String> part_vals, int max_parts) throws TException {
+     List<Partition> parts = client.get_partitions_ps(prependCatalogToDbName(catName, db_name, conf),
+         tbl_name, part_vals, shrinkMaxtoShort(max_parts));
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String db_name, String tbl_name,
+                                                     short max_parts, String user_name,
+                                                     List<String> group_names) throws TException {
+     return listPartitionsWithAuthInfo(getDefaultCatalog(conf), db_name, tbl_name, max_parts, user_name,
+         group_names);
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String catName, String dbName, String tableName,
+                                                     int maxParts, String userName,
+                                                     List<String> groupNames) throws TException {
+     List<Partition> parts = client.get_partitions_with_auth(prependCatalogToDbName(catName,
+         dbName, conf), tableName, shrinkMaxtoShort(maxParts), userName, groupNames);
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String db_name, String tbl_name,
+                                                     List<String> part_vals, short max_parts,
+                                                     String user_name, List<String> group_names)
+       throws TException {
+     return listPartitionsWithAuthInfo(getDefaultCatalog(conf), db_name, tbl_name, part_vals, max_parts,
+         user_name, group_names);
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String catName, String dbName, String tableName,
+                                                     List<String> partialPvals, int maxParts,
+                                                     String userName, List<String> groupNames)
+       throws TException {
+     List<Partition> parts = client.get_partitions_ps_with_auth(prependCatalogToDbName(catName,
+         dbName, conf), tableName, partialPvals, shrinkMaxtoShort(maxParts), userName, groupNames);
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsByFilter(String db_name, String tbl_name,
+       String filter, short max_parts) throws TException {
+     return listPartitionsByFilter(getDefaultCatalog(conf), db_name, tbl_name, filter, max_parts);
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsByFilter(String catName, String db_name, String tbl_name,
+                                                 String filter, int max_parts) throws TException {
+     List<Partition> parts =client.get_partitions_by_filter(prependCatalogToDbName(
+         catName, db_name, conf), tbl_name, filter, shrinkMaxtoShort(max_parts));
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name,
+                                                        String filter, int max_parts)
+       throws TException {
+     return listPartitionSpecsByFilter(getDefaultCatalog(conf), db_name, tbl_name, filter, max_parts);
+   }
+ 
+   @Override
+   public PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_name,
+                                                        String tbl_name, String filter,
+                                                        int max_parts) throws TException {
+     return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
+         client.get_part_specs_by_filter(prependCatalogToDbName(catName, db_name, conf), tbl_name, filter,
+             max_parts)));
+   }
+ 
+   @Override
+   public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr,
+                                       String default_partition_name, short max_parts,
+                                       List<Partition> result) throws TException {
+     return listPartitionsByExpr(getDefaultCatalog(conf), db_name, tbl_name, expr,
+         default_partition_name, max_parts, result);
+   }
+ 
+   @Override
+   public boolean listPartitionsByExpr(String catName, String db_name, String tbl_name, byte[] expr,
+       String default_partition_name, int max_parts, List<Partition> result)
+           throws TException {
+     assert result != null;
+     PartitionsByExprRequest req = new PartitionsByExprRequest(
+         db_name, tbl_name, ByteBuffer.wrap(expr));
+     if (default_partition_name != null) {
+       req.setDefaultPartitionName(default_partition_name);
+     }
+     if (max_parts >= 0) {
+       req.setMaxParts(shrinkMaxtoShort(max_parts));
+     }
+     PartitionsByExprResult r;
+     try {
+       r = client.get_partitions_by_expr(req);
+     } catch (TApplicationException te) {
+       // TODO: backward compat for Hive <= 0.12. Can be removed later.
+       if (te.getType() != TApplicationException.UNKNOWN_METHOD
+           && te.getType() != TApplicationException.WRONG_METHOD_NAME) {
+         throw te;
+       }
+       throw new IncompatibleMetastoreException(
+           "Metastore doesn't support listPartitionsByExpr: " + te.getMessage());
+     }
+     r.setPartitions(filterHook.filterPartitions(r.getPartitions()));
+     // TODO: in these methods, do we really need to deepcopy?
+     deepCopyPartitions(r.getPartitions(), result);
+     return !r.isSetHasUnknownPartitions() || r.isHasUnknownPartitions(); // Assume the worst.
+   }
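A usage sketch for listPartitionsByExpr (illustrative only; exprBytes is assumed to be a partition-pruning expression already serialized on the client side, and the table name is made up).

    // Minimal sketch. A true return value means the server could not evaluate the expression
    // exactly, so the caller still has to re-filter the partitions collected into 'result'.
    static boolean listByExprExample(HiveMetaStoreClient msClient, byte[] exprBytes,
        List<Partition> result) throws TException {
      return msClient.listPartitionsByExpr("default", "web_sales", exprBytes,
          "__HIVE_DEFAULT_PARTITION__", (short) -1, result);
    }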
+ 
+   @Override
+   public Database getDatabase(String name) throws TException {
+     return getDatabase(getDefaultCatalog(conf), name);
+   }
+ 
+   @Override
+   public Database getDatabase(String catalogName, String databaseName) throws TException {
+     Database d = client.get_database(prependCatalogToDbName(catalogName, databaseName, conf));
+     return deepCopy(filterHook.filterDatabase(d));
+   }
+ 
+   @Override
+   public Partition getPartition(String db_name, String tbl_name, List<String> part_vals)
+       throws TException {
+     return getPartition(getDefaultCatalog(conf), db_name, tbl_name, part_vals);
+   }
+ 
+   @Override
+   public Partition getPartition(String catName, String dbName, String tblName,
+                                 List<String> partVals) throws TException {
+     Partition p = client.get_partition(prependCatalogToDbName(catName, dbName, conf), tblName, partVals);
+     return deepCopy(filterHook.filterPartition(p));
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String db_name, String tbl_name,
+       List<String> part_names) throws TException {
+     return getPartitionsByNames(getDefaultCatalog(conf), db_name, tbl_name, part_names);
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String catName, String db_name, String tbl_name,
+                                               List<String> part_names) throws TException {
+     List<Partition> parts =
+         client.get_partitions_by_names(prependCatalogToDbName(catName, db_name, conf), tbl_name, part_names);
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request)
+       throws MetaException, TException, NoSuchObjectException {
+     if (!request.isSetCatName()) {
+       request.setCatName(getDefaultCatalog(conf));
+     }
+     return client.get_partition_values(request);
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuthInfo(String db_name, String tbl_name,
+       List<String> part_vals, String user_name, List<String> group_names)
+       throws TException {
+     return getPartitionWithAuthInfo(getDefaultCatalog(conf), db_name, tbl_name, part_vals,
+         user_name, group_names);
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuthInfo(String catName, String dbName, String tableName,
+                                             List<String> pvals, String userName,
+                                             List<String> groupNames) throws TException {
+     Partition p = client.get_partition_with_auth(prependCatalogToDbName(catName, dbName, conf), tableName,
+         pvals, userName, groupNames);
+     return deepCopy(filterHook.filterPartition(p));
+   }
+ 
+   @Override
+   public Table getTable(String dbname, String name) throws TException {
+     return getTable(getDefaultCatalog(conf), dbname, name);
+   }
+ 
+   @Override
++  public Table getTable(String dbname, String name,
++                        long txnId, String validWriteIdList)
++      throws MetaException, TException, NoSuchObjectException {
++    return getTable(getDefaultCatalog(conf), dbname, name, txnId, validWriteIdList);
++  }
++
++  @Override
+   public Table getTable(String catName, String dbName, String tableName) throws TException {
+     GetTableRequest req = new GetTableRequest(dbName, tableName);
+     req.setCatName(catName);
+     req.setCapabilities(version);
+     Table t = client.get_table_req(req).getTable();
+     return deepCopy(filterHook.filterTable(t));
+   }
+ 
+   @Override
++  public Table getTable(String catName, String dbName, String tableName,
++      long txnId, String validWriteIdList) throws TException {
++    GetTableRequest req = new GetTableRequest(dbName, tableName);
++    req.setCatName(catName);
++    req.setCapabilities(version);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(validWriteIdList);
++    Table t = client.get_table_req(req).getTable();
++    return deepCopy(filterHook.filterTable(t));
++  }
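A sketch of the snapshot-aware getTable added above (illustrative only; the txn id and the ValidWriteIdList string are placeholders that would normally come from the client's transaction state).

    // Minimal sketch; "hive" is the default catalog name, the table name is made up.
    static Table getTableSnapshotExample(HiveMetaStoreClient msClient) throws TException {
      long txnId = 42L;                                                      // placeholder
      String validWriteIdList = "default.acid_tbl:7:9223372036854775807::";  // placeholder
      return msClient.getTable("hive", "default", "acid_tbl", txnId, validWriteIdList);
    }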
++
++  @Override
+   public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
+       throws TException {
+     return getTableObjectsByName(getDefaultCatalog(conf), dbName, tableNames);
+   }
+ 
+   @Override
+   public List<Table> getTableObjectsByName(String catName, String dbName,
+                                            List<String> tableNames) throws TException {
+     GetTablesRequest req = new GetTablesRequest(dbName);
+     req.setCatName(catName);
+     req.setTblNames(tableNames);
+     req.setCapabilities(version);
+     List<Table> tabs = client.get_table_objects_by_name_req(req).getTables();
+     return deepCopyTables(filterHook.filterTables(tabs));
+   }
+ 
+   @Override
+   public Materialization getMaterializationInvalidationInfo(CreationMetadata cm, String validTxnList)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException {
+     return client.get_materialization_invalidation_info(cm, validTxnList);
+   }
+ 
+   @Override
+   public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException {
+     client.update_creation_metadata(getDefaultCatalog(conf), dbName, tableName, cm);
+   }
+ 
+   @Override
+   public void updateCreationMetadata(String catName, String dbName, String tableName,
+                                      CreationMetadata cm) throws MetaException, TException {
+     client.update_creation_metadata(catName, dbName, tableName, cm);
+ 
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> listTableNamesByFilter(String dbName, String filter, short maxTables)
+       throws TException {
+     return listTableNamesByFilter(getDefaultCatalog(conf), dbName, filter, maxTables);
+   }
+ 
+   @Override
+   public List<String> listTableNamesByFilter(String catName, String dbName, String filter,
+                                              int maxTables) throws TException {
+     return filterHook.filterTableNames(catName, dbName,
+         client.get_table_names_by_filter(prependCatalogToDbName(catName, dbName, conf), filter,
+             shrinkMaxtoShort(maxTables)));
+   }
+ 
+   /**
+    * @param name
+    * @return the type
+    * @throws MetaException
+    * @throws TException
+    * @throws NoSuchObjectException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type(java.lang.String)
+    */
+   public Type getType(String name) throws NoSuchObjectException, MetaException, TException {
+     return deepCopy(client.get_type(name));
+   }
+ 
+   @Override
+   public List<String> getTables(String dbname, String tablePattern) throws MetaException {
+     try {
+       return getTables(getDefaultCatalog(conf), dbname, tablePattern);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String tablePattern)
+       throws TException {
+     return filterHook.filterTableNames(catName, dbName,
+         client.get_tables(prependCatalogToDbName(catName, dbName, conf), tablePattern));
+   }
+ 
+   @Override
+   public List<String> getTables(String dbname, String tablePattern, TableType tableType) throws MetaException {
+     try {
+       return getTables(getDefaultCatalog(conf), dbname, tablePattern, tableType);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String tablePattern,
+                                 TableType tableType) throws TException {
+     return filterHook.filterTableNames(catName, dbName,
+         client.get_tables_by_type(prependCatalogToDbName(catName, dbName, conf), tablePattern,
+             tableType.toString()));
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String dbName) throws TException {
+     return getMaterializedViewsForRewriting(getDefaultCatalog(conf), dbName);
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String catName, String dbname)
+       throws MetaException {
+     try {
+       return filterHook.filterTableNames(catName, dbname,
+           client.get_materialized_views_for_rewriting(prependCatalogToDbName(catName, dbname, conf)));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes)
+       throws MetaException {
+     try {
+       return getTableMeta(getDefaultCatalog(conf), dbPatterns, tablePatterns, tableTypes);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String catName, String dbPatterns, String tablePatterns,
+                                       List<String> tableTypes) throws TException {
+     return filterHook.filterTableMetas(client.get_table_meta(prependCatalogToDbName(
+         catName, dbPatterns, conf), tablePatterns, tableTypes));
+   }
+ 
+   @Override
+   public List<String> getAllTables(String dbname) throws MetaException {
+     try {
+       return getAllTables(getDefaultCatalog(conf), dbname);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<String> getAllTables(String catName, String dbName) throws TException {
+     return filterHook.filterTableNames(catName, dbName, client.get_all_tables(
+         prependCatalogToDbName(catName, dbName, conf)));
+   }
+ 
+   @Override
+   public boolean tableExists(String databaseName, String tableName) throws TException {
+     return tableExists(getDefaultCatalog(conf), databaseName, tableName);
+   }
+ 
+   @Override
+   public boolean tableExists(String catName, String dbName, String tableName) throws TException {
+     try {
+       GetTableRequest req = new GetTableRequest(dbName, tableName);
+       req.setCatName(catName);
+       req.setCapabilities(version);
+       return filterHook.filterTable(client.get_table_req(req).getTable()) != null;
+     } catch (NoSuchObjectException e) {
+       return false;
+     }
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String dbName, String tblName,
+       short max) throws NoSuchObjectException, MetaException, TException {
+     return listPartitionNames(getDefaultCatalog(conf), dbName, tblName, max);
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String dbName, String tableName,
+                                          int maxParts) throws TException {
+     return filterHook.filterPartitionNames(catName, dbName, tableName,
+         client.get_partition_names(prependCatalogToDbName(catName, dbName, conf), tableName, shrinkMaxtoShort(maxParts)));
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String db_name, String tbl_name,
+       List<String> part_vals, short max_parts) throws TException {
+     return listPartitionNames(getDefaultCatalog(conf), db_name, tbl_name, part_vals, max_parts);
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String db_name, String tbl_name,
+                                          List<String> part_vals, int max_parts) throws TException {
+     return filterHook.filterPartitionNames(catName, db_name, tbl_name,
+         client.get_partition_names_ps(prependCatalogToDbName(catName, db_name, conf), tbl_name,
+             part_vals, shrinkMaxtoShort(max_parts)));
+   }
+ 
+   @Override
+   public int getNumPartitionsByFilter(String db_name, String tbl_name,
+                                       String filter) throws TException {
+     return getNumPartitionsByFilter(getDefaultCatalog(conf), db_name, tbl_name, filter);
+   }
+ 
+   @Override
+   public int getNumPartitionsByFilter(String catName, String dbName, String tableName,
+                                       String filter) throws TException {
+     return client.get_num_partitions_by_filter(prependCatalogToDbName(catName, dbName, conf), tableName,
+         filter);
+   }
+ 
+   @Override
+   public void alter_partition(String dbName, String tblName, Partition newPart)
+       throws InvalidOperationException, MetaException, TException {
+     alter_partition(getDefaultCatalog(conf), dbName, tblName, newPart, null);
+   }
+ 
+   @Override
 -  public void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext)
++  public void alter_partition(String dbName, String tblName, Partition newPart,
++      EnvironmentContext environmentContext)
+       throws InvalidOperationException, MetaException, TException {
+     alter_partition(getDefaultCatalog(conf), dbName, tblName, newPart, environmentContext);
+   }
+ 
+   @Override
+   public void alter_partition(String catName, String dbName, String tblName, Partition newPart,
+                               EnvironmentContext environmentContext) throws TException {
 -    client.alter_partition_with_environment_context(prependCatalogToDbName(catName, dbName, conf), tblName,
 -        newPart, environmentContext);
++    AlterPartitionsRequest req = new AlterPartitionsRequest(dbName, tblName, Lists.newArrayList(newPart));
++    req.setCatName(catName);
++    req.setEnvironmentContext(environmentContext);
++    client.alter_partitions_req(req);
+   }
+ 
+   @Override
++  public void alter_partition(String dbName, String tblName, Partition newPart,
++      EnvironmentContext environmentContext, long txnId, String writeIdList)
++      throws InvalidOperationException, MetaException, TException {
++    AlterPartitionsRequest req = new AlterPartitionsRequest(
++        dbName, tblName, Lists.newArrayList(newPart));
++    req.setEnvironmentContext(environmentContext);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(writeIdList);
++    client.alter_partitions_req(req);
++  }
++
++  @Deprecated
++  @Override
+   public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
+       throws TException {
 -    alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, null);
++    alter_partitions(
++        getDefaultCatalog(conf), dbName, tblName, newParts, new EnvironmentContext(), -1, null, -1);
+   }
+ 
+   @Override
+   public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
+                                EnvironmentContext environmentContext) throws TException {
 -    alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, environmentCo

<TRUNCATED>

[50/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
index 0000000,29c98d1..3a65f77
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
@@@ -1,0 -1,104 +1,109 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import java.util.List;
+ 
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.conf.Configurable;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
+ import org.apache.hadoop.hive.metastore.txn.TxnStore;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ 
+ /**
+  * An interface wrapper for HMSHandler.  This interface contains methods that need to be
+  * called by internal classes but that are not part of the thrift interface.
+  */
+ @InterfaceAudience.Private
+ public interface IHMSHandler extends ThriftHiveMetastore.Iface, Configurable {
+ 
+   void init() throws MetaException;
+ 
+   /**
+    * Get the id of the thread of this handler.
+    * @return thread id
+    */
+   int getThreadId();
+ 
+   /**
+    * Get a reference to the underlying RawStore.
+    * @return the RawStore instance.
+    * @throws MetaException if the creation of a new RawStore object is necessary but fails.
+    */
+   RawStore getMS() throws MetaException;
+ 
+   /**
+    * Get a reference to the underlying TxnStore.
+    * @return the TxnStore instance.
+    */
+   TxnStore getTxnHandler();
+ 
+   /**
+    * Get a reference to Hive's warehouse object (the class that does all the physical operations).
+    * @return Warehouse instance.
+    */
+   Warehouse getWh();
+ 
+   /**
+    * Equivalent to get_database, but does not write to audit logs, or fire pre-event listeners.
+    * Meant to be used for internal hive classes that don't use the thrift interface.
+    * @param catName catalog name
+    * @param name database name
+    * @return database object
+    * @throws NoSuchObjectException If the database does not exist.
+    * @throws MetaException If another error occurs.
+    */
+   Database get_database_core(final String catName, final String name)
+       throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Equivalent of get_table, but does not log audits and fire pre-event listener.
+    * Meant to be used for calls made by other hive classes, that are not using the
+    * thrift interface.
+    * @param catName catalog name
+    * @param dbname database name
+    * @param name table name
+    * @return Table object
+    * @throws NoSuchObjectException If the table does not exist.
+    * @throws MetaException  If another error occurs.
+    */
+   Table get_table_core(final String catName, final String dbname, final String name)
+       throws MetaException, NoSuchObjectException;
+ 
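++  /**
++   * Variant of {@link #get_table_core(String, String, String)} that additionally takes the
++   * caller's transaction id and valid write id list (mirroring the txnId/validWriteIdList
++   * overloads of getTable on the client side).
++   */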
++  Table get_table_core(final String catName, final String dbname,
++                       final String name, final long txnId,
++                       final String writeIdList)
++      throws MetaException, NoSuchObjectException;
++
+   /**
+    * Get a list of all transactional listeners.
+    * @return list of listeners.
+    */
+   List<TransactionalMetaStoreEventListener> getTransactionalListeners();
+ 
+   /**
+    * Get a list of all non-transactional listeners.
+    * @return list of non-transactional listeners.
+    */
+   List<MetaStoreEventListener> getListeners();
+ }
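For orientation, a minimal sketch of how an internal metastore component might use this interface; the helper class and its wiring are hypothetical, not part of this change.

    // Hypothetical internal helper; the IHMSHandler instance would be supplied by the
    // metastore runtime. get_table_core skips audit logging and pre-event listeners,
    // which is why it is reserved for internal callers such as this one.
    class TableLookupHelper {
      private final IHMSHandler handler;

      TableLookupHelper(IHMSHandler handler) {
        this.handler = handler;
      }

      Table lookup(String catName, String dbName, String tableName)
          throws MetaException, NoSuchObjectException {
        return handler.get_table_core(catName, dbName, tableName);
      }
    }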


[48/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index 0000000,f45b71f..07be1ba
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@@ -1,0 -1,2817 +1,2837 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import static org.apache.commons.lang.StringUtils.join;
+ import static org.apache.commons.lang.StringUtils.normalizeSpace;
+ import static org.apache.commons.lang.StringUtils.repeat;
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ 
+ import java.sql.Blob;
+ import java.sql.Clob;
+ import java.sql.Connection;
+ import java.sql.SQLException;
+ import java.sql.Statement;
+ import java.text.ParseException;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.Iterator;
+ import java.util.LinkedList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.TreeMap;
+ import java.util.stream.Collectors;
+ 
+ import javax.jdo.PersistenceManager;
+ import javax.jdo.Query;
+ import javax.jdo.Transaction;
+ import javax.jdo.datastore.JDOConnection;
+ 
+ import org.apache.commons.lang.BooleanUtils;
+ import org.apache.commons.lang.StringUtils;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.AggregateStatsCache.AggrColStats;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.Order;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.model.MConstraint;
+ import org.apache.hadoop.hive.metastore.model.MCreationMetadata;
+ import org.apache.hadoop.hive.metastore.model.MDatabase;
+ import org.apache.hadoop.hive.metastore.model.MNotificationLog;
+ import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
+ import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege;
+ import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics;
+ import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege;
+ import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics;
+ import org.apache.hadoop.hive.metastore.model.MWMResourcePlan;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LogicalOperator;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
+ import org.apache.hive.common.util.BloomFilter;
+ import org.datanucleus.store.rdbms.query.ForwardQueryResult;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.google.common.collect.Lists;
+ 
+ /**
+  * This class contains the optimizations for MetaStore that rely on direct SQL access to
+  * the underlying database. It should use ANSI SQL and be compatible with common databases
+  * such as MySQL (note that MySQL doesn't use full ANSI mode by default), Postgres, etc.
+  *
+  * As of now, only partition retrieval is done this way to improve job startup time;
+  * JDOQL partition retrieval is still present so that the ORM solution we have is not limited
+  * to SQL stores only. There is always a way to fall back and do without direct SQL.
+  */
+ class MetaStoreDirectSql {
+   private static final int NO_BATCHING = -1, DETECT_BATCHING = 0;
+ 
+   private static final Logger LOG = LoggerFactory.getLogger(MetaStoreDirectSql.class);
+   private final PersistenceManager pm;
++  private final Configuration conf;
+   private final String schema;
+ 
+   /**
+    * We want to avoid db-specific code in this class and stick with ANSI SQL. However,
+    * MySQL and Postgres are ANSI-incompatible in different ways (MySQL by default doesn't support
+    * quoted identifiers, and Postgres contravenes ANSI by coercing unquoted ones to lower case).
+    * MySQL's way of working around this is simpler (just set ansi-quotes mode on), so we
+    * use that. MySQL detection is done by actually issuing the set-ansi-quotes command.
+    *
+    * Use sparingly; we don't want to devolve into another DataNucleus...
+    */
+   private final DatabaseProduct dbType;
+   private final int batchSize;
+   private final boolean convertMapNullsToEmptyStrings;
+   private final String defaultPartName;
+ 
+   /**
+    * Whether direct SQL can be used with the current datastore backing {@link #pm}.
+    */
+   private final boolean isCompatibleDatastore;
+   private final boolean isAggregateStatsCacheEnabled;
+   private AggregateStatsCache aggrStatsCache;
+ 
+   @java.lang.annotation.Target(java.lang.annotation.ElementType.FIELD)
+   @java.lang.annotation.Retention(java.lang.annotation.RetentionPolicy.RUNTIME)
+   private @interface TableName {}
+ 
+   // Table names with schema name, if necessary
+   @TableName
+   private String DBS, TBLS, PARTITIONS, DATABASE_PARAMS, PARTITION_PARAMS, SORT_COLS, SD_PARAMS,
+       SDS, SERDES, SKEWED_STRING_LIST_VALUES, SKEWED_VALUES, BUCKETING_COLS, SKEWED_COL_NAMES,
+       SKEWED_COL_VALUE_LOC_MAP, COLUMNS_V2, PARTITION_KEYS, SERDE_PARAMS, PART_COL_STATS, KEY_CONSTRAINTS,
+       TAB_COL_STATS, PARTITION_KEY_VALS, PART_PRIVS, PART_COL_PRIVS, SKEWED_STRING_LIST, CDS;
+ 
++
+   public MetaStoreDirectSql(PersistenceManager pm, Configuration conf, String schema) {
+     this.pm = pm;
++    this.conf = conf;
+     this.schema = schema;
+     DatabaseProduct dbType = null;
+     try {
+       dbType = DatabaseProduct.determineDatabaseProduct(getProductName(pm));
+     } catch (SQLException e) {
+       LOG.warn("Cannot determine database product; assuming OTHER", e);
+       dbType = DatabaseProduct.OTHER;
+     }
+     this.dbType = dbType;
+     int batchSize = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_PARTITION_BATCH_SIZE);
+     if (batchSize == DETECT_BATCHING) {
+       batchSize = DatabaseProduct.needsInBatching(dbType) ? 1000 : NO_BATCHING;
+     }
+     this.batchSize = batchSize;
+ 
+     for (java.lang.reflect.Field f : this.getClass().getDeclaredFields()) {
+       if (f.getAnnotation(TableName.class) == null) continue;
+       try {
+         f.set(this, getFullyQualifiedName(schema, f.getName()));
+       } catch (IllegalArgumentException | IllegalAccessException e) {
+         throw new RuntimeException("Internal error, cannot set " + f.getName());
+       }
+     }
+ 
+     convertMapNullsToEmptyStrings =
+         MetastoreConf.getBoolVar(conf, ConfVars.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS);
+     defaultPartName = MetastoreConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME);
+ 
+     String jdoIdFactory = MetastoreConf.getVar(conf, ConfVars.IDENTIFIER_FACTORY);
+     if (! ("datanucleus1".equalsIgnoreCase(jdoIdFactory))){
+       LOG.warn("Underlying metastore does not use 'datanucleus1' for its ORM naming scheme."
+           + " Disabling directSQL as it uses hand-hardcoded SQL with that assumption.");
+       isCompatibleDatastore = false;
+     } else {
+       boolean isInTest = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST);
+       isCompatibleDatastore = (!isInTest || ensureDbInit()) && runTestQuery();
+       if (isCompatibleDatastore) {
+         LOG.debug("Using direct SQL, underlying DB is " + dbType);
+       }
+     }
+ 
+     isAggregateStatsCacheEnabled = MetastoreConf.getBoolVar(
+         conf, ConfVars.AGGREGATE_STATS_CACHE_ENABLED);
+     if (isAggregateStatsCacheEnabled) {
+       aggrStatsCache = AggregateStatsCache.getInstance(conf);
+     }
+   }
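For illustration (not part of this patch), a minimal sketch of how a caller such as ObjectStore might use this class; the persistence manager, configuration and database names are assumptions.

    // Minimal sketch. isCompatibleDatastore() reflects the self-test run in the constructor
    // above; callers are expected to fall back to the JDO/ORM path when it returns false.
    static Database getDatabaseFastPath(PersistenceManager pm, Configuration conf)
        throws MetaException {
      MetaStoreDirectSql directSql = new MetaStoreDirectSql(pm, conf, "");
      if (directSql.isCompatibleDatastore()) {
        return directSql.getDatabase("hive", "default");
      }
      return null; // caller would use the ORM/JDO path instead
    }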
+ 
+   private static String getFullyQualifiedName(String schema, String tblName) {
+     return ((schema == null || schema.isEmpty()) ? "" : "\"" + schema + "\".\"")
+         + "\"" + tblName + "\"";
+   }
+ 
+ 
+   public MetaStoreDirectSql(PersistenceManager pm, Configuration conf) {
+     this(pm, conf, "");
+   }
+ 
+   static String getProductName(PersistenceManager pm) {
+     JDOConnection jdoConn = pm.getDataStoreConnection();
+     try {
+       return ((Connection)jdoConn.getNativeConnection()).getMetaData().getDatabaseProductName();
+     } catch (Throwable t) {
+       LOG.warn("Error retrieving product name", t);
+       return null;
+     } finally {
+       jdoConn.close(); // We must release the connection before we call other pm methods.
+     }
+   }
+ 
+   private boolean ensureDbInit() {
+     Transaction tx = pm.currentTransaction();
+     boolean doCommit = false;
+     if (!tx.isActive()) {
+       tx.begin();
+       doCommit = true;
+     }
+     LinkedList<Query> initQueries = new LinkedList<>();
+ 
+     try {
+       // Force the underlying db to initialize.
+       initQueries.add(pm.newQuery(MDatabase.class, "name == ''"));
+       initQueries.add(pm.newQuery(MTableColumnStatistics.class, "dbName == ''"));
+       initQueries.add(pm.newQuery(MPartitionColumnStatistics.class, "dbName == ''"));
+       initQueries.add(pm.newQuery(MConstraint.class, "childIntegerIndex < 0"));
+       initQueries.add(pm.newQuery(MNotificationLog.class, "dbName == ''"));
+       initQueries.add(pm.newQuery(MNotificationNextId.class, "nextEventId < -1"));
+       initQueries.add(pm.newQuery(MWMResourcePlan.class, "name == ''"));
+       initQueries.add(pm.newQuery(MCreationMetadata.class, "dbName == ''"));
+       initQueries.add(pm.newQuery(MPartitionPrivilege.class, "principalName == ''"));
+       initQueries.add(pm.newQuery(MPartitionColumnPrivilege.class, "principalName == ''"));
+       Query q;
+       while ((q = initQueries.peekFirst()) != null) {
+         q.execute();
+         initQueries.pollFirst();
+       }
+ 
+       return true;
+     } catch (Exception ex) {
+       doCommit = false;
+       LOG.warn("Database initialization failed; direct SQL is disabled", ex);
+       tx.rollback();
+       return false;
+     } finally {
+       if (doCommit) {
+         tx.commit();
+       }
+       for (Query q : initQueries) {
+         try {
+           q.closeAll();
+         } catch (Throwable t) {
+         }
+       }
+     }
+   }
+ 
+   private boolean runTestQuery() {
+     Transaction tx = pm.currentTransaction();
+     boolean doCommit = false;
+     if (!tx.isActive()) {
+       tx.begin();
+       doCommit = true;
+     }
+     Query query = null;
+     // Run a self-test query. If it doesn't work, we will self-disable. What a PITA...
+     String selfTestQuery = "select \"DB_ID\" from " + DBS + "";
+     try {
+       prepareTxn();
+       query = pm.newQuery("javax.jdo.query.SQL", selfTestQuery);
+       query.execute();
+       return true;
+     } catch (Throwable t) {
+       doCommit = false;
+       LOG.warn("Self-test query [" + selfTestQuery + "] failed; direct SQL is disabled", t);
+       tx.rollback();
+       return false;
+     } finally {
+       if (doCommit) {
+         tx.commit();
+       }
+       if (query != null) {
+         query.closeAll();
+       }
+     }
+   }
+ 
+   public String getSchema() {
+     return schema;
+   }
+ 
+   public boolean isCompatibleDatastore() {
+     return isCompatibleDatastore;
+   }
+ 
+   private void executeNoResult(final String queryText) throws SQLException {
+     JDOConnection jdoConn = pm.getDataStoreConnection();
+     Statement statement = null;
+     boolean doTrace = LOG.isDebugEnabled();
+     try {
+       long start = doTrace ? System.nanoTime() : 0;
+       statement = ((Connection)jdoConn.getNativeConnection()).createStatement();
+       statement.execute(queryText);
+       timingTrace(doTrace, queryText, start, doTrace ? System.nanoTime() : 0);
+     } finally {
+       if (statement != null) {
+         statement.close();
+       }
+       jdoConn.close(); // We must release the connection before we call other pm methods.
+     }
+   }
+ 
+   public Database getDatabase(String catName, String dbName) throws MetaException{
+     Query queryDbSelector = null;
+     Query queryDbParams = null;
+     try {
+       dbName = dbName.toLowerCase();
+       catName = catName.toLowerCase();
+ 
+       String queryTextDbSelector = "select "
+           + "\"DB_ID\", \"NAME\", \"DB_LOCATION_URI\", \"DESC\", "
+           + "\"OWNER_NAME\", \"OWNER_TYPE\", \"CTLG_NAME\" "
+           + "FROM "+ DBS
+           + " where \"NAME\" = ? and \"CTLG_NAME\" = ? ";
+       Object[] params = new Object[] { dbName, catName };
+       queryDbSelector = pm.newQuery("javax.jdo.query.SQL", queryTextDbSelector);
+ 
+       if (LOG.isTraceEnabled()) {
+         LOG.trace("getDatabase:query instantiated : " + queryTextDbSelector
+             + " with params [" + params[0] + ", " + params[1] + "]");
+       }
+ 
+       List<Object[]> sqlResult = executeWithArray(
+           queryDbSelector, params, queryTextDbSelector);
+       if ((sqlResult == null) || sqlResult.isEmpty()) {
+         return null;
+       }
+ 
+       assert(sqlResult.size() == 1);
+       if (sqlResult.get(0) == null) {
+         return null;
+       }
+ 
+       Object[] dbline = sqlResult.get(0);
+       Long dbid = extractSqlLong(dbline[0]);
+ 
+       String queryTextDbParams = "select \"PARAM_KEY\", \"PARAM_VALUE\" "
+           + " from " + DATABASE_PARAMS + " "
+           + " WHERE \"DB_ID\" = ? "
+           + " AND \"PARAM_KEY\" IS NOT NULL";
+       params[0] = dbid;
+       queryDbParams = pm.newQuery("javax.jdo.query.SQL", queryTextDbParams);
+       if (LOG.isTraceEnabled()) {
+         LOG.trace("getDatabase:query2 instantiated : " + queryTextDbParams
+             + " with param [" + params[0] + "]");
+       }
+ 
+       Map<String,String> dbParams = new HashMap<String,String>();
+       List<Object[]> sqlResult2 = ensureList(executeWithArray(
+           queryDbParams, params, queryTextDbParams));
+       if (!sqlResult2.isEmpty()) {
+         for (Object[] line : sqlResult2) {
+           dbParams.put(extractSqlString(line[0]), extractSqlString(line[1]));
+         }
+       }
+       Database db = new Database();
+       db.setName(extractSqlString(dbline[1]));
+       db.setLocationUri(extractSqlString(dbline[2]));
+       db.setDescription(extractSqlString(dbline[3]));
+       db.setOwnerName(extractSqlString(dbline[4]));
+       String type = extractSqlString(dbline[5]);
+       db.setOwnerType(
+           (null == type || type.trim().isEmpty()) ? null : PrincipalType.valueOf(type));
+       db.setCatalogName(extractSqlString(dbline[6]));
+       db.setParameters(MetaStoreUtils.trimMapNulls(dbParams, convertMapNullsToEmptyStrings));
+       if (LOG.isDebugEnabled()){
+         LOG.debug("getDatabase: directsql returning db " + db.getName()
+             + " locn["+db.getLocationUri()  +"] desc [" +db.getDescription()
+             + "] owner [" + db.getOwnerName() + "] ownertype ["+ db.getOwnerType() +"]");
+       }
+       return db;
+     } finally {
+       if (queryDbSelector != null){
+         queryDbSelector.closeAll();
+       }
+       if (queryDbParams != null){
+         queryDbParams.closeAll();
+       }
+     }
+   }
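+ 
+   // Illustrative usage sketch for getDatabase above (hedged: "directSql" stands for a
+   // MetaStoreDirectSql instance owned by the caller, e.g. ObjectStore; catalog "hive" and
+   // database "default" are just example arguments):
+   //
+   //   Database db = directSql.getDatabase("hive", "default");
+   //   if (db != null) {
+   //     String location = db.getLocationUri();
+   //     Map<String, String> params = db.getParameters();   // already null-trimmed above
+   //   }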
+ 
+   /**
+    * Get table names by using direct SQL queries.
+    * @param catName catalog name
+    * @param dbName Metastore database name
+    * @param tableType Table type, or null if we want to get all tables
+    * @return list of table names
+    */
+   public List<String> getTables(String catName, String dbName, TableType tableType)
+       throws MetaException {
+     String queryText = "SELECT " + TBLS + ".\"TBL_NAME\""
+       + " FROM " + TBLS + " "
+       + " INNER JOIN " + DBS + " ON " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" "
+       + " WHERE " + DBS + ".\"NAME\" = ? AND " + DBS + ".\"CTLG_NAME\" = ? "
+       + (tableType == null ? "" : "AND " + TBLS + ".\"TBL_TYPE\" = ? ") ;
+ 
+     List<String> pms = new ArrayList<>();
+     pms.add(dbName);
+     pms.add(catName);
+     if (tableType != null) {
+       pms.add(tableType.toString());
+     }
+ 
+     Query<?> queryParams = pm.newQuery("javax.jdo.query.SQL", queryText);
+     return executeWithArray(
+         queryParams, pms.toArray(), queryText);
+   }
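+ 
+   // Illustrative usage sketch (hedged: hypothetical caller holding a MetaStoreDirectSql
+   // instance named "directSql"; a null tableType returns tables of every type):
+   //
+   //   List<String> managed = directSql.getTables("hive", "default", TableType.MANAGED_TABLE);
+   //   List<String> all = directSql.getTables("hive", "default", null);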
+ 
+   /**
+    * Get materialized view names by using direct SQL queries.
+    *
+    * @param dbName Metastore database name
+    * @return list of materialized view names
+    */
+   public List<String> getMaterializedViewsForRewriting(String dbName) throws MetaException {
+     String queryText = "SELECT " + TBLS + ".\"TBL_NAME\""
+       + " FROM " + TBLS + " "
+       + " INNER JOIN " + DBS + " ON " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" "
+       + " WHERE " + DBS + ".\"NAME\" = ? AND " + TBLS + ".\"TBL_TYPE\" = ? " ;
+ 
+     List<String> pms = new ArrayList<String>();
+     pms.add(dbName);
+     pms.add(TableType.MATERIALIZED_VIEW.toString());
+ 
+     Query<?> queryParams = pm.newQuery("javax.jdo.query.SQL", queryText);
+     return executeWithArray(
+         queryParams, pms.toArray(), queryText);
+   }
+ 
+   /**
+    * Gets partitions by using direct SQL queries.
+    * @param catName Metastore catalog name.
+    * @param dbName Metastore db name.
+    * @param tblName Metastore table name.
+    * @param partNames Partition names to get.
+    * @return List of partitions.
+    */
+   public List<Partition> getPartitionsViaSqlFilter(final String catName, final String dbName,
+                                                    final String tblName, List<String> partNames)
+       throws MetaException {
+     if (partNames.isEmpty()) {
+       return Collections.emptyList();
+     }
+     return Batchable.runBatched(batchSize, partNames, new Batchable<String, Partition>() {
+       @Override
+       public List<Partition> run(List<String> input) throws MetaException {
+         String filter = "" + PARTITIONS + ".\"PART_NAME\" in (" + makeParams(input.size()) + ")";
+         List<Object> partitionIds = getPartitionIdsViaSqlFilter(catName, dbName, tblName,
+             filter, input, Collections.<String>emptyList(), null);
+         if (partitionIds.isEmpty()) {
+           return Collections.emptyList(); // no partitions, bail early.
+         }
+         return getPartitionsFromPartitionIds(catName, dbName, tblName, null, partitionIds);
+       }
+     });
+   }
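+ 
+   // Rough sketch of the chunk-and-concatenate batching used above (hedged: the real
+   // Batchable.runBatched lives elsewhere in the metastore; this only illustrates the idea,
+   // with a hypothetical functional signature):
+   //
+   //   static <I, R> List<R> runBatchedSketch(int batchSize, List<I> input,
+   //       java.util.function.Function<List<I>, List<R>> runOneBatch) {
+   //     if (batchSize <= 0 || input.size() <= batchSize) {
+   //       return runOneBatch.apply(input);                  // small enough for one IN (...)
+   //     }
+   //     List<R> result = new ArrayList<>();
+   //     for (int from = 0; from < input.size(); from += batchSize) {
+   //       int to = Math.min(from + batchSize, input.size());
+   //       result.addAll(runOneBatch.apply(input.subList(from, to)));
+   //     }
+   //     return result;
+   //   }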
+ 
+   /**
+    * Gets partitions by using direct SQL queries.
+    * @param filter The filter.
+    * @param max The maximum number of partitions to return.
+    * @return List of partitions.
+    */
+   public List<Partition> getPartitionsViaSqlFilter(
+       SqlFilterForPushdown filter, Integer max) throws MetaException {
+     Boolean isViewTable = isViewTable(filter.table);
+     String catName = filter.table.isSetCatName() ? filter.table.getCatName() :
+         DEFAULT_CATALOG_NAME;
+     List<Object> partitionIds = getPartitionIdsViaSqlFilter(catName,
+         filter.table.getDbName(), filter.table.getTableName(), filter.filter, filter.params,
+         filter.joins, max);
+     if (partitionIds.isEmpty()) {
+       return Collections.emptyList(); // no partitions, bail early.
+     }
+     return Batchable.runBatched(batchSize, partitionIds, new Batchable<Object, Partition>() {
+       @Override
+       public List<Partition> run(List<Object> input) throws MetaException {
+         return getPartitionsFromPartitionIds(catName, filter.table.getDbName(),
+             filter.table.getTableName(), isViewTable, input);
+       }
+     });
+   }
+ 
+   public static class SqlFilterForPushdown {
+     private final List<Object> params = new ArrayList<>();
+     private final List<String> joins = new ArrayList<>();
+     private String filter;
+     private Table table;
+   }
+ 
+   public boolean generateSqlFilterForPushdown(
+       Table table, ExpressionTree tree, SqlFilterForPushdown result) throws MetaException {
+     // Derby and Oracle do not interpret filters ANSI-properly in some cases and need a workaround.
+     boolean dbHasJoinCastBug = DatabaseProduct.hasJoinOperationOrderBug(dbType);
+     result.table = table;
+     result.filter = PartitionFilterGenerator.generateSqlFilter(table, tree, result.params,
+         result.joins, dbHasJoinCastBug, defaultPartName, dbType, schema);
+     return result.filter != null;
+   }
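+ 
+   // Illustrative call sequence for filter pushdown (hedged: "directSql", "table" and "tree"
+   // are caller-side names; the ExpressionTree is assumed to have been parsed already):
+   //
+   //   SqlFilterForPushdown pushdown = new SqlFilterForPushdown();
+   //   if (directSql.generateSqlFilterForPushdown(table, tree, pushdown)) {
+   //     // The filter could be translated to SQL - fetch matching partitions directly.
+   //     List<Partition> parts = directSql.getPartitionsViaSqlFilter(pushdown, null);
+   //   } else {
+   //     // Could not push down (e.g. LIKE, unsupported type) - fall back to JDO filtering.
+   //   }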
+ 
+   /**
+    * Gets all partitions of a table by using direct SQL queries.
+    * @param catName Metastore catalog name.
+    * @param dbName Metastore db name.
+    * @param tblName Metastore table name.
+    * @param max The maximum number of partitions to return.
+    * @return List of partitions.
+    */
+   public List<Partition> getPartitions(String catName,
+       String dbName, String tblName, Integer max) throws MetaException {
+     List<Object> partitionIds = getPartitionIdsViaSqlFilter(catName, dbName,
+         tblName, null, Collections.<String>emptyList(), Collections.<String>emptyList(), max);
+     if (partitionIds.isEmpty()) {
+       return Collections.emptyList(); // no partitions, bail early.
+     }
+ 
+     // Get full objects. For Oracle/etc. do it in batches.
+     List<Partition> result = Batchable.runBatched(batchSize, partitionIds, new Batchable<Object, Partition>() {
+       @Override
+       public List<Partition> run(List<Object> input) throws MetaException {
+         return getPartitionsFromPartitionIds(catName, dbName, tblName, null, input);
+       }
+     });
+     return result;
+   }
+ 
+   private static Boolean isViewTable(Table t) {
+     return t.isSetTableType() ?
+         t.getTableType().equals(TableType.VIRTUAL_VIEW.toString()) : null;
+   }
+ 
+   private boolean isViewTable(String catName, String dbName, String tblName) throws MetaException {
+     Query query = null;
+     try {
+       String queryText = "select \"TBL_TYPE\" from " + TBLS + "" +
+           " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " +
+           " where " + TBLS + ".\"TBL_NAME\" = ? and " + DBS + ".\"NAME\" = ? and " + DBS + ".\"CTLG_NAME\" = ?";
+       Object[] params = new Object[] { tblName, dbName, catName };
+       query = pm.newQuery("javax.jdo.query.SQL", queryText);
+       query.setUnique(true);
+       Object result = executeWithArray(query, params, queryText);
+       return (result != null) && result.toString().equals(TableType.VIRTUAL_VIEW.toString());
+     } finally {
+       if (query != null) {
+         query.closeAll();
+       }
+     }
+   }
+ 
+   /**
+    * Get partition ids for the query using direct SQL queries, to avoid the bazillion
+    * queries created by DN retrieving stuff for each object individually.
+    * @param catName MetaStore catalog name
+    * @param dbName MetaStore db name
+    * @param tblName MetaStore table name
+    * @param sqlFilter SQL filter to use. Better be SQL92-compliant.
+    * @param paramsForFilter params for ?-s in SQL filter text. Params must be in order.
+    * @param joinsForFilter if the filter needs additional join statement, they must be in
+    *                       this list. Better be SQL92-compliant.
+    * @param max The maximum number of partitions to return.
+    * @return List of partition ids.
+    */
+   private List<Object> getPartitionIdsViaSqlFilter(
+       String catName, String dbName, String tblName, String sqlFilter,
+       List<? extends Object> paramsForFilter, List<String> joinsForFilter, Integer max)
+       throws MetaException {
+     boolean doTrace = LOG.isDebugEnabled();
+     final String dbNameLcase = dbName.toLowerCase();
+     final String tblNameLcase = tblName.toLowerCase();
+     final String catNameLcase = normalizeSpace(catName).toLowerCase();
+ 
+     // We have to be mindful of order during filtering if we are not returning all partitions.
+     String orderForFilter = (max != null) ? " order by \"PART_NAME\" asc" : "";
+ 
+     String queryText =
+         "select " + PARTITIONS + ".\"PART_ID\" from " + PARTITIONS + ""
+       + "  inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + TBLS + ".\"TBL_ID\" "
+       + "    and " + TBLS + ".\"TBL_NAME\" = ? "
+       + "  inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" "
+       + "     and " + DBS + ".\"NAME\" = ? "
+       + join(joinsForFilter, ' ')
+       + " where " + DBS + ".\"CTLG_NAME\" = ? "
+       + (StringUtils.isBlank(sqlFilter) ? "" : (" and " + sqlFilter)) + orderForFilter;
+     Object[] params = new Object[paramsForFilter.size() + 3];
+     params[0] = tblNameLcase;
+     params[1] = dbNameLcase;
+     params[2] = catNameLcase;
+     for (int i = 0; i < paramsForFilter.size(); ++i) {
+       params[i + 3] = paramsForFilter.get(i);
+     }
+ 
+     long start = doTrace ? System.nanoTime() : 0;
+     Query query = pm.newQuery("javax.jdo.query.SQL", queryText);
+     if (max != null) {
+       query.setRange(0, max.shortValue());
+     }
+     List<Object> sqlResult = executeWithArray(query, params, queryText);
+     long queryTime = doTrace ? System.nanoTime() : 0;
+     timingTrace(doTrace, queryText, start, queryTime);
+     if (sqlResult.isEmpty()) {
+       return Collections.emptyList(); // no partitions, bail early.
+     }
+ 
+     List<Object> result = new ArrayList<Object>(sqlResult.size());
+     for (Object fields : sqlResult) {
+       result.add(extractSqlLong(fields));
+     }
+     query.closeAll();
+     return result;
+   }
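+ 
+   // For orientation, the query assembled above has roughly this shape (hedged example with
+   // one hypothetical pushed-down predicate on partition column index 0; the "order by" part
+   // is only added when a maximum number of partitions is requested):
+   //
+   //   select PARTITIONS."PART_ID" from PARTITIONS
+   //     inner join TBLS on PARTITIONS."TBL_ID" = TBLS."TBL_ID" and TBLS."TBL_NAME" = ?
+   //     inner join DBS on TBLS."DB_ID" = DBS."DB_ID" and DBS."NAME" = ?
+   //     inner join PARTITION_KEY_VALS "FILTER0"
+   //       on "FILTER0"."PART_ID" = PARTITIONS."PART_ID" and "FILTER0"."INTEGER_IDX" = 0
+   //   where DBS."CTLG_NAME" = ? and (("FILTER0"."PART_KEY_VAL" = ?))
+   //   order by "PART_NAME" asc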
+ 
+   /** Should be called with the list short enough to not trip up Oracle/etc. */
+   private List<Partition> getPartitionsFromPartitionIds(String catName, String dbName, String tblName,
+       Boolean isView, List<Object> partIdList) throws MetaException {
+     boolean doTrace = LOG.isDebugEnabled();
+ 
+     int idStringWidth = (int)Math.ceil(Math.log10(partIdList.size())) + 1; // 1 for comma
+     int sbCapacity = partIdList.size() * idStringWidth;
+ 
+     String partIds = getIdListForIn(partIdList);
+ 
+     // Get most of the fields for the IDs provided.
+     // Assume db and table names are the same for all partitions, as provided in arguments.
+     String queryText =
+       "select " + PARTITIONS + ".\"PART_ID\", " + SDS + ".\"SD_ID\", " + SDS + ".\"CD_ID\","
+     + " " + SERDES + ".\"SERDE_ID\", " + PARTITIONS + ".\"CREATE_TIME\","
+     + " " + PARTITIONS + ".\"LAST_ACCESS_TIME\", " + SDS + ".\"INPUT_FORMAT\", " + SDS + ".\"IS_COMPRESSED\","
+     + " " + SDS + ".\"IS_STOREDASSUBDIRECTORIES\", " + SDS + ".\"LOCATION\", " + SDS + ".\"NUM_BUCKETS\","
 -    + " " + SDS + ".\"OUTPUT_FORMAT\", " + SERDES + ".\"NAME\", " + SERDES + ".\"SLIB\" "
 -    + "from " + PARTITIONS + ""
++    + " " + SDS + ".\"OUTPUT_FORMAT\", " + SERDES + ".\"NAME\", " + SERDES + ".\"SLIB\", " + PARTITIONS
++    + ".\"WRITE_ID\"" + " from " + PARTITIONS + ""
+     + "  left outer join " + SDS + " on " + PARTITIONS + ".\"SD_ID\" = " + SDS + ".\"SD_ID\" "
+     + "  left outer join " + SERDES + " on " + SDS + ".\"SERDE_ID\" = " + SERDES + ".\"SERDE_ID\" "
+     + "where \"PART_ID\" in (" + partIds + ") order by \"PART_NAME\" asc";
+     long start = doTrace ? System.nanoTime() : 0;
+     Query query = pm.newQuery("javax.jdo.query.SQL", queryText);
+     List<Object[]> sqlResult = executeWithArray(query, null, queryText);
+     long queryTime = doTrace ? System.nanoTime() : 0;
+     Deadline.checkTimeout();
+ 
+     // Read all the fields and create partitions, SDs and serdes.
+     TreeMap<Long, Partition> partitions = new TreeMap<Long, Partition>();
+     TreeMap<Long, StorageDescriptor> sds = new TreeMap<Long, StorageDescriptor>();
+     TreeMap<Long, SerDeInfo> serdes = new TreeMap<Long, SerDeInfo>();
+     TreeMap<Long, List<FieldSchema>> colss = new TreeMap<Long, List<FieldSchema>>();
+     // Keep order by name, consistent with JDO.
+     ArrayList<Partition> orderedResult = new ArrayList<Partition>(partIdList.size());
+ 
+     // Prepare StringBuilder-s for "in (...)" lists to use in one-to-many queries.
+     StringBuilder sdSb = new StringBuilder(sbCapacity), serdeSb = new StringBuilder(sbCapacity);
+     StringBuilder colsSb = new StringBuilder(7); // We expect that there's only one field schema.
+     tblName = tblName.toLowerCase();
+     dbName = dbName.toLowerCase();
+     catName = normalizeSpace(catName).toLowerCase();
+     for (Object[] fields : sqlResult) {
+       // Here comes the ugly part...
+       long partitionId = extractSqlLong(fields[0]);
+       Long sdId = extractSqlLong(fields[1]);
+       Long colId = extractSqlLong(fields[2]);
+       Long serdeId = extractSqlLong(fields[3]);
+       // A partition must have at least sdId and serdeId set, or nothing set if it's a view.
+       if (sdId == null || serdeId == null) {
+         if (isView == null) {
+           isView = isViewTable(catName, dbName, tblName);
+         }
+         if ((sdId != null || colId != null || serdeId != null) || !isView) {
+           throw new MetaException("Unexpected null for one of the IDs, SD " + sdId +
+                   ", serde " + serdeId + " for a " + (isView ? "" : "non-") + "view");
+         }
+       }
+ 
+       Partition part = new Partition();
+       orderedResult.add(part);
+       // Set the collection fields; some code might not check presence before accessing them.
+       part.setParameters(new HashMap<>());
+       part.setValues(new ArrayList<String>());
+       part.setCatName(catName);
+       part.setDbName(dbName);
+       part.setTableName(tblName);
+       if (fields[4] != null) part.setCreateTime(extractSqlInt(fields[4]));
+       if (fields[5] != null) part.setLastAccessTime(extractSqlInt(fields[5]));
++      Long writeId = extractSqlLong(fields[14]);
++      if (writeId != null) {
++        part.setWriteId(writeId);
++      }
+       partitions.put(partitionId, part);
+ 
++
+       if (sdId == null) continue; // Probably a view.
+       assert serdeId != null;
+ 
+       // We assume each partition has a unique SD.
+       StorageDescriptor sd = new StorageDescriptor();
+       StorageDescriptor oldSd = sds.put(sdId, sd);
+       if (oldSd != null) {
+         throw new MetaException("Partitions reuse SDs; we don't expect that");
+       }
+       // Set the collection fields; some code might not check presence before accessing them.
+       sd.setSortCols(new ArrayList<Order>());
+       sd.setBucketCols(new ArrayList<String>());
+       sd.setParameters(new HashMap<String, String>());
+       sd.setSkewedInfo(new SkewedInfo(new ArrayList<String>(),
+           new ArrayList<List<String>>(), new HashMap<List<String>, String>()));
+       sd.setInputFormat((String)fields[6]);
+       Boolean tmpBoolean = extractSqlBoolean(fields[7]);
+       if (tmpBoolean != null) sd.setCompressed(tmpBoolean);
+       tmpBoolean = extractSqlBoolean(fields[8]);
+       if (tmpBoolean != null) sd.setStoredAsSubDirectories(tmpBoolean);
+       sd.setLocation((String)fields[9]);
+       if (fields[10] != null) sd.setNumBuckets(extractSqlInt(fields[10]));
+       sd.setOutputFormat((String)fields[11]);
+       sdSb.append(sdId).append(",");
+       part.setSd(sd);
+ 
+       if (colId != null) {
+         List<FieldSchema> cols = colss.get(colId);
+         // We expect that colId will be the same for all (or many) SDs.
+         if (cols == null) {
+           cols = new ArrayList<FieldSchema>();
+           colss.put(colId, cols);
+           colsSb.append(colId).append(",");
+         }
+         sd.setCols(cols);
+       }
+ 
+       // We assume each SD has a unique serde.
+       SerDeInfo serde = new SerDeInfo();
+       SerDeInfo oldSerde = serdes.put(serdeId, serde);
+       if (oldSerde != null) {
+         throw new MetaException("SDs reuse serdes; we don't expect that");
+       }
+       serde.setParameters(new HashMap<String, String>());
+       serde.setName((String)fields[12]);
+       serde.setSerializationLib((String)fields[13]);
+       serdeSb.append(serdeId).append(",");
+       sd.setSerdeInfo(serde);
++
+       Deadline.checkTimeout();
+     }
+     query.closeAll();
+     timingTrace(doTrace, queryText, start, queryTime);
+ 
+     // Now get all the one-to-many things. Start with partitions.
+     queryText = "select \"PART_ID\", \"PARAM_KEY\", \"PARAM_VALUE\" from " + PARTITION_PARAMS + ""
+         + " where \"PART_ID\" in (" + partIds + ") and \"PARAM_KEY\" is not null"
+         + " order by \"PART_ID\" asc";
+     loopJoinOrderedResult(partitions, queryText, 0, new ApplyFunc<Partition>() {
+       @Override
+       public void apply(Partition t, Object[] fields) {
+         t.putToParameters((String)fields[1], (String)fields[2]);
+       }});
+     // Perform conversion of null map values
+     for (Partition t : partitions.values()) {
+       t.setParameters(MetaStoreUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings));
+     }
+ 
+     queryText = "select \"PART_ID\", \"PART_KEY_VAL\" from " + PARTITION_KEY_VALS + ""
+         + " where \"PART_ID\" in (" + partIds + ")"
+         + " order by \"PART_ID\" asc, \"INTEGER_IDX\" asc";
+     loopJoinOrderedResult(partitions, queryText, 0, new ApplyFunc<Partition>() {
+       @Override
+       public void apply(Partition t, Object[] fields) {
+         t.addToValues((String)fields[1]);
+       }});
+ 
+     // Prepare IN (blah) lists for the following queries. Cut off the final ','s.
+     if (sdSb.length() == 0) {
+       assert serdeSb.length() == 0 && colsSb.length() == 0;
+       return orderedResult; // No SDs, probably a view.
+     }
+ 
+     String sdIds = trimCommaList(sdSb);
+     String serdeIds = trimCommaList(serdeSb);
+     String colIds = trimCommaList(colsSb);
+ 
+     // Get all the stuff for SD. Don't do empty-list check - we expect partitions do have SDs.
+     queryText = "select \"SD_ID\", \"PARAM_KEY\", \"PARAM_VALUE\" from " + SD_PARAMS + ""
+         + " where \"SD_ID\" in (" + sdIds + ") and \"PARAM_KEY\" is not null"
+         + " order by \"SD_ID\" asc";
+     loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc<StorageDescriptor>() {
+       @Override
+       public void apply(StorageDescriptor t, Object[] fields) {
+         t.putToParameters((String)fields[1], extractSqlClob(fields[2]));
+       }});
+     // Perform conversion of null map values
+     for (StorageDescriptor t : sds.values()) {
+       t.setParameters(MetaStoreUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings));
+     }
+ 
+     queryText = "select \"SD_ID\", \"COLUMN_NAME\", " + SORT_COLS + ".\"ORDER\""
+         + " from " + SORT_COLS + ""
+         + " where \"SD_ID\" in (" + sdIds + ")"
+         + " order by \"SD_ID\" asc, \"INTEGER_IDX\" asc";
+     loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc<StorageDescriptor>() {
+       @Override
+       public void apply(StorageDescriptor t, Object[] fields) {
+         if (fields[2] == null) return;
+         t.addToSortCols(new Order((String)fields[1], extractSqlInt(fields[2])));
+       }});
+ 
+     queryText = "select \"SD_ID\", \"BUCKET_COL_NAME\" from " + BUCKETING_COLS + ""
+         + " where \"SD_ID\" in (" + sdIds + ")"
+         + " order by \"SD_ID\" asc, \"INTEGER_IDX\" asc";
+     loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc<StorageDescriptor>() {
+       @Override
+       public void apply(StorageDescriptor t, Object[] fields) {
+         t.addToBucketCols((String)fields[1]);
+       }});
+ 
+     // Skewed columns stuff.
+     queryText = "select \"SD_ID\", \"SKEWED_COL_NAME\" from " + SKEWED_COL_NAMES + ""
+         + " where \"SD_ID\" in (" + sdIds + ")"
+         + " order by \"SD_ID\" asc, \"INTEGER_IDX\" asc";
+     boolean hasSkewedColumns =
+       loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc<StorageDescriptor>() {
+         @Override
+         public void apply(StorageDescriptor t, Object[] fields) {
+           if (!t.isSetSkewedInfo()) t.setSkewedInfo(new SkewedInfo());
+           t.getSkewedInfo().addToSkewedColNames((String)fields[1]);
+         }}) > 0;
+ 
+     // Assume we don't need to fetch the rest of the skewed column data if we have no columns.
+     if (hasSkewedColumns) {
+       // We are skipping the SKEWED_STRING_LIST table here, as it seems to be totally useless.
+       queryText =
+             "select " + SKEWED_VALUES + ".\"SD_ID_OID\","
+           + "  " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_ID\","
+           + "  " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_VALUE\" "
+           + "from " + SKEWED_VALUES + " "
+           + "  left outer join " + SKEWED_STRING_LIST_VALUES + " on " + SKEWED_VALUES + "."
+           + "\"STRING_LIST_ID_EID\" = " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_ID\" "
+           + "where " + SKEWED_VALUES + ".\"SD_ID_OID\" in (" + sdIds + ") "
+           + "  and " + SKEWED_VALUES + ".\"STRING_LIST_ID_EID\" is not null "
+           + "  and " + SKEWED_VALUES + ".\"INTEGER_IDX\" >= 0 "
+           + "order by " + SKEWED_VALUES + ".\"SD_ID_OID\" asc, " + SKEWED_VALUES + ".\"INTEGER_IDX\" asc,"
+           + "  " + SKEWED_STRING_LIST_VALUES + ".\"INTEGER_IDX\" asc";
+       loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc<StorageDescriptor>() {
+         private Long currentListId;
+         private List<String> currentList;
+         @Override
+         public void apply(StorageDescriptor t, Object[] fields) throws MetaException {
+           if (!t.isSetSkewedInfo()) t.setSkewedInfo(new SkewedInfo());
+           // Note that this is not a typical list accumulator - there's no call to finalize
+           // the last list. Instead we add the list to the SD up front and keep a local
+           // reference so we can continue appending values to it.
+           if (fields[1] == null) {
+             currentList = null; // left outer join produced a list with no values
+             currentListId = null;
+             t.getSkewedInfo().addToSkewedColValues(Collections.<String>emptyList());
+           } else {
+             long fieldsListId = extractSqlLong(fields[1]);
+             if (currentListId == null || fieldsListId != currentListId) {
+               currentList = new ArrayList<String>();
+               currentListId = fieldsListId;
+               t.getSkewedInfo().addToSkewedColValues(currentList);
+             }
+             currentList.add((String)fields[2]);
+           }
+         }});
+ 
+       // We are skipping the SKEWED_STRING_LIST table here, as it seems to be totally useless.
+       queryText =
+             "select " + SKEWED_COL_VALUE_LOC_MAP + ".\"SD_ID\","
+           + " " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_ID\","
+           + " " + SKEWED_COL_VALUE_LOC_MAP + ".\"LOCATION\","
+           + " " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_VALUE\" "
+           + "from " + SKEWED_COL_VALUE_LOC_MAP + ""
+           + "  left outer join " + SKEWED_STRING_LIST_VALUES + " on " + SKEWED_COL_VALUE_LOC_MAP + "."
+           + "\"STRING_LIST_ID_KID\" = " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_ID\" "
+           + "where " + SKEWED_COL_VALUE_LOC_MAP + ".\"SD_ID\" in (" + sdIds + ")"
+           + "  and " + SKEWED_COL_VALUE_LOC_MAP + ".\"STRING_LIST_ID_KID\" is not null "
+           + "order by " + SKEWED_COL_VALUE_LOC_MAP + ".\"SD_ID\" asc,"
+           + "  " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_ID\" asc,"
+           + "  " + SKEWED_STRING_LIST_VALUES + ".\"INTEGER_IDX\" asc";
+ 
+       loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc<StorageDescriptor>() {
+         private Long currentListId;
+         private List<String> currentList;
+         @Override
+         public void apply(StorageDescriptor t, Object[] fields) throws MetaException {
+           if (!t.isSetSkewedInfo()) {
+             SkewedInfo skewedInfo = new SkewedInfo();
+             skewedInfo.setSkewedColValueLocationMaps(new HashMap<List<String>, String>());
+             t.setSkewedInfo(skewedInfo);
+           }
+           Map<List<String>, String> skewMap = t.getSkewedInfo().getSkewedColValueLocationMaps();
+           // Note that this is not a typical list accumulator - there's no call to finalize
+           // the last list. Instead the list is (re)inserted into the map on every row, and a
+           // local reference is kept so we can continue appending values to it.
+           if (fields[1] == null) {
+             currentList = new ArrayList<String>(); // left outer join produced a list with no values
+             currentListId = null;
+           } else {
+             long fieldsListId = extractSqlLong(fields[1]);
+             if (currentListId == null || fieldsListId != currentListId) {
+               currentList = new ArrayList<String>();
+               currentListId = fieldsListId;
+             } else {
+               skewMap.remove(currentList); // value based compare.. remove first
+             }
+             currentList.add((String)fields[3]);
+           }
+           skewMap.put(currentList, (String)fields[2]);
+         }});
+     } // if (hasSkewedColumns)
+ 
+     // Get FieldSchema stuff if any.
+     if (!colss.isEmpty()) {
+       // We are skipping the CDS table here, as it seems to be totally useless.
+       queryText = "select \"CD_ID\", \"COMMENT\", \"COLUMN_NAME\", \"TYPE_NAME\""
+           + " from " + COLUMNS_V2 + " where \"CD_ID\" in (" + colIds + ")"
+           + " order by \"CD_ID\" asc, \"INTEGER_IDX\" asc";
+       loopJoinOrderedResult(colss, queryText, 0, new ApplyFunc<List<FieldSchema>>() {
+         @Override
+         public void apply(List<FieldSchema> t, Object[] fields) {
+           t.add(new FieldSchema((String)fields[2], extractSqlClob(fields[3]), (String)fields[1]));
+         }});
+     }
+ 
+     // Finally, get all the stuff for serdes - just the params.
+     queryText = "select \"SERDE_ID\", \"PARAM_KEY\", \"PARAM_VALUE\" from " + SERDE_PARAMS + ""
+         + " where \"SERDE_ID\" in (" + serdeIds + ") and \"PARAM_KEY\" is not null"
+         + " order by \"SERDE_ID\" asc";
+     loopJoinOrderedResult(serdes, queryText, 0, new ApplyFunc<SerDeInfo>() {
+       @Override
+       public void apply(SerDeInfo t, Object[] fields) {
+         t.putToParameters((String)fields[1], extractSqlClob(fields[2]));
+       }});
+     // Perform conversion of null map values
+     for (SerDeInfo t : serdes.values()) {
+       t.setParameters(MetaStoreUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings));
+     }
+ 
+     return orderedResult;
+   }
+ 
+   public int getNumPartitionsViaSqlFilter(SqlFilterForPushdown filter) throws MetaException {
+     boolean doTrace = LOG.isDebugEnabled();
+     String catName = filter.table.getCatName().toLowerCase();
+     String dbName = filter.table.getDbName().toLowerCase();
+     String tblName = filter.table.getTableName().toLowerCase();
+ 
+     // Get number of partitions by doing count on PART_ID.
+     String queryText = "select count(" + PARTITIONS + ".\"PART_ID\") from " + PARTITIONS + ""
+       + "  inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + TBLS + ".\"TBL_ID\" "
+       + "    and " + TBLS + ".\"TBL_NAME\" = ? "
+       + "  inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" "
+       + "     and " + DBS + ".\"NAME\" = ? "
+       + join(filter.joins, ' ')
+       + " where " + DBS + ".\"CTLG_NAME\" = ? "
+       + (filter.filter == null || filter.filter.trim().isEmpty() ? "" : (" and " + filter.filter));
+ 
+     Object[] params = new Object[filter.params.size() + 3];
+     params[0] = tblName;
+     params[1] = dbName;
+     params[2] = catName;
+     for (int i = 0; i < filter.params.size(); ++i) {
+       params[i + 3] = filter.params.get(i);
+     }
+ 
+     long start = doTrace ? System.nanoTime() : 0;
+     Query query = pm.newQuery("javax.jdo.query.SQL", queryText);
+     query.setUnique(true);
+     int sqlResult = extractSqlInt(query.executeWithArray(params));
+     long queryTime = doTrace ? System.nanoTime() : 0;
+     timingTrace(doTrace, queryText, start, queryTime);
+     return sqlResult;
+   }
+ 
+ 
+   private void timingTrace(boolean doTrace, String queryText, long start, long queryTime) {
+     if (!doTrace) return;
+     LOG.debug("Direct SQL query in " + (queryTime - start) / 1000000.0 + "ms + " +
+         (System.nanoTime() - queryTime) / 1000000.0 + "ms, the query is [" + queryText + "]");
+   }
+ 
+   static Long extractSqlLong(Object obj) throws MetaException {
+     if (obj == null) return null;
+     if (!(obj instanceof Number)) {
+       throw new MetaException("Expected numeric type but got " + obj.getClass().getName());
+     }
+     return ((Number)obj).longValue();
+   }
+ 
+   /**
+    * Convert a boolean value returned from the RDBMS to a Java Boolean object.
+    * MySQL has booleans, but e.g. Derby uses 'Y'/'N' mapping.
+    *
+    * @param value
+    *          column value from the database
+    * @return The Boolean value of the database column value, null if the column
+    *         value is null
+    * @throws MetaException
+    *           if the column value cannot be converted into a Boolean object
+    */
+   private static Boolean extractSqlBoolean(Object value) throws MetaException {
+     if (value == null) {
+       return null;
+     }
+     if (value instanceof Boolean) {
+       return (Boolean)value;
+     }
+     if (value instanceof String) {
+       try {
+         return BooleanUtils.toBooleanObject((String) value, "Y", "N", null);
+       } catch (IllegalArgumentException iae) {
+         // NOOP
+       }
+     }
+     throw new MetaException("Cannot extract boolean from column value " + value);
+   }
+ 
+   private int extractSqlInt(Object field) {
+     return ((Number)field).intValue();
+   }
+ 
+   private String extractSqlString(Object value) {
+     if (value == null) return null;
+     return value.toString();
+   }
+ 
+   static Double extractSqlDouble(Object obj) throws MetaException {
+     if (obj == null)
+       return null;
+     if (!(obj instanceof Number)) {
+       throw new MetaException("Expected numeric type but got " + obj.getClass().getName());
+     }
+     return ((Number) obj).doubleValue();
+   }
+ 
+   private String extractSqlClob(Object value) {
+     if (value == null) return null;
+     try {
+       if (value instanceof Clob) {
+         // we trim the Clob value to a max length an int can hold
+         int maxLength = (((Clob)value).length() < Integer.MAX_VALUE - 2) ? (int)((Clob)value).length() : Integer.MAX_VALUE - 2;
+         return ((Clob)value).getSubString(1L, maxLength);
+       } else {
+         return value.toString();
+       }
+     } catch (SQLException sqle) {
+       return null;
+     }
+   }
+ 
+   static byte[] extractSqlBlob(Object value) throws MetaException {
+     if (value == null)
+       return null;
+     if (value instanceof Blob) {
+       //derby, oracle
+       try {
+         // getBytes function says: pos the ordinal position of the first byte in
+         // the BLOB value to be extracted; the first byte is at position 1
+         return ((Blob) value).getBytes(1, (int) ((Blob) value).length());
+       } catch (SQLException e) {
+         throw new MetaException("Encountered error while processing blob.");
+       }
+     }
+     else if (value instanceof byte[]) {
+       // mysql, postgres, sql server
+       return (byte[]) value;
+     }
+     else {
+       // this may happen when enablebitvector is false
+       LOG.debug("Expected blob type but got " + value.getClass().getName());
+       return null;
+     }
+   }
+ 
+   /**
+    * Helper method that prepares a "SOMETHING_ID in (...)" list for use in subsequent queries.
+    * @param objectIds the objectId collection
+    * @return The concatenated list
+    * @throws MetaException If the list contains wrong data
+    */
+   private static String getIdListForIn(List<Object> objectIds) throws MetaException {
+     return objectIds.stream()
+                .map(i -> i.toString())
+                .collect(Collectors.joining(","));
+   }
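+ 
+   // Example (illustrative): getIdListForIn(Arrays.asList(101L, 102L, 103L)) returns
+   // "101,102,103", which is then embedded as ... where "PART_ID" in (101,102,103).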
+ 
+   private static String trimCommaList(StringBuilder sb) {
+     if (sb.length() > 0) {
+       sb.setLength(sb.length() - 1);
+     }
+     return sb.toString();
+   }
+ 
+   private abstract class ApplyFunc<Target> {
+     public abstract void apply(Target t, Object[] fields) throws MetaException;
+   }
+ 
+   /**
+    * Merges the result of a PM SQL query into a tree of objects.
+    * Essentially it's an object join. DN could do this for us, but it issues queries
+    * separately for every object, which is suboptimal.
+    * @param tree The object tree, by ID.
+    * @param queryText The query text.
+    * @param keyIndex Index of the Long column corresponding to the map ID in query result rows.
+    * @param func The function that is called on each (object,row) pair with the same id.
+    * @return the count of results returned from the query.
+    */
+   private <T> int loopJoinOrderedResult(TreeMap<Long, T> tree,
+       String queryText, int keyIndex, ApplyFunc<T> func) throws MetaException {
+     boolean doTrace = LOG.isDebugEnabled();
+     long start = doTrace ? System.nanoTime() : 0;
+     Query query = pm.newQuery("javax.jdo.query.SQL", queryText);
+     Object result = query.execute();
+     long queryTime = doTrace ? System.nanoTime() : 0;
+     if (result == null) {
+       query.closeAll();
+       return 0;
+     }
+     List<Object[]> list = ensureList(result);
+     Iterator<Object[]> iter = list.iterator();
+     Object[] fields = null;
+     for (Map.Entry<Long, T> entry : tree.entrySet()) {
+       if (fields == null && !iter.hasNext()) break;
+       long id = entry.getKey();
+       while (fields != null || iter.hasNext()) {
+         if (fields == null) {
+           fields = iter.next();
+         }
+         long nestedId = extractSqlLong(fields[keyIndex]);
+         if (nestedId < id) throw new MetaException("Found entries for unknown ID " + nestedId);
+         if (nestedId > id) break; // fields belong to one of the next entries
+         func.apply(entry.getValue(), fields);
+         fields = null;
+       }
+       Deadline.checkTimeout();
+     }
+     int rv = list.size();
+     query.closeAll();
+     timingTrace(doTrace, queryText, start, queryTime);
+     return rv;
+   }
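+ 
+   // Minimal standalone sketch of the merge-join idea above (hedged: illustration only, not
+   // the metastore code path). Both sides are ordered by id, so a single forward pass over
+   // the rows is enough:
+   //
+   //   TreeMap<Long, List<String>> tree = new TreeMap<>();          // objects keyed by id
+   //   tree.put(1L, new ArrayList<>());
+   //   tree.put(3L, new ArrayList<>());
+   //   List<Object[]> rows = Arrays.asList(                         // rows sorted by id (col 0)
+   //       new Object[] {1L, "a"}, new Object[] {1L, "b"}, new Object[] {3L, "c"});
+   //   Iterator<Object[]> it = rows.iterator();
+   //   Object[] row = it.hasNext() ? it.next() : null;
+   //   for (Map.Entry<Long, List<String>> e : tree.entrySet()) {
+   //     while (row != null && ((Long) row[0]).longValue() == e.getKey()) {
+   //       e.getValue().add((String) row[1]);                       // apply row to its object
+   //       row = it.hasNext() ? it.next() : null;
+   //     }
+   //   }
+   //   // tree is now {1=[a, b], 3=[c]}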
+ 
+   private static class PartitionFilterGenerator extends TreeVisitor {
+     private final Table table;
+     private final FilterBuilder filterBuffer;
+     private final List<Object> params;
+     private final List<String> joins;
+     private final boolean dbHasJoinCastBug;
+     private final String defaultPartName;
+     private final DatabaseProduct dbType;
+     private final String PARTITION_KEY_VALS, PARTITIONS, DBS, TBLS;
+ 
+     private PartitionFilterGenerator(Table table, List<Object> params, List<String> joins,
+         boolean dbHasJoinCastBug, String defaultPartName, DatabaseProduct dbType, String schema) {
+       this.table = table;
+       this.params = params;
+       this.joins = joins;
+       this.dbHasJoinCastBug = dbHasJoinCastBug;
+       this.filterBuffer = new FilterBuilder(false);
+       this.defaultPartName = defaultPartName;
+       this.dbType = dbType;
+       this.PARTITION_KEY_VALS = getFullyQualifiedName(schema, "PARTITION_KEY_VALS");
+       this.PARTITIONS = getFullyQualifiedName(schema, "PARTITIONS");
+       this.DBS = getFullyQualifiedName(schema, "DBS");
+       this.TBLS = getFullyQualifiedName(schema, "TBLS");
+     }
+ 
+     /**
+      * Generate the ANSI SQL92 filter for the given expression tree
+      * @param table the table being queried
+      * @param params the ordered parameters for the resulting expression
+      * @param joins the joins necessary for the resulting expression
+      * @return the string representation of the expression tree
+      */
+     private static String generateSqlFilter(Table table, ExpressionTree tree, List<Object> params,
+         List<String> joins, boolean dbHasJoinCastBug, String defaultPartName,
+         DatabaseProduct dbType, String schema) throws MetaException {
+       assert table != null;
+       if (tree == null) {
+         // consistent with other APIs like makeExpressionTree, null is returned to indicate that
+         // the filter could not be pushed down due to a parsing issue, etc.
+         return null;
+       }
+       if (tree.getRoot() == null) {
+         return "";
+       }
+       PartitionFilterGenerator visitor = new PartitionFilterGenerator(
+           table, params, joins, dbHasJoinCastBug, defaultPartName, dbType, schema);
+       tree.accept(visitor);
+       if (visitor.filterBuffer.hasError()) {
+         LOG.info("Unable to push down SQL filter: " + visitor.filterBuffer.getErrorMessage());
+         return null;
+       }
+ 
+       // Some joins might be null (see processNode for LeafNode), clean them up.
+       for (int i = 0; i < joins.size(); ++i) {
+         if (joins.get(i) != null) continue;
+         joins.remove(i--);
+       }
+       return "(" + visitor.filterBuffer.getFilter() + ")";
+     }
+ 
+     @Override
+     protected void beginTreeNode(TreeNode node) throws MetaException {
+       filterBuffer.append(" (");
+     }
+ 
+     @Override
+     protected void midTreeNode(TreeNode node) throws MetaException {
+       filterBuffer.append((node.getAndOr() == LogicalOperator.AND) ? " and " : " or ");
+     }
+ 
+     @Override
+     protected void endTreeNode(TreeNode node) throws MetaException {
+       filterBuffer.append(") ");
+     }
+ 
+     @Override
+     protected boolean shouldStop() {
+       return filterBuffer.hasError();
+     }
+ 
+     private static enum FilterType {
+       Integral,
+       String,
+       Date,
+ 
+       Invalid;
+ 
+       static FilterType fromType(String colTypeStr) {
+         if (colTypeStr.equals(ColumnType.STRING_TYPE_NAME)) {
+           return FilterType.String;
+         } else if (colTypeStr.equals(ColumnType.DATE_TYPE_NAME)) {
+           return FilterType.Date;
+         } else if (ColumnType.IntegralTypes.contains(colTypeStr)) {
+           return FilterType.Integral;
+         }
+         return FilterType.Invalid;
+       }
+ 
+       public static FilterType fromClass(Object value) {
+         if (value instanceof String) {
+           return FilterType.String;
+         } else if (value instanceof Long) {
+           return FilterType.Integral;
+         } else if (value instanceof java.sql.Date) {
+           return FilterType.Date;
+         }
+         return FilterType.Invalid;
+       }
+     }
+ 
+     @Override
+     public void visit(LeafNode node) throws MetaException {
+       if (node.operator == Operator.LIKE) {
+         filterBuffer.setError("LIKE is not supported for SQL filter pushdown");
+         return;
+       }
+       int partColCount = table.getPartitionKeys().size();
+       int partColIndex = node.getPartColIndexForFilter(table, filterBuffer);
+       if (filterBuffer.hasError()) return;
+ 
+       // We skipped 'like', other ops should all work as long as the types are right.
+       String colTypeStr = table.getPartitionKeys().get(partColIndex).getType();
+       FilterType colType = FilterType.fromType(colTypeStr);
+       if (colType == FilterType.Invalid) {
+         filterBuffer.setError("Filter pushdown not supported for type " + colTypeStr);
+         return;
+       }
+       FilterType valType = FilterType.fromClass(node.value);
+       Object nodeValue = node.value;
+       if (valType == FilterType.Invalid) {
+         filterBuffer.setError("Filter pushdown not supported for value " + node.value.getClass());
+         return;
+       }
+ 
+       // If Filter.g does date parsing for quoted strings, we'd need to verify there's no
+       // type mismatch when a string column is filtered by a string that looks like a date.
+       if (colType == FilterType.Date && valType == FilterType.String) {
+         // Filter.g cannot parse a quoted date; try to parse date here too.
+         try {
+           nodeValue = MetaStoreUtils.PARTITION_DATE_FORMAT.get().parse((String)nodeValue);
+           valType = FilterType.Date;
+         } catch (ParseException pe) { // do nothing, handled below - types will mismatch
+         }
+       }
+ 
+       // Format date values so that we are sure to compare the canonical representation.
+       if (valType == FilterType.Date) {
+         nodeValue = MetaStoreUtils.PARTITION_DATE_FORMAT.get().format(nodeValue);
+       }
+ 
+       if (colType != valType) {
+         // It's not clear how filtering for e.g. "stringCol > 5" should work (which side is
+         // to be coerced?). Let the expression evaluation sort this one out, not metastore.
+         filterBuffer.setError("Cannot push down filter for "
+             + colTypeStr + " column and value " + nodeValue.getClass());
+         return;
+       }
+ 
+       if (joins.isEmpty()) {
+         // There's a fixed number of partition cols that we might have filters on. To avoid
+         // joining multiple times for one column (if there are several filters on it), we will
+         // keep numCols elements in the list, one for each column; we will fill it with nulls,
+         // put each join at a corresponding index when necessary, and remove nulls in the end.
+         for (int i = 0; i < partColCount; ++i) {
+           joins.add(null);
+         }
+       }
+       if (joins.get(partColIndex) == null) {
+         joins.set(partColIndex, "inner join " + PARTITION_KEY_VALS + " \"FILTER" + partColIndex
+             + "\" on \"FILTER"  + partColIndex + "\".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
+             + " and \"FILTER" + partColIndex + "\".\"INTEGER_IDX\" = " + partColIndex);
+       }
+ 
+       // Build the filter and add parameters linearly; we are traversing leaf nodes LTR.
+       String tableValue = "\"FILTER" + partColIndex + "\".\"PART_KEY_VAL\"";
+ 
+       if (node.isReverseOrder) {
+         params.add(nodeValue);
+       }
+       String tableColumn = tableValue;
+       if (colType != FilterType.String) {
+         // The underlying database field is varchar, we need to compare numbers.
+         if (colType == FilterType.Integral) {
+           tableValue = "cast(" + tableValue + " as decimal(21,0))";
+         } else if (colType == FilterType.Date) {
+           if (dbType == DatabaseProduct.ORACLE) {
+             // Oracle requires special treatment... as usual.
+             tableValue = "TO_DATE(" + tableValue + ", 'YYYY-MM-DD')";
+           } else {
+             tableValue = "cast(" + tableValue + " as date)";
+           }
+         }
+ 
+         // Workaround for HIVE_DEFAULT_PARTITION - ignore it like JDO does, for now.
+         String tableValue0 = tableValue;
+         tableValue = "(case when " + tableColumn + " <> ?";
+         params.add(defaultPartName);
+ 
+         if (dbHasJoinCastBug) {
+           // This is a workaround for DERBY-6358 and Oracle bug; it is pretty horrible.
+           tableValue += (" and " + TBLS + ".\"TBL_NAME\" = ? and " + DBS + ".\"NAME\" = ? and "
+               + DBS + ".\"CTLG_NAME\" = ? and "
+               + "\"FILTER" + partColIndex + "\".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\" and "
+                 + "\"FILTER" + partColIndex + "\".\"INTEGER_IDX\" = " + partColIndex);
+           params.add(table.getTableName().toLowerCase());
+           params.add(table.getDbName().toLowerCase());
+           params.add(table.getCatName().toLowerCase());
+         }
+         tableValue += " then " + tableValue0 + " else null end)";
+       }
+       if (!node.isReverseOrder) {
+         params.add(nodeValue);
+       }
+ 
+       filterBuffer.append(node.isReverseOrder
+           ? "(? " + node.operator.getSqlOp() + " " + tableValue + ")"
+           : "(" + tableValue + " " + node.operator.getSqlOp() + " ?)");
+     }
+   }
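+ 
+   // For reference, the fragments produced by the generator above look roughly like this
+   // (hedged examples; the exact text also depends on the column index, the default
+   // partition name parameter and the dbHasJoinCastBug workaround):
+   //
+   //   string column, e.g. ds = '2018-07-19':
+   //     ("FILTER0"."PART_KEY_VAL" = ?)
+   //
+   //   integral column, e.g. hr = 11:
+   //     ((case when "FILTER0"."PART_KEY_VAL" <> ?
+   //         then cast("FILTER0"."PART_KEY_VAL" as decimal(21,0)) else null end) = ?)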
+ 
+   /**
+    * Retrieve the column statistics for the specified columns of the table. NULL
+    * is returned if the columns are not provided.
+    * @param catName     the catalog name of the table
+    * @param dbName      the database name of the table
+    * @param tableName   the table name
+    * @param colNames    the list of the column names
+    * @return            the column statistics for the specified columns
+    * @throws MetaException
+    */
+   public ColumnStatistics getTableStats(final String catName, final String dbName,
+                                         final String tableName, List<String> colNames,
+                                         boolean enableBitVector) throws MetaException {
+     if (colNames == null || colNames.isEmpty()) {
+       return null;
+     }
+     final boolean doTrace = LOG.isDebugEnabled();
+     final String queryText0 = "select " + getStatsList(enableBitVector) + " from " + TAB_COL_STATS
+           + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? and \"COLUMN_NAME\" in (";
+     Batchable<String, Object[]> b = new Batchable<String, Object[]>() {
+       @Override
+       public List<Object[]> run(List<String> input) throws MetaException {
+         String queryText = queryText0 + makeParams(input.size()) + ")";
+         Object[] params = new Object[input.size() + 3];
+         params[0] = catName;
+         params[1] = dbName;
+         params[2] = tableName;
+         for (int i = 0; i < input.size(); ++i) {
+           params[i + 3] = input.get(i);
+         }
+         long start = doTrace ? System.nanoTime() : 0;
+         Query query = pm.newQuery("javax.jdo.query.SQL", queryText);
+         Object qResult = executeWithArray(query, params, queryText);
+         timingTrace(doTrace, queryText0 + "...)", start, (doTrace ? System.nanoTime() : 0));
+         if (qResult == null) {
+           query.closeAll();
+           return null;
+         }
+         addQueryAfterUse(query);
+         return ensureList(qResult);
+       }
+     };
+     List<Object[]> list = Batchable.runBatched(batchSize, colNames, b);
+     if (list.isEmpty()) {
+       return null;
+     }
+     ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tableName);
+     csd.setCatName(catName);
+     ColumnStatistics result = makeColumnStats(list, csd, 0);
+     b.closeAllQueries();
+     return result;
+   }
+ 
+   public AggrStats aggrColStatsForPartitions(String catName, String dbName, String tableName,
+       List<String> partNames, List<String> colNames, boolean useDensityFunctionForNDVEstimation,
+       double ndvTuner, boolean enableBitVector) throws MetaException {
+     if (colNames.isEmpty() || partNames.isEmpty()) {
+       LOG.debug("colNames is empty or partNames is empty: short-circuiting stats eval");
+       return new AggrStats(Collections.<ColumnStatisticsObj>emptyList(), 0); // Nothing to aggregate
+     }
+     long partsFound = 0;
+     List<ColumnStatisticsObj> colStatsList;
+     // Try to read from the cache first
+     if (isAggregateStatsCacheEnabled
+         && (partNames.size() < aggrStatsCache.getMaxPartsPerCacheNode())) {
+       AggrColStats colStatsAggrCached;
+       List<ColumnStatisticsObj> colStatsAggrFromDB;
+       int maxPartsPerCacheNode = aggrStatsCache.getMaxPartsPerCacheNode();
+       double fpp = aggrStatsCache.getFalsePositiveProbability();
+       colStatsList = new ArrayList<ColumnStatisticsObj>();
+       // Bloom filter for the new node that we will eventually add to the cache
+       BloomFilter bloomFilter = createPartsBloomFilter(maxPartsPerCacheNode, fpp, partNames);
+       boolean computePartsFound = true;
+       for (String colName : colNames) {
+         // Check the cache first
+         colStatsAggrCached = aggrStatsCache.get(catName, dbName, tableName, colName, partNames);
+         if (colStatsAggrCached != null) {
+           colStatsList.add(colStatsAggrCached.getColStats());
+           partsFound = colStatsAggrCached.getNumPartsCached();
+         } else {
+           if (computePartsFound) {
+             partsFound = partsFoundForPartitions(catName, dbName, tableName, partNames, colNames);
+             computePartsFound = false;
+           }
+           List<String> colNamesForDB = new ArrayList<>();
+           colNamesForDB.add(colName);
+           // Read aggregated stats for one column
+           colStatsAggrFromDB =
+               columnStatisticsObjForPartitions(catName, dbName, tableName, partNames, colNamesForDB,
+                   partsFound, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector);
+           if (!colStatsAggrFromDB.isEmpty()) {
+             ColumnStatisticsObj colStatsAggr = colStatsAggrFromDB.get(0);
+             colStatsList.add(colStatsAggr);
+             // Update the cache to add this new aggregate node
+             aggrStatsCache.add(catName, dbName, tableName, colName, partsFound, colStatsAggr, bloomFilter);
+           }
+         }
+       }
+     } else {
+       partsFound = partsFoundForPartitions(catName, dbName, tableName, partNames, colNames);
+       colStatsList =
+           columnStatisticsObjForPartitions(catName, dbName, tableName, partNames, colNames, partsFound,
+               useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector);
+     }
+     LOG.info("useDensityFunctionForNDVEstimation = " + useDensityFunctionForNDVEstimation
+         + "\npartsFound = " + partsFound + "\nColumnStatisticsObj = "
+         + Arrays.toString(colStatsList.toArray()));
+     return new AggrStats(colStatsList, partsFound);
+   }
+ 
+   private BloomFilter createPartsBloomFilter(int maxPartsPerCacheNode, double fpp,
+       List<String> partNames) {
+     BloomFilter bloomFilter = new BloomFilter(maxPartsPerCacheNode, fpp);
+     for (String partName : partNames) {
+       bloomFilter.add(partName.getBytes());
+     }
+     return bloomFilter;
+   }
+ 
+   private long partsFoundForPartitions(
+       final String catName, final String dbName, final String tableName,
+       final List<String> partNames, List<String> colNames) throws MetaException {
+     assert !colNames.isEmpty() && !partNames.isEmpty();
+     final boolean doTrace = LOG.isDebugEnabled();
+     final String queryText0  = "select count(\"COLUMN_NAME\") from " + PART_COL_STATS + ""
+         + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "
+         + " and \"COLUMN_NAME\" in (%1$s) and \"PARTITION_NAME\" in (%2$s)"
+         + " group by \"PARTITION_NAME\"";
+     List<Long> allCounts = Batchable.runBatched(batchSize, colNames, new Batchable<String, Long>() {
+       @Override
+       public List<Long> run(final List<String> inputColName) throws MetaException {
+         return Batchable.runBatched(batchSize, partNames, new Batchable<String, Long>() {
+           @Override
+           public List<Long> run(List<String> inputPartNames) throws MetaException {
+             long partsFound = 0;
+             String queryText = String.format(queryText0,
+                 makeParams(inputColName.size()), makeParams(inputPartNames.size()));
+             long start = doTrace ? System.nanoTime() : 0;
+             Query query = pm.newQuery("javax.jdo.query.SQL", queryText);
+             try {
+               Object qResult = executeWithArray(query, prepareParams(
+                   catName, dbName, tableName, inputPartNames, inputColName), queryText);
+               long end = doTrace ? System.nanoTime() : 0;
+               timingTrace(doTrace, queryText, start, end);
+               ForwardQueryResult<?> fqr = (ForwardQueryResult<?>) qResult;
+               Iterator<?> iter = fqr.iterator();
+               while (iter.hasNext()) {
+                 if (extractSqlLong(iter.next()) == inputColName.size()) {
+                   partsFound++;
+                 }
+               }
+               return Lists.<Long>newArrayList(partsFound);
+             } finally {
+               query.closeAll();
+             }
+           }
+         });
+       }
+     });
+     long partsFound = 0;
+     for (Long val : allCounts) {
+       partsFound += val;
+     }
+     return partsFound;
+   }
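+ 
+   // In the count query above, a partition only counts as "found" when it has a stats row
+   // for every requested column, i.e. when its per-partition group count equals
+   // inputColName.size(); partitions with only partial column stats are ignored.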
+ 
+   private List<ColumnStatisticsObj> columnStatisticsObjForPartitions(
+       final String catName, final String dbName,
+     final String tableName, final List<String> partNames, List<String> colNames, long partsFound,
+     final boolean useDensityFunctionForNDVEstimation, final double ndvTuner, final boolean enableBitVector) throws MetaException {
+     final boolean areAllPartsFound = (partsFound == partNames.size());
+     return Batchable.runBatched(batchSize, colNames, new Batchable<String, ColumnStatisticsObj>() {
+       @Override
+       public List<ColumnStatisticsObj> run(final List<String> inputColNames) throws MetaException {
+         return Batchable.runBatched(batchSize, partNames, new Batchable<String, ColumnStatisticsObj>() {
+           @Override
+           public List<ColumnStatisticsObj> run(List<String> inputPartNames) throws MetaException {
+             return columnStatisticsObjForPartitionsBatch(catName, dbName, tableName, inputPartNames,
+                 inputColNames, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector);
+           }
+         });
+       }
+     });
+   }
+ 
+   public List<ColStatsObjWithSourceInfo> getColStatsForAllTablePartitions(String catName, String dbName,
+       boolean enableBitVector) throws MetaException {
+     String queryText = "select \"TABLE_NAME\", \"PARTITION_NAME\", " + getStatsList(enableBitVector)
+         + " from " + " " + PART_COL_STATS + " where \"DB_NAME\" = ? and \"CAT_NAME\" = ?";
+     long start = 0;
+     long end = 0;
+     Query query = null;
+     boolean doTrace = LOG.isDebugEnabled();
+     Object qResult = null;
+     start = doTrace ? System.nanoTime() : 0;
+     List<ColStatsObjWithSourceInfo> colStatsForDB = new ArrayList<ColStatsObjWithSourceInfo>();
+     try {
+       query = pm.newQuery("javax.jdo.query.SQL", queryText);
+       qResult = executeWithArray(query, new Object[] { dbName, catName }, queryText);
+       if (qResult == null) {
+         query.closeAll();
+         return colStatsForDB;
+       }
+       end = doTrace ? System.nanoTime() : 0;
+       timingTrace(doTrace, queryText, start, end);
+       List<Object[]> list = ensureList(qResult);
+       for (Object[] row : list) {
+         String tblName = (String) row[0];
+         String partName = (String) row[1];
+         ColumnStatisticsObj colStatObj = prepareCSObj(row, 2);
+         colStatsForDB.add(new ColStatsObjWithSourceInfo(colStatObj, catName, dbName, tblName, partName));
+         Deadline.checkTimeout();
+       }
+     } finally {
+       query.closeAll();
+     }
+     return colStatsForDB;
+   }
+ 
+   /** Should be called with the list short enough to not trip up Oracle/etc. */
+   private List<ColumnStatisticsObj> columnStatisticsObjForPartitionsBatch(String catName, String dbName,
+       String tableName, List<String> partNames, List<String> colNames, boolean areAllPartsFound,
+       boolean useDensityFunctionForNDVEstimation, double ndvTuner, boolean enableBitVector)
+       throws MetaException {
+     if (enableBitVector) {
+       return aggrStatsUseJava(catName, dbName, tableName, partNames, colNames, areAllPartsFound,
+           useDensityFunctionForNDVEstimation, ndvTuner);
+     } else {
+       return aggrStatsUseDB(catName, dbName, tableName, partNames, colNames, areAllPartsFound,
+           useDensityFunctionForNDVEstimation, ndvTuner);
+     }
+   }
+ 
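The dispatch above routes bit-vector stats through the Java path: per-partition NDV sketches have to be merged register by register, which cannot be expressed as a SQL aggregate over PART_COL_STATS, whereas the plain min/max/sum/avg case can be pushed down to the database. The toy class below (SketchMergeDemo, an invented name) only illustrates why such a merge is a Java-side operation; it is not Hive's actual NDV estimator code.

import java.util.Arrays;

public class SketchMergeDemo {
  // Toy HyperLogLog-style merge: the union of two sketches is the element-wise max
  // of their registers -- an operation with no counterpart among SQL aggregates,
  // which is why bit-vector stats are fetched and merged in Java.
  static byte[] merge(byte[] a, byte[] b) {
    byte[] out = new byte[a.length];
    for (int i = 0; i < a.length; i++) {
      out[i] = (byte) Math.max(a[i], b[i]);
    }
    return out;
  }

  public static void main(String[] args) {
    byte[] partition1 = {3, 0, 5, 1};
    byte[] partition2 = {2, 4, 1, 1};
    System.out.println(Arrays.toString(merge(partition1, partition2))); // [3, 4, 5, 1]
  }
}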
+   private List<ColumnStatisticsObj> aggrStatsUseJava(String catName, String dbName, String tableName,
+       List<String> partNames, List<String> colNames, boolean areAllPartsFound,
+       boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException {
+     // 1. get all the stats for colNames in partNames;
+     List<ColumnStatistics> partStats =
+         getPartitionStats(catName, dbName, tableName, partNames, colNames, true);
+     // 2. use util function to aggr stats
+     return MetaStoreUtils.aggrPartitionStats(partStats, catName, dbName, tableName, partNames, colNames,
+         areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner);
+   }
+ 
+   private List<ColumnStatisticsObj> aggrStatsUseDB(String catName, String dbName,
+       String tableName, List<String> partNames, List<String> colNames, boolean areAllPartsFound,
+       boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException {
+     // TODO: all the extrapolation logic should be moved out of this class,
+     // only mechanical data retrieval should remain here.
+     String commonPrefix = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", "
+         + "min(\"LONG_LOW_VALUE\"), max(\"LONG_HIGH_VALUE\"), min(\"DOUBLE_LOW_VALUE\"), max(\"DOUBLE_HIGH_VALUE\"), "
+         + "min(cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal)), max(cast(\"BIG_DECIMAL_HIGH_VALUE\" as decimal)), "
+         + "sum(\"NUM_NULLS\"), max(\"NUM_DISTINCTS\"), "
+         + "max(\"AVG_COL_LEN\"), max(\"MAX_COL_LEN\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\"), "
+         // The following data is used to compute a partitioned table's NDV from the
+         // partitions' NDVs when useDensityFunctionForNDVEstimation = true. A global NDV cannot be
+         // derived exactly from partition NDVs, because the domains of a column's values in two
+         // partitions can overlap. If there is no overlap, the global NDV is simply the sum of
+         // the partition NDVs (UpperBound). If there is some overlap, the global NDV can be
+         // anywhere between that sum (no overlap at all) and the largest single partition NDV
+         // (every other partition's domain is a subset of that partition's domain) (LowerBound).
+         // Under a uniform-distribution assumption, however, we can roughly estimate the global
+         // NDV by leveraging the min/max values and the average density.
+         // We also keep the estimate sensible by comparing it to the
+         // UpperBound (calculated by "sum(\"NUM_DISTINCTS\")")
+         // and the LowerBound (calculated by "max(\"NUM_DISTINCTS\")").
+         + "avg((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" as decimal)),"
+         + "avg((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
+         + "avg((cast(\"BIG_DECIMAL_HIGH_VALUE\" as decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\"),"
+         + "sum(\"NUM_DISTINCTS\")" + " from " + PART_COL_STATS + ""
+         + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? ";
+     String queryText = null;
+     long start = 0;
+     long end = 0;
+     Query query = null;
+     boolean doTrace = LOG.isDebugEnabled();
+     Object qResult = null;
+     ForwardQueryResult<?> fqr = null;
+     // If stats exist for every column of every partition,
+     // extrapolation is not needed.
+     if (areAllPartsFound) {
+       queryText = commonPrefix + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")"
+           + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+           + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
+       start = doTrace ? System.nanoTime() : 0;
+       query = pm.newQuery("javax.jdo.query.SQL", queryText);
+       qResult = executeWithArray(query, prepareParams(catName, dbName, tableName, partNames, colNames),
+           queryText);
+       if (qResult == null) {
+         query.closeAll();
+         return Collections.emptyList();
+       }
+       end = doTrace ? System.nanoTime() : 0;
+       timingTrace(doTrace, queryText, start, end);
+       List<Object[]> list = ensureList(qResult);
+       List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>(list.size());
+       for (Object[] row : list) {
+         colStats.add(prepareCSObjWithAdjustedNDV(row, 0, useDensityFunctionForNDVEstimation, ndvTuner));
+         Deadline.checkTimeout();
+       }
+       query.closeAll();
+       return colStats;
+     } else {
+       // Extrapolation is needed for some columns.
+       // In this case, the stats for at least one column are missing in some partition,
+       // so that partition must be extrapolated from the partitions that do have stats.
+       List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>(colNames.size());
+       queryText = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", count(\"PARTITION_NAME\") "
+           + " from " + PART_COL_STATS
+           + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "
+           + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")"
+           + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+           + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
+       start = doTrace ? System.nanoTime() : 0;
+       query = pm.newQuery("javax.jdo.query.SQL", queryText);
+       qResult = executeWithArray(query, prepareParams(catName, dbName, tableName, partNames, colNames),
+           queryText);
+       end = doTrace ? System.nanoTime() : 0;
+       timingTrace(doTrace, queryText, start, end);
+       if (qResult == null) {
+         query.closeAll();
+         return Collections.emptyList();
+       }
+       List<String> noExtraColumnNames = new ArrayList<String>();
+       Map<String, String[]> extraColumnNameTypeParts = new HashMap<String, String[]>();
+       List<Object[]> list = ensureList(qResult);
+       for (Object[] row : list) {
+         String colName = (String) row[0];
+         String colType = (String) row[1];
+         // Extrapolation is not needed for this column if
+         // count(\"PARTITION_NAME\")==partNames.size()
+         // Or, extrapolation is not possible for this column if
+         // count(\"PARTITION_NAME\")<2
+         Long count = extractSqlLong(row[2]);
+         if (count == partNames.size() || count < 2) {
+           noExtraColumnNames.add(colName);
+         } else {
+           extraColumnNameTypeParts.put(colName, new String[] { colType, String.valueOf(count) });
+         }
+         Deadline.checkTimeout();
+       }
+       query.closeAll();
+       // Extrapolation is not needed for the columns in noExtraColumnNames.
+       if (noExtraColumnNames.size() != 0) {
+         queryText = commonPrefix + " and \"COLUMN_NAME\" in ("
+             + makeParams(noExtraColumnNames.size()) + ")" + " and \"PARTITION_NAME\" in ("
+             + makeParams(partNames.size()) + ")" + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
+         start = doTrace ? System.nanoTime() : 0;
+         query = pm.newQuery("javax.jdo.query.SQL", queryText);
+         qResult = executeWithArray(query,
+             prepareParams(catName, dbName, tableName, partNames, noExtraColumnNames), queryText);
+         if (qResult == null) {
+           query.closeAll();
+           return Collections.emptyList();
+         }
+         list = ensureList(qResult);
+         for (Object[] row : list) {
+           colStats.add(prepareCSObjWithAdjustedNDV(row, 0, useDensityFunctionForNDVEstimation, ndvTuner));
+           Deadline.checkTimeout();
+         }
+         end = doTrace ? System.nanoTime() : 0;
+         timingTrace(doTrace, queryText, start, end);
+         query.closeAll();
+       }
+       // Extrapolation is needed for the remaining columns (extraColumnNameTypeParts).
+       // Assign a sequence number to each partition.
+       if (extraColumnNameTypeParts.size() != 0) {
+         Map<String, Integer> indexMap = new HashMap<String, Integer>();
+         for (int index = 0; index < partNames.size(); index++) {
+           indexMap.put(partNames.get(index), index);
+         }
+         // get sum for all columns to reduce the number of queries
+         Map<String, Map<Integer, Object>> sumMap = new HashMap<String, Map<Integer, Object>>();
+         queryText = "select \"COLUMN_NAME\", sum(\"NUM_NULLS\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\"), sum(\"NUM_DISTINCTS\")"
+             + " from " + PART_COL_STATS + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "
+             + " and \"COLUMN_NAME\" in (" + makeParams(extraColumnNameTypeParts.size())
+             + ") and \"PARTITION_NAME\" in (" + makeParams(partNames.size())
+             + ") group by \"COLUMN_NAME\"";
+         start = doTrace ? System.nanoTime() : 0;
+         query = pm.newQuery("javax.jdo.query.SQL", queryText);
+         List<String> extraColumnNames = new ArrayList<String>();
+         extraColumnNames.addAll(extraColumnNameTypeParts.keySet());
+         qResult = executeWithArray(query,
+             prepareParams(catName, dbName, tableName, partNames, extraColumnNames), queryText);
+         if (qResult == null) {
+           query.closeAll();
+           return Collections.emptyList();
+         }
+         list = ensureList(qResult);
+         // see the indexes for colstats in IExtrapolatePartStatus
+         Integer[] sumIndex = new Integer[] { 6, 10, 11, 15 };
+         for (Object[] row : list) {
+           Map<Integer, Object> indexToObject = new HashMap<Integer, Object>();
+           for (int ind = 1; ind < row.length; ind++) {
+             indexToObject.put(sumIndex[ind - 1], row[ind]);
+           }
+           // row[0] is the column name
+           sumMap.put((String) row[0], indexToObject);
+           Deadline.checkTimeout();
+         }
+         end = doTrace ? System.nanoTime() : 0;
+         timingTrace(doTrace, queryText, start, end);
+         query.closeAll();
+         for (Map.Entry<String, String[]> entry : extraColumnNameTypeParts.entrySet()) {
+           Object[] row = new Object[IExtrapolatePartStatus.colStatNames.length + 2];
+           String colName = entry.getKey();
+           String colType = entry.getValue()[0];
+           Long sumVal = Long.parseLong(entry.getValue()[1]);
+           // fill in colname
+           row[0] = colName;
+           // fill in coltype
+           row[1] = colType;
+           // Use linear extrapolation; more sophisticated methods can be added in the future.
+           IExtrapolatePartStatus extrapolateMethod = new LinearExtrapolatePartStatus();
+           // fill in colstatus
+           Integer[] index = null;
+           boolean decimal = false;
+           if (colType.toLowerCase().startsWith("decimal")) {
+             index = IExtrapolatePartStatus.indexMaps.get("decimal");
+             decimal = true;
+           } else {
+             index = IExtrapolatePartStatus.indexMaps.get(colType.toLowerCase());
+           }
+           // If the column type is not one of the known types (long, double, etc.),
+           // fall back to the full set of stat indexes.
+           if (index == null) {
+             index = IExtrapolatePartStatus.indexMaps.get("default");
+           }
+           for (int colStatIndex : index) {
+             String colStatName = IExtrapolatePartStatus.colStatNames[colStatIndex];
+             // if the aggregation type is sum, we do a scale-up
+             if (IExtrapolatePartStatus.aggrTypes[colStatIndex] == IExtrapolatePartStatus.AggrType.Sum) {
+               Object o = sumMap.get(colName).get(colStatIndex);
+               if (o == null) {
+                 row[2 + colStatIndex] = null;
+               } else {
+                 Long val = extractSqlLong(o);
+                 row[2 + colStatIndex] = val / sumVal * (partNames.size());
+               }
+             } else if (IExtrapolatePartStatus.aggrTypes[colStatIndex] == IExtrapolatePartStatus.AggrType.Min
+                 || IExtrapolatePartStatus.aggrTypes[colStatIndex] == IExtrapolatePartStatus.AggrType.Max) {
+               // if the aggregation type is min/max, we extrapolate from the
+               // left/right borders
+               if (!decimal) {
+                 queryText = "select \"" + colStatName
+                     + "\",\"PARTITION_NAME\" from " + PART_COL_STATS
+                     + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?"
+                     + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+                     + " order by \"" + colStatName + "\"";
+               } else {
+                 queryText = "select \"" + colStatName
+                     + "\",\"PARTITION_NAME\" from " + PART_COL_STATS
+                     + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?"
+                     + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+                     + " order by cast(\"" + colStatName + "\" as decimal)";
+               }
+               start = doTrace ? System.nanoTime() : 0;
+               query = pm.newQuery("javax.jdo.query.SQL", queryText);
+               qResult = executeWithArray(query,
+                   prepareParams(catName, dbName, tableName, partNames, Arrays.asList(colName)), queryText);
+               if (qResult == null) {
+                 query.closeAll();
+                 return Collections.emptyList();
+               }
+               fqr = (ForwardQueryResult<?>) qResult;
+               Object[] min = (Object[]) (fqr.get(0));
+               Object[] max = (Object[]) (fqr.get(fqr.size() - 1));
+               end = doTrace ? System.nanoTime() : 0;
+               timingTrace(doTrace, queryText, start, end);
+               query.closeAll();
+               if (min[0] == null || max[0] == null) {
+                 row[2 + colStatIndex] = null;
+               } else {
+                 row[2 + colStatIndex] = extrapolateMethod.extrapolate(min, max, colStatIndex,
+                     indexMap);
+               }
+             } else {
+               // If the aggregation type is avg, take the average over the partitions that have stats.
+               queryText = "select "
+                   + "avg((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" as decimal)),"
+                   + "avg((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
+                   + "avg((cast(\"BIG_DECIMAL_HIGH_VALUE\" as decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\")"
+                   + " from " + PART_COL_STATS + "" + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ?"
+                   + " and \"COLUMN_NAME\" = ?" + " and \"PARTITION_NAME\" in ("
+                   + makeParams(partNames.size()) + ")" + " group by \"COLUMN_NAME\"";
+               start = doTrace ? System.nanoTime() : 0;
+               query = pm.newQuery("javax.jdo.query.SQL", queryText);
+               qResult = executeWithArray(query,
+                   prepareParams(catName, dbName, tableName, partNames, Arrays.asList(colName)), queryText);
+               if (qResult == null) {
+                 query.closeAll();
+                 return Collections.emptyList();
+               }
+               fqr = (Forward

<TRUNCATED>
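The long comment inside aggrStatsUseDB above brackets the global NDV between max("NUM_DISTINCTS") and sum("NUM_DISTINCTS") and, when useDensityFunctionForNDVEstimation is on, refines it with the average density supplied by the avg(...) expressions in the query. The numbers below are a rough, assumed illustration of that heuristic (DensityNdvSketch is an invented name; the actual clamping lives in the stats aggregator classes, which this hunk does not show):

public class DensityNdvSketch {
  // Assumed per-partition stats for a long column (illustrative numbers only):
  //   partition A: low = 0,   high = 1000, ndv = 100  -> density (high - low) / ndv = 10.0
  //   partition B: low = 500, high = 1500, ndv = 80   -> density = 12.5
  public static void main(String[] args) {
    long lowerBound = Math.max(100, 80);   // domains fully overlap: max(NUM_DISTINCTS)
    long upperBound = 100 + 80;            // domains disjoint:      sum(NUM_DISTINCTS)
    double avgDensity = (10.0 + 12.5) / 2; // what the avg(...) expressions feed in
    double estimate = (1500 - 0) / avgDensity;  // global value range / average density
    long ndv = Math.round(Math.min(Math.max(estimate, lowerBound), upperBound));
    System.out.println(ndv); // ~133, clamped into [100, 180]
  }
}

The extrapolation branch further down applies a similar proportional idea to sum-type stats: a sum observed over the k partitions that actually have stats for a column is scaled up by partNames.size() / k to cover all partitions.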

[23/54] [abbrv] hive git commit: HIVE-19532 : fix tests for master-txnstats branch - more tests (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index af77e0e..7ab64ea 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -220,6 +220,8 @@ import org.slf4j.LoggerFactory;
 
     public void rename_partition(String db_name, String tbl_name, List<String> part_vals, Partition new_part) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
 
+    public RenamePartitionResponse rename_partition_req(RenamePartitionRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
+
     public boolean partition_name_has_valid_characters(List<String> part_vals, boolean throw_exception) throws MetaException, org.apache.thrift.TException;
 
     public String get_config_value(String name, String defaultValue) throws ConfigValSecurityException, org.apache.thrift.TException;
@@ -650,6 +652,8 @@ import org.slf4j.LoggerFactory;
 
     public void rename_partition(String db_name, String tbl_name, List<String> part_vals, Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
+    public void rename_partition_req(RenamePartitionRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
     public void partition_name_has_valid_characters(List<String> part_vals, boolean throw_exception, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
     public void get_config_value(String name, String defaultValue, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -3596,6 +3600,35 @@ import org.slf4j.LoggerFactory;
       return;
     }
 
+    public RenamePartitionResponse rename_partition_req(RenamePartitionRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException
+    {
+      send_rename_partition_req(req);
+      return recv_rename_partition_req();
+    }
+
+    public void send_rename_partition_req(RenamePartitionRequest req) throws org.apache.thrift.TException
+    {
+      rename_partition_req_args args = new rename_partition_req_args();
+      args.setReq(req);
+      sendBase("rename_partition_req", args);
+    }
+
+    public RenamePartitionResponse recv_rename_partition_req() throws InvalidOperationException, MetaException, org.apache.thrift.TException
+    {
+      rename_partition_req_result result = new rename_partition_req_result();
+      receiveBase(result, "rename_partition_req");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "rename_partition_req failed: unknown result");
+    }
+
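For reference, the new blocking call would be driven from the raw generated client roughly as below. The host/port, the partition values, and the RenamePartitionRequest setters used here (setDbName, setTableName, setPartVals, setNewPart) are assumptions for illustration only; real callers are expected to go through HiveMetaStoreClient rather than ThriftHiveMetastore.Client directly.

import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.RenamePartitionRequest;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;

public class RenamePartitionReqDemo {
  public static void main(String[] args) throws Exception {
    // Assumed endpoint; a real deployment would resolve this from hive.metastore.uris.
    TSocket transport = new TSocket("localhost", 9083);
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

    RenamePartitionRequest req = new RenamePartitionRequest();
    // Field names below are assumptions about the request struct, shown for shape only.
    req.setDbName("default");
    req.setTableName("sales");
    req.setPartVals(java.util.Arrays.asList("2018-07-19"));
    req.setNewPart(new Partition()); // would carry the renamed partition's metadata

    client.rename_partition_req(req); // wraps send_/recv_ exactly as generated above
    transport.close();
  }
}

Wrapping the arguments in a request struct, rather than extending the positional rename_partition(db_name, tbl_name, part_vals, new_part) signature, presumably leaves room to add fields later without another Thrift method change.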
     public boolean partition_name_has_valid_characters(List<String> part_vals, boolean throw_exception) throws MetaException, org.apache.thrift.TException
     {
       send_partition_name_has_valid_characters(part_vals, throw_exception);
@@ -10289,6 +10322,38 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    public void rename_partition_req(RenamePartitionRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      rename_partition_req_call method_call = new rename_partition_req_call(req, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class rename_partition_req_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private RenamePartitionRequest req;
+      public rename_partition_req_call(RenamePartitionRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.req = req;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("rename_partition_req", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        rename_partition_req_args args = new rename_partition_req_args();
+        args.setReq(req);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public RenamePartitionResponse getResult() throws InvalidOperationException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_rename_partition_req();
+      }
+    }
+
     public void partition_name_has_valid_characters(List<String> part_vals, boolean throw_exception, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
       partition_name_has_valid_characters_call method_call = new partition_name_has_valid_characters_call(part_vals, throw_exception, resultHandler, this, ___protocolFactory, ___transport);
@@ -14467,6 +14532,7 @@ import org.slf4j.LoggerFactory;
       processMap.put("alter_partitions_req", new alter_partitions_req());
       processMap.put("alter_partition_with_environment_context", new alter_partition_with_environment_context());
       processMap.put("rename_partition", new rename_partition());
+      processMap.put("rename_partition_req", new rename_partition_req());
       processMap.put("partition_name_has_valid_characters", new partition_name_has_valid_characters());
       processMap.put("get_config_value", new get_config_value());
       processMap.put("partition_name_to_vals", new partition_name_to_vals());
@@ -16951,6 +17017,32 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class rename_partition_req<I extends Iface> extends org.apache.thrift.ProcessFunction<I, rename_partition_req_args> {
+      public rename_partition_req() {
+        super("rename_partition_req");
+      }
+
+      public rename_partition_req_args getEmptyArgsInstance() {
+        return new rename_partition_req_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public rename_partition_req_result getResult(I iface, rename_partition_req_args args) throws org.apache.thrift.TException {
+        rename_partition_req_result result = new rename_partition_req_result();
+        try {
+          result.success = iface.rename_partition_req(args.req);
+        } catch (InvalidOperationException o1) {
+          result.o1 = o1;
+        } catch (MetaException o2) {
+          result.o2 = o2;
+        }
+        return result;
+      }
+    }
+
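On the server side, the ProcessFunction above just unpacks rename_partition_req_args and delegates to Iface.rename_partition_req, mapping InvalidOperationException and MetaException onto the o1/o2 fields of the result struct. A hypothetical handler stub for just this method is sketched below (RenamePartitionHandlerSketch is an invented name; Hive's real implementation lives in the metastore's HMSHandler and performs the actual rename):

import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.RenamePartitionRequest;
import org.apache.hadoop.hive.metastore.api.RenamePartitionResponse;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

// Sketch of the one Iface method a handler must now provide; everything else is omitted.
public abstract class RenamePartitionHandlerSketch implements ThriftHiveMetastore.Iface {
  @Override
  public RenamePartitionResponse rename_partition_req(RenamePartitionRequest req)
      throws InvalidOperationException, MetaException {
    // Validate the request, rename the partition in the object store, then acknowledge.
    // Any InvalidOperationException/MetaException thrown here is mapped to result.o1/o2
    // by the generated rename_partition_req ProcessFunction shown above.
    return new RenamePartitionResponse();
  }
}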
     @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class partition_name_has_valid_characters<I extends Iface> extends org.apache.thrift.ProcessFunction<I, partition_name_has_valid_characters_args> {
       public partition_name_has_valid_characters() {
         super("partition_name_has_valid_characters");
@@ -20162,6 +20254,7 @@ import org.slf4j.LoggerFactory;
       processMap.put("alter_partitions_req", new alter_partitions_req());
       processMap.put("alter_partition_with_environment_context", new alter_partition_with_environment_context());
       processMap.put("rename_partition", new rename_partition());
+      processMap.put("rename_partition_req", new rename_partition_req());
       processMap.put("partition_name_has_valid_characters", new partition_name_has_valid_characters());
       processMap.put("get_config_value", new get_config_value());
       processMap.put("partition_name_to_vals", new partition_name_to_vals());
@@ -25876,6 +25969,68 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class rename_partition_req<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, rename_partition_req_args, RenamePartitionResponse> {
+      public rename_partition_req() {
+        super("rename_partition_req");
+      }
+
+      public rename_partition_req_args getEmptyArgsInstance() {
+        return new rename_partition_req_args();
+      }
+
+      public AsyncMethodCallback<RenamePartitionResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<RenamePartitionResponse>() { 
+          public void onComplete(RenamePartitionResponse o) {
+            rename_partition_req_result result = new rename_partition_req_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            rename_partition_req_result result = new rename_partition_req_result();
+            if (e instanceof InvalidOperationException) {
+                        result.o1 = (InvalidOperationException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof MetaException) {
+                        result.o2 = (MetaException) e;
+                        result.setO2IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, rename_partition_req_args args, org.apache.thrift.async.AsyncMethodCallback<RenamePartitionResponse> resultHandler) throws TException {
+        iface.rename_partition_req(args.req,resultHandler);
+      }
+    }
+
     @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class partition_name_has_valid_characters<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, partition_name_has_valid_characters_args, Boolean> {
       public partition_name_has_valid_characters() {
         super("partition_name_has_valid_characters");
@@ -43189,13 +43344,13 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list960 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list960.size);
-                  String _elem961;
-                  for (int _i962 = 0; _i962 < _list960.size; ++_i962)
+                  org.apache.thrift.protocol.TList _list968 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list968.size);
+                  String _elem969;
+                  for (int _i970 = 0; _i970 < _list968.size; ++_i970)
                   {
-                    _elem961 = iprot.readString();
-                    struct.success.add(_elem961);
+                    _elem969 = iprot.readString();
+                    struct.success.add(_elem969);
                   }
                   iprot.readListEnd();
                 }
@@ -43230,9 +43385,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter963 : struct.success)
+            for (String _iter971 : struct.success)
             {
-              oprot.writeString(_iter963);
+              oprot.writeString(_iter971);
             }
             oprot.writeListEnd();
           }
@@ -43271,9 +43426,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter964 : struct.success)
+            for (String _iter972 : struct.success)
             {
-              oprot.writeString(_iter964);
+              oprot.writeString(_iter972);
             }
           }
         }
@@ -43288,13 +43443,13 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list965.size);
-            String _elem966;
-            for (int _i967 = 0; _i967 < _list965.size; ++_i967)
+            org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list973.size);
+            String _elem974;
+            for (int _i975 = 0; _i975 < _list973.size; ++_i975)
             {
-              _elem966 = iprot.readString();
-              struct.success.add(_elem966);
+              _elem974 = iprot.readString();
+              struct.success.add(_elem974);
             }
           }
           struct.setSuccessIsSet(true);
@@ -43948,13 +44103,13 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list968 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list968.size);
-                  String _elem969;
-                  for (int _i970 = 0; _i970 < _list968.size; ++_i970)
+                  org.apache.thrift.protocol.TList _list976 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list976.size);
+                  String _elem977;
+                  for (int _i978 = 0; _i978 < _list976.size; ++_i978)
                   {
-                    _elem969 = iprot.readString();
-                    struct.success.add(_elem969);
+                    _elem977 = iprot.readString();
+                    struct.success.add(_elem977);
                   }
                   iprot.readListEnd();
                 }
@@ -43989,9 +44144,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter971 : struct.success)
+            for (String _iter979 : struct.success)
             {
-              oprot.writeString(_iter971);
+              oprot.writeString(_iter979);
             }
             oprot.writeListEnd();
           }
@@ -44030,9 +44185,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter972 : struct.success)
+            for (String _iter980 : struct.success)
             {
-              oprot.writeString(_iter972);
+              oprot.writeString(_iter980);
             }
           }
         }
@@ -44047,13 +44202,13 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list973.size);
-            String _elem974;
-            for (int _i975 = 0; _i975 < _list973.size; ++_i975)
+            org.apache.thrift.protocol.TList _list981 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list981.size);
+            String _elem982;
+            for (int _i983 = 0; _i983 < _list981.size; ++_i983)
             {
-              _elem974 = iprot.readString();
-              struct.success.add(_elem974);
+              _elem982 = iprot.readString();
+              struct.success.add(_elem982);
             }
           }
           struct.setSuccessIsSet(true);
@@ -48660,16 +48815,16 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                 {
-                  org.apache.thrift.protocol.TMap _map976 = iprot.readMapBegin();
-                  struct.success = new HashMap<String,Type>(2*_map976.size);
-                  String _key977;
-                  Type _val978;
-                  for (int _i979 = 0; _i979 < _map976.size; ++_i979)
+                  org.apache.thrift.protocol.TMap _map984 = iprot.readMapBegin();
+                  struct.success = new HashMap<String,Type>(2*_map984.size);
+                  String _key985;
+                  Type _val986;
+                  for (int _i987 = 0; _i987 < _map984.size; ++_i987)
                   {
-                    _key977 = iprot.readString();
-                    _val978 = new Type();
-                    _val978.read(iprot);
-                    struct.success.put(_key977, _val978);
+                    _key985 = iprot.readString();
+                    _val986 = new Type();
+                    _val986.read(iprot);
+                    struct.success.put(_key985, _val986);
                   }
                   iprot.readMapEnd();
                 }
@@ -48704,10 +48859,10 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (Map.Entry<String, Type> _iter980 : struct.success.entrySet())
+            for (Map.Entry<String, Type> _iter988 : struct.success.entrySet())
             {
-              oprot.writeString(_iter980.getKey());
-              _iter980.getValue().write(oprot);
+              oprot.writeString(_iter988.getKey());
+              _iter988.getValue().write(oprot);
             }
             oprot.writeMapEnd();
           }
@@ -48746,10 +48901,10 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (Map.Entry<String, Type> _iter981 : struct.success.entrySet())
+            for (Map.Entry<String, Type> _iter989 : struct.success.entrySet())
             {
-              oprot.writeString(_iter981.getKey());
-              _iter981.getValue().write(oprot);
+              oprot.writeString(_iter989.getKey());
+              _iter989.getValue().write(oprot);
             }
           }
         }
@@ -48764,16 +48919,16 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TMap _map982 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new HashMap<String,Type>(2*_map982.size);
-            String _key983;
-            Type _val984;
-            for (int _i985 = 0; _i985 < _map982.size; ++_i985)
+            org.apache.thrift.protocol.TMap _map990 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new HashMap<String,Type>(2*_map990.size);
+            String _key991;
+            Type _val992;
+            for (int _i993 = 0; _i993 < _map990.size; ++_i993)
             {
-              _key983 = iprot.readString();
-              _val984 = new Type();
-              _val984.read(iprot);
-              struct.success.put(_key983, _val984);
+              _key991 = iprot.readString();
+              _val992 = new Type();
+              _val992.read(iprot);
+              struct.success.put(_key991, _val992);
             }
           }
           struct.setSuccessIsSet(true);
@@ -49808,14 +49963,14 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list986 = iprot.readListBegin();
-                  struct.success = new ArrayList<FieldSchema>(_list986.size);
-                  FieldSchema _elem987;
-                  for (int _i988 = 0; _i988 < _list986.size; ++_i988)
+                  org.apache.thrift.protocol.TList _list994 = iprot.readListBegin();
+                  struct.success = new ArrayList<FieldSchema>(_list994.size);
+                  FieldSchema _elem995;
+                  for (int _i996 = 0; _i996 < _list994.size; ++_i996)
                   {
-                    _elem987 = new FieldSchema();
-                    _elem987.read(iprot);
-                    struct.success.add(_elem987);
+                    _elem995 = new FieldSchema();
+                    _elem995.read(iprot);
+                    struct.success.add(_elem995);
                   }
                   iprot.readListEnd();
                 }
@@ -49868,9 +50023,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (FieldSchema _iter989 : struct.success)
+            for (FieldSchema _iter997 : struct.success)
             {
-              _iter989.write(oprot);
+              _iter997.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -49925,9 +50080,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (FieldSchema _iter990 : struct.success)
+            for (FieldSchema _iter998 : struct.success)
             {
-              _iter990.write(oprot);
+              _iter998.write(oprot);
             }
           }
         }
@@ -49948,14 +50103,14 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list991 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<FieldSchema>(_list991.size);
-            FieldSchema _elem992;
-            for (int _i993 = 0; _i993 < _list991.size; ++_i993)
+            org.apache.thrift.protocol.TList _list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<FieldSchema>(_list999.size);
+            FieldSchema _elem1000;
+            for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001)
             {
-              _elem992 = new FieldSchema();
-              _elem992.read(iprot);
-              struct.success.add(_elem992);
+              _elem1000 = new FieldSchema();
+              _elem1000.read(iprot);
+              struct.success.add(_elem1000);
             }
           }
           struct.setSuccessIsSet(true);
@@ -51109,14 +51264,14 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list994 = iprot.readListBegin();
-                  struct.success = new ArrayList<FieldSchema>(_list994.size);
-                  FieldSchema _elem995;
-                  for (int _i996 = 0; _i996 < _list994.size; ++_i996)
+                  org.apache.thrift.protocol.TList _list1002 = iprot.readListBegin();
+                  struct.success = new ArrayList<FieldSchema>(_list1002.size);
+                  FieldSchema _elem1003;
+                  for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004)
                   {
-                    _elem995 = new FieldSchema();
-                    _elem995.read(iprot);
-                    struct.success.add(_elem995);
+                    _elem1003 = new FieldSchema();
+                    _elem1003.read(iprot);
+                    struct.success.add(_elem1003);
                   }
                   iprot.readListEnd();
                 }
@@ -51169,9 +51324,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (FieldSchema _iter997 : struct.success)
+            for (FieldSchema _iter1005 : struct.success)
             {
-              _iter997.write(oprot);
+              _iter1005.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -51226,9 +51381,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (FieldSchema _iter998 : struct.success)
+            for (FieldSchema _iter1006 : struct.success)
             {
-              _iter998.write(oprot);
+              _iter1006.write(oprot);
             }
           }
         }
@@ -51249,14 +51404,14 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<FieldSchema>(_list999.size);
-            FieldSchema _elem1000;
-            for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001)
+            org.apache.thrift.protocol.TList _list1007 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<FieldSchema>(_list1007.size);
+            FieldSchema _elem1008;
+            for (int _i1009 = 0; _i1009 < _list1007.size; ++_i1009)
             {
-              _elem1000 = new FieldSchema();
-              _elem1000.read(iprot);
-              struct.success.add(_elem1000);
+              _elem1008 = new FieldSchema();
+              _elem1008.read(iprot);
+              struct.success.add(_elem1008);
             }
           }
           struct.setSuccessIsSet(true);
@@ -52301,14 +52456,14 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1002 = iprot.readListBegin();
-                  struct.success = new ArrayList<FieldSchema>(_list1002.size);
-                  FieldSchema _elem1003;
-                  for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004)
+                  org.apache.thrift.protocol.TList _list1010 = iprot.readListBegin();
+                  struct.success = new ArrayList<FieldSchema>(_list1010.size);
+                  FieldSchema _elem1011;
+                  for (int _i1012 = 0; _i1012 < _list1010.size; ++_i1012)
                   {
-                    _elem1003 = new FieldSchema();
-                    _elem1003.read(iprot);
-                    struct.success.add(_elem1003);
+                    _elem1011 = new FieldSchema();
+                    _elem1011.read(iprot);
+                    struct.success.add(_elem1011);
                   }
                   iprot.readListEnd();
                 }
@@ -52361,9 +52516,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (FieldSchema _iter1005 : struct.success)
+            for (FieldSchema _iter1013 : struct.success)
             {
-              _iter1005.write(oprot);
+              _iter1013.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -52418,9 +52573,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (FieldSchema _iter1006 : struct.success)
+            for (FieldSchema _iter1014 : struct.success)
             {
-              _iter1006.write(oprot);
+              _iter1014.write(oprot);
             }
           }
         }
@@ -52441,14 +52596,14 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1007 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<FieldSchema>(_list1007.size);
-            FieldSchema _elem1008;
-            for (int _i1009 = 0; _i1009 < _list1007.size; ++_i1009)
+            org.apache.thrift.protocol.TList _list1015 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<FieldSchema>(_list1015.size);
+            FieldSchema _elem1016;
+            for (int _i1017 = 0; _i1017 < _list1015.size; ++_i1017)
             {
-              _elem1008 = new FieldSchema();
-              _elem1008.read(iprot);
-              struct.success.add(_elem1008);
+              _elem1016 = new FieldSchema();
+              _elem1016.read(iprot);
+              struct.success.add(_elem1016);
             }
           }
           struct.setSuccessIsSet(true);
@@ -53602,14 +53757,14 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1010 = iprot.readListBegin();
-                  struct.success = new ArrayList<FieldSchema>(_list1010.size);
-                  FieldSchema _elem1011;
-                  for (int _i1012 = 0; _i1012 < _list1010.size; ++_i1012)
+                  org.apache.thrift.protocol.TList _list1018 = iprot.readListBegin();
+                  struct.success = new ArrayList<FieldSchema>(_list1018.size);
+                  FieldSchema _elem1019;
+                  for (int _i1020 = 0; _i1020 < _list1018.size; ++_i1020)
                   {
-                    _elem1011 = new FieldSchema();
-                    _elem1011.read(iprot);
-                    struct.success.add(_elem1011);
+                    _elem1019 = new FieldSchema();
+                    _elem1019.read(iprot);
+                    struct.success.add(_elem1019);
                   }
                   iprot.readListEnd();
                 }
@@ -53662,9 +53817,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (FieldSchema _iter1013 : struct.success)
+            for (FieldSchema _iter1021 : struct.success)
             {
-              _iter1013.write(oprot);
+              _iter1021.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -53719,9 +53874,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (FieldSchema _iter1014 : struct.success)
+            for (FieldSchema _iter1022 : struct.success)
             {
-              _iter1014.write(oprot);
+              _iter1022.write(oprot);
             }
           }
         }
@@ -53742,14 +53897,14 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1015 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<FieldSchema>(_list1015.size);
-            FieldSchema _elem1016;
-            for (int _i1017 = 0; _i1017 < _list1015.size; ++_i1017)
+            org.apache.thrift.protocol.TList _list1023 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<FieldSchema>(_list1023.size);
+            FieldSchema _elem1024;
+            for (int _i1025 = 0; _i1025 < _list1023.size; ++_i1025)
             {
-              _elem1016 = new FieldSchema();
-              _elem1016.read(iprot);
-              struct.success.add(_elem1016);
+              _elem1024 = new FieldSchema();
+              _elem1024.read(iprot);
+              struct.success.add(_elem1024);
             }
           }
           struct.setSuccessIsSet(true);
@@ -56878,14 +57033,14 @@ import org.slf4j.LoggerFactory;
             case 2: // PRIMARY_KEYS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1018 = iprot.readListBegin();
-                  struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1018.size);
-                  SQLPrimaryKey _elem1019;
-                  for (int _i1020 = 0; _i1020 < _list1018.size; ++_i1020)
+                  org.apache.thrift.protocol.TList _list1026 = iprot.readListBegin();
+                  struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1026.size);
+                  SQLPrimaryKey _elem1027;
+                  for (int _i1028 = 0; _i1028 < _list1026.size; ++_i1028)
                   {
-                    _elem1019 = new SQLPrimaryKey();
-                    _elem1019.read(iprot);
-                    struct.primaryKeys.add(_elem1019);
+                    _elem1027 = new SQLPrimaryKey();
+                    _elem1027.read(iprot);
+                    struct.primaryKeys.add(_elem1027);
                   }
                   iprot.readListEnd();
                 }
@@ -56897,14 +57052,14 @@ import org.slf4j.LoggerFactory;
             case 3: // FOREIGN_KEYS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1021 = iprot.readListBegin();
-                  struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1021.size);
-                  SQLForeignKey _elem1022;
-                  for (int _i1023 = 0; _i1023 < _list1021.size; ++_i1023)
+                  org.apache.thrift.protocol.TList _list1029 = iprot.readListBegin();
+                  struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1029.size);
+                  SQLForeignKey _elem1030;
+                  for (int _i1031 = 0; _i1031 < _list1029.size; ++_i1031)
                   {
-                    _elem1022 = new SQLForeignKey();
-                    _elem1022.read(iprot);
-                    struct.foreignKeys.add(_elem1022);
+                    _elem1030 = new SQLForeignKey();
+                    _elem1030.read(iprot);
+                    struct.foreignKeys.add(_elem1030);
                   }
                   iprot.readListEnd();
                 }
@@ -56916,14 +57071,14 @@ import org.slf4j.LoggerFactory;
             case 4: // UNIQUE_CONSTRAINTS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1024 = iprot.readListBegin();
-                  struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1024.size);
-                  SQLUniqueConstraint _elem1025;
-                  for (int _i1026 = 0; _i1026 < _list1024.size; ++_i1026)
+                  org.apache.thrift.protocol.TList _list1032 = iprot.readListBegin();
+                  struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1032.size);
+                  SQLUniqueConstraint _elem1033;
+                  for (int _i1034 = 0; _i1034 < _list1032.size; ++_i1034)
                   {
-                    _elem1025 = new SQLUniqueConstraint();
-                    _elem1025.read(iprot);
-                    struct.uniqueConstraints.add(_elem1025);
+                    _elem1033 = new SQLUniqueConstraint();
+                    _elem1033.read(iprot);
+                    struct.uniqueConstraints.add(_elem1033);
                   }
                   iprot.readListEnd();
                 }
@@ -56935,14 +57090,14 @@ import org.slf4j.LoggerFactory;
             case 5: // NOT_NULL_CONSTRAINTS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1027 = iprot.readListBegin();
-                  struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1027.size);
-                  SQLNotNullConstraint _elem1028;
-                  for (int _i1029 = 0; _i1029 < _list1027.size; ++_i1029)
+                  org.apache.thrift.protocol.TList _list1035 = iprot.readListBegin();
+                  struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1035.size);
+                  SQLNotNullConstraint _elem1036;
+                  for (int _i1037 = 0; _i1037 < _list1035.size; ++_i1037)
                   {
-                    _elem1028 = new SQLNotNullConstraint();
-                    _elem1028.read(iprot);
-                    struct.notNullConstraints.add(_elem1028);
+                    _elem1036 = new SQLNotNullConstraint();
+                    _elem1036.read(iprot);
+                    struct.notNullConstraints.add(_elem1036);
                   }
                   iprot.readListEnd();
                 }
@@ -56954,14 +57109,14 @@ import org.slf4j.LoggerFactory;
             case 6: // DEFAULT_CONSTRAINTS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1030 = iprot.readListBegin();
-                  struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1030.size);
-                  SQLDefaultConstraint _elem1031;
-                  for (int _i1032 = 0; _i1032 < _list1030.size; ++_i1032)
+                  org.apache.thrift.protocol.TList _list1038 = iprot.readListBegin();
+                  struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1038.size);
+                  SQLDefaultConstraint _elem1039;
+                  for (int _i1040 = 0; _i1040 < _list1038.size; ++_i1040)
                   {
-                    _elem1031 = new SQLDefaultConstraint();
-                    _elem1031.read(iprot);
-                    struct.defaultConstraints.add(_elem1031);
+                    _elem1039 = new SQLDefaultConstraint();
+                    _elem1039.read(iprot);
+                    struct.defaultConstraints.add(_elem1039);
                   }
                   iprot.readListEnd();
                 }
@@ -56973,14 +57128,14 @@ import org.slf4j.LoggerFactory;
             case 7: // CHECK_CONSTRAINTS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1033 = iprot.readListBegin();
-                  struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1033.size);
-                  SQLCheckConstraint _elem1034;
-                  for (int _i1035 = 0; _i1035 < _list1033.size; ++_i1035)
+                  org.apache.thrift.protocol.TList _list1041 = iprot.readListBegin();
+                  struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1041.size);
+                  SQLCheckConstraint _elem1042;
+                  for (int _i1043 = 0; _i1043 < _list1041.size; ++_i1043)
                   {
-                    _elem1034 = new SQLCheckConstraint();
-                    _elem1034.read(iprot);
-                    struct.checkConstraints.add(_elem1034);
+                    _elem1042 = new SQLCheckConstraint();
+                    _elem1042.read(iprot);
+                    struct.checkConstraints.add(_elem1042);
                   }
                   iprot.readListEnd();
                 }
@@ -57011,9 +57166,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size()));
-            for (SQLPrimaryKey _iter1036 : struct.primaryKeys)
+            for (SQLPrimaryKey _iter1044 : struct.primaryKeys)
             {
-              _iter1036.write(oprot);
+              _iter1044.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -57023,9 +57178,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size()));
-            for (SQLForeignKey _iter1037 : struct.foreignKeys)
+            for (SQLForeignKey _iter1045 : struct.foreignKeys)
             {
-              _iter1037.write(oprot);
+              _iter1045.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -57035,9 +57190,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size()));
-            for (SQLUniqueConstraint _iter1038 : struct.uniqueConstraints)
+            for (SQLUniqueConstraint _iter1046 : struct.uniqueConstraints)
             {
-              _iter1038.write(oprot);
+              _iter1046.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -57047,9 +57202,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size()));
-            for (SQLNotNullConstraint _iter1039 : struct.notNullConstraints)
+            for (SQLNotNullConstraint _iter1047 : struct.notNullConstraints)
             {
-              _iter1039.write(oprot);
+              _iter1047.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -57059,9 +57214,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size()));
-            for (SQLDefaultConstraint _iter1040 : struct.defaultConstraints)
+            for (SQLDefaultConstraint _iter1048 : struct.defaultConstraints)
             {
-              _iter1040.write(oprot);
+              _iter1048.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -57071,9 +57226,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size()));
-            for (SQLCheckConstraint _iter1041 : struct.checkConstraints)
+            for (SQLCheckConstraint _iter1049 : struct.checkConstraints)
             {
-              _iter1041.write(oprot);
+              _iter1049.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -57125,54 +57280,54 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetPrimaryKeys()) {
           {
             oprot.writeI32(struct.primaryKeys.size());
-            for (SQLPrimaryKey _iter1042 : struct.primaryKeys)
+            for (SQLPrimaryKey _iter1050 : struct.primaryKeys)
             {
-              _iter1042.write(oprot);
+              _iter1050.write(oprot);
             }
           }
         }
         if (struct.isSetForeignKeys()) {
           {
             oprot.writeI32(struct.foreignKeys.size());
-            for (SQLForeignKey _iter1043 : struct.foreignKeys)
+            for (SQLForeignKey _iter1051 : struct.foreignKeys)
             {
-              _iter1043.write(oprot);
+              _iter1051.write(oprot);
             }
           }
         }
         if (struct.isSetUniqueConstraints()) {
           {
             oprot.writeI32(struct.uniqueConstraints.size());
-            for (SQLUniqueConstraint _iter1044 : struct.uniqueConstraints)
+            for (SQLUniqueConstraint _iter1052 : struct.uniqueConstraints)
             {
-              _iter1044.write(oprot);
+              _iter1052.write(oprot);
             }
           }
         }
         if (struct.isSetNotNullConstraints()) {
           {
             oprot.writeI32(struct.notNullConstraints.size());
-            for (SQLNotNullConstraint _iter1045 : struct.notNullConstraints)
+            for (SQLNotNullConstraint _iter1053 : struct.notNullConstraints)
             {
-              _iter1045.write(oprot);
+              _iter1053.write(oprot);
             }
           }
         }
         if (struct.isSetDefaultConstraints()) {
           {
             oprot.writeI32(struct.defaultConstraints.size());
-            for (SQLDefaultConstraint _iter1046 : struct.defaultConstraints)
+            for (SQLDefaultConstraint _iter1054 : struct.defaultConstraints)
             {
-              _iter1046.write(oprot);
+              _iter1054.write(oprot);
             }
           }
         }
         if (struct.isSetCheckConstraints()) {
           {
             oprot.writeI32(struct.checkConstraints.size());
-            for (SQLCheckConstraint _iter1047 : struct.checkConstraints)
+            for (SQLCheckConstraint _iter1055 : struct.checkConstraints)
             {
-              _iter1047.write(oprot);
+              _iter1055.write(oprot);
             }
           }
         }
@@ -57189,84 +57344,84 @@ import org.slf4j.LoggerFactory;
         }
         if (incoming.get(1)) {
           {
-            org.apache.thrift.protocol.TList _list1048 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1048.size);
-            SQLPrimaryKey _elem1049;
-            for (int _i1050 = 0; _i1050 < _list1048.size; ++_i1050)
+            org.apache.thrift.protocol.TList _list1056 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1056.size);
+            SQLPrimaryKey _elem1057;
+            for (int _i1058 = 0; _i1058 < _list1056.size; ++_i1058)
             {
-              _elem1049 = new SQLPrimaryKey();
-              _elem1049.read(iprot);
-              struct.primaryKeys.add(_elem1049);
+              _elem1057 = new SQLPrimaryKey();
+              _elem1057.read(iprot);
+              struct.primaryKeys.add(_elem1057);
             }
           }
           struct.setPrimaryKeysIsSet(true);
         }
         if (incoming.get(2)) {
           {
-            org.apache.thrift.protocol.TList _list1051 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1051.size);
-            SQLForeignKey _elem1052;
-            for (int _i1053 = 0; _i1053 < _list1051.size; ++_i1053)
+            org.apache.thrift.protocol.TList _list1059 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1059.size);
+            SQLForeignKey _elem1060;
+            for (int _i1061 = 0; _i1061 < _list1059.size; ++_i1061)
             {
-              _elem1052 = new SQLForeignKey();
-              _elem1052.read(iprot);
-              struct.foreignKeys.add(_elem1052);
+              _elem1060 = new SQLForeignKey();
+              _elem1060.read(iprot);
+              struct.foreignKeys.add(_elem1060);
             }
           }
           struct.setForeignKeysIsSet(true);
         }
         if (incoming.get(3)) {
           {
-            org.apache.thrift.protocol.TList _list1054 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1054.size);
-            SQLUniqueConstraint _elem1055;
-            for (int _i1056 = 0; _i1056 < _list1054.size; ++_i1056)
+            org.apache.thrift.protocol.TList _list1062 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1062.size);
+            SQLUniqueConstraint _elem1063;
+            for (int _i1064 = 0; _i1064 < _list1062.size; ++_i1064)
             {
-              _elem1055 = new SQLUniqueConstraint();
-              _elem1055.read(iprot);
-              struct.uniqueConstraints.add(_elem1055);
+              _elem1063 = new SQLUniqueConstraint();
+              _elem1063.read(iprot);
+              struct.uniqueConstraints.add(_elem1063);
             }
           }
           struct.setUniqueConstraintsIsSet(true);
         }
         if (incoming.get(4)) {
           {
-            org.apache.thrift.protocol.TList _list1057 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1057.size);
-            SQLNotNullConstraint _elem1058;
-            for (int _i1059 = 0; _i1059 < _list1057.size; ++_i1059)
+            org.apache.thrift.protocol.TList _list1065 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1065.size);
+            SQLNotNullConstraint _elem1066;
+            for (int _i1067 = 0; _i1067 < _list1065.size; ++_i1067)
             {
-              _elem1058 = new SQLNotNullConstraint();
-              _elem1058.read(iprot);
-              struct.notNullConstraints.add(_elem1058);
+              _elem1066 = new SQLNotNullConstraint();
+              _elem1066.read(iprot);
+              struct.notNullConstraints.add(_elem1066);
             }
           }
           struct.setNotNullConstraintsIsSet(true);
         }
         if (incoming.get(5)) {
           {
-            org.apache.thrift.protocol.TList _list1060 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1060.size);
-            SQLDefaultConstraint _elem1061;
-            for (int _i1062 = 0; _i1062 < _list1060.size; ++_i1062)
+            org.apache.thrift.protocol.TList _list1068 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1068.size);
+            SQLDefaultConstraint _elem1069;
+            for (int _i1070 = 0; _i1070 < _list1068.size; ++_i1070)
             {
-              _elem1061 = new SQLDefaultConstraint();
-              _elem1061.read(iprot);
-              struct.defaultConstraints.add(_elem1061);
+              _elem1069 = new SQLDefaultConstraint();
+              _elem1069.read(iprot);
+              struct.defaultConstraints.add(_elem1069);
             }
           }
           struct.setDefaultConstraintsIsSet(true);
         }
         if (incoming.get(6)) {
           {
-            org.apache.thrift.protocol.TList _list1063 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1063.size);
-            SQLCheckConstraint _elem1064;
-            for (int _i1065 = 0; _i1065 < _list1063.size; ++_i1065)
+            org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1071.size);
+            SQLCheckConstraint _elem1072;
+            for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073)
             {
-              _elem1064 = new SQLCheckConstraint();
-              _elem1064.read(iprot);
-              struct.checkConstraints.add(_elem1064);
+              _elem1072 = new SQLCheckConstraint();
+              _elem1072.read(iprot);
+              struct.checkConstraints.add(_elem1072);
             }
           }
           struct.setCheckConstraintsIsSet(true);
@@ -66416,13 +66571,13 @@ import org.slf4j.LoggerFactory;
             case 3: // PART_NAMES
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1066 = iprot.readListBegin();
-                  struct.partNames = new ArrayList<String>(_list1066.size);
-                  String _elem1067;
-                  for (int _i1068 = 0; _i1068 < _list1066.size; ++_i1068)
+                  org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin();
+                  struct.partNames = new ArrayList<String>(_list1074.size);
+                  String _elem1075;
+                  for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076)
                   {
-                    _elem1067 = iprot.readString();
-                    struct.partNames.add(_elem1067);
+                    _elem1075 = iprot.readString();
+                    struct.partNames.add(_elem1075);
                   }
                   iprot.readListEnd();
                 }
@@ -66458,9 +66613,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(PART_NAMES_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size()));
-            for (String _iter1069 : struct.partNames)
+            for (String _iter1077 : struct.partNames)
             {
-              oprot.writeString(_iter1069);
+              oprot.writeString(_iter1077);
             }
             oprot.writeListEnd();
           }
@@ -66503,9 +66658,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetPartNames()) {
           {
             oprot.writeI32(struct.partNames.size());
-            for (String _iter1070 : struct.partNames)
+            for (String _iter1078 : struct.partNames)
             {
-              oprot.writeString(_iter1070);
+              oprot.writeString(_iter1078);
             }
           }
         }
@@ -66525,13 +66680,13 @@ import org.slf4j.LoggerFactory;
         }
         if (incoming.get(2)) {
           {
-            org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.partNames = new ArrayList<String>(_list1071.size);
-            String _elem1072;
-            for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073)
+            org.apache.thrift.protocol.TList _list1079 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.partNames = new ArrayList<String>(_list1079.size);
+            String _elem1080;
+            for (int _i1081 = 0; _i1081 < _list1079.size; ++_i1081)
             {
-              _elem1072 = iprot.readString();
-              struct.partNames.add(_elem1072);
+              _elem1080 = iprot.readString();
+              struct.partNames.add(_elem1080);
             }
           }
           struct.setPartNamesIsSet(true);
@@ -68588,13 +68743,13 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list1074.size);
-                  String _elem1075;
-                  for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076)
+                  org.apache.thrift.protocol.TList _list1082 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list1082.size);
+                  String _elem1083;
+                  for (int _i1084 = 0; _i1084 < _list1082.size; ++_i1084)
                   {
-                    _elem1075 = iprot.readString();
-                    struct.success.add(_elem1075);
+                    _elem1083 = iprot.readString();
+                    struct.success.add(_elem1083);
                   }
                   iprot.readListEnd();
                 }
@@ -68629,9 +68784,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter1077 : struct.success)
+            for (String _iter1085 : struct.success)
             {
-              oprot.writeString(_iter1077);
+              oprot.writeString(_iter1085);
             }
             oprot.writeListEnd();
           }
@@ -68670,9 +68825,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter1078 : struct.success)
+            for (String _iter1086 : struct.success)
             {
-              oprot.writeString(_iter1078);
+              oprot.writeString(_iter1086);
             }
           }
         }
@@ -68687,13 +68842,13 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1079 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list1079.size);
-            String _elem1080;
-            for (int _i1081 = 0; _i1081 < _list1079.size; ++_i1081)
+            org.apache.thrift.protocol.TList _list1087 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list1087.size);
+            String _elem1088;
+            for (int _i1089 = 0; _i1089 < _list1087.size; ++_i1089)
             {
-              _elem1080 = iprot.readString();
-              struct.success.add(_elem1080);
+              _elem1088 = iprot.readString();
+              struct.success.add(_elem1088);
             }
           }
           struct.setSuccessIsSet(true);
@@ -69667,13 +69822,13 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1082 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list1082.size);
-                  String _elem1083;
-                  for (int _i1084 = 0; _i1084 < _list1082.size; ++_i1084)
+                  org.apache.thrift.protocol.TList _list1090 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list1090.size);
+                  String _elem1091;
+                  for (int _i1092 = 0; _i1092 < _list1090.size; ++_i1092)
                   {
-                    _elem1083 = iprot.readString();
-                    struct.success.add(_elem1083);
+                    _elem1091 = iprot.readString();
+                    struct.success.add(_elem1091);
                   }
                   iprot.readListEnd();
                 }
@@ -69708,9 +69863,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter1085 : struct.success)
+            for (String _iter1093 : struct.success)
             {
-              oprot.writeString(_iter1085);
+              oprot.writeString(_iter1093);
             }
             oprot.writeListEnd();
           }
@@ -69749,9 +69904,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter1086 : struct.success)
+            for (String _iter1094 : struct.success)
             {
-              oprot.writeString(_iter1086);
+              oprot.writeString(_iter1094);
             }
           }
         }
@@ -69766,13 +69921,13 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1087 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list1087.size);
-            String _elem1088;
-            for (int _i1089 = 0; _i1089 < _list1087.size; ++_i1089)
+            org.apache.thrift.protocol.TList _list1095 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list1095.size);
+            String _elem1096;
+            for (int _i1097 = 0; _i1097 < _list1095.size; ++_i1097)
             {
-              _elem1088 = iprot.readString();
-              struct.success.add(_elem1088);
+              _elem1096 = iprot.readString();
+              struct.success.add(_elem1096);
             }
           }
           struct.setSuccessIsSet(true);
@@ -70538,13 +70693,13 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1090 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list1090.size);
-                  String _elem1091;
-                  for (int _i1092 = 0; _i1092 < _list1090.size; ++_i1092)
+                  org.apache.thrift.protocol.TList _list1098 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list1098.size);
+                  String _elem1099;
+                  for (int _i1100 = 0; _i1100 < _list1098.size; ++_i1100)
                   {
-                    _elem1091 = iprot.readString();
-                    struct.success.add(_elem1091);
+                    _elem1099 = iprot.readString();
+                    struct.success.add(_elem1099);
                   }
                   iprot.readListEnd();
                 }
@@ -70579,9 +70734,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter1093 : struct.success)
+            for (String _iter1101 : struct.success)
             {
-              oprot.writeString(_iter1093);
+              oprot.writeString(_iter1101);
             }
             oprot.writeListEnd();
           }
@@ -70620,9 +70775,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter1094 : struct.success)
+            for (String _iter1102 : struct.success)
             {
-              oprot.writeString(_iter1094);
+              oprot.writeString(_iter1102);
             }
           }
         }
@@ -70637,13 +70792,13 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1095 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list1095.size);
-            String _elem1096;
-            for (int _i1097 = 0; _i1097 < _list1095.size; ++_i1097)
+            org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list1103.size);
+            String _elem1104;
+            for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105)
             {
-              _elem1096 = iprot.readString();
-              struct.success.add(_elem1096);
+              _elem1104 = iprot.readString();
+              struct.success.add(_elem1104);
             }
           }
           struct.setSuccessIsSet(true);
@@ -71148,13 +71303,13 @@ import org.slf4j.LoggerFactory;
             case 3: // TBL_TYPES
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1098 = iprot.readListBegin();
-                  struct.tbl_types = new ArrayList<String>(_list1098.size);
-                  String _elem1099;
-                  for (int _i1100 = 0; _i1100 < _list1098.size; ++_i1100)
+                  org.apache.thrift.protocol.TList _list1106 = iprot.readListBegin();
+                  struct.tbl_types = new ArrayList<String>(_list1106.size);
+                  String _elem1107;
+                  for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108)
                   {
-                    _elem1099 = iprot.readString();
-                    struct.tbl_types.add(_elem1099);
+                    _elem1107 = iprot.readString();
+                    struct.tbl_types.add(_elem1107);
                   }
                   iprot.readListEnd();
                 }
@@ -71190,9 +71345,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size()));
-            for (String _iter1101 : struct.tbl_types)
+            for (String _iter1109 : struct.tbl_types)
             {
-              oprot.writeString(_iter1101);
+              oprot.writeString(_iter1109);
             }
             oprot.writeListEnd();
           }
@@ -71235,9 +71390,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetTbl_types()) {
           {
             oprot.writeI32(struct.tbl_types.size());
-            for (String _iter1102 : struct.tbl_types)
+            for (String _iter1110 : struct.tbl_types)
             {
-              oprot.writeString(_iter1102);
+              oprot.writeString(_iter1110);
             }
           }
         }
@@ -71257,13 +71412,13 @@ import org.slf4j.LoggerFactory;
         }
         if (incoming.get(2)) {
           {
-            org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.tbl_types = new ArrayList<String>(_list1103.size);
-            String _elem1104;
-            for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105)
+            org.apache.thrift.protocol.TList _list1111 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.tbl_types = new ArrayList<String>(_list1111.size);
+            String _elem1112;
+            for (int _i1113 = 0; _i1113 < _list1111.size; ++_i1113)
             {
-              _elem1104 = iprot.readString();
-              struct.tbl_types.add(_elem1104);
+              _elem1112 = iprot.readString();
+              struct.tbl_types.add(_elem1112);
             }
           }
           struct.setTbl_typesIsSet(true);
@@ -71669,14 +71824,14 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1106 = iprot.readListBegin();
-                  struct.success = new ArrayList<TableMeta>(_list1106.size);
-                  TableMeta _elem1107;
-                  for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108)
+                  org.apache.thrift.protocol.TList _list1114 = iprot.readListBegin();
+                  struct.success = new ArrayList<TableMeta>(_list1114.size);
+                  TableMeta _elem1115;
+                  for (int _i1116 = 0; _i1116 < _list1114.size; ++_i1116)
                   {
-                    _elem1107 = new TableMeta();
-                    _elem1107.read(iprot);
-                    struct.success.add(_elem1107);
+                    _elem1115 = new TableMeta();
+                    _elem1115.read(iprot);
+                    struct.success.add(_elem1115);
                   }
                   iprot.readListEnd();
                 }
@@ -71711,9 +71866,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (TableMeta _iter1109 : struct.success)
+            for (TableMeta _iter1117 : struct.success)
             {
-              _iter1109.write(oprot);
+              _iter1117.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -71752,9 +71907,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (TableMeta _iter1110 : struct.success)
+            for (TableMeta _iter1118 : struct.success)
             {
-              _iter1110.write(oprot);
+              _iter1118.write(oprot);
             }
           }
         }
@@ -71769,14 +71924,14 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1111 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<TableMeta>(_list1111.size);
-            TableMeta _elem1112;
-            for (int _i1113 = 0; _i1113 < _list1111.size; ++_i1113)
+            org.apache.thrift.protocol.TList _list1119 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<TableMeta>(_list1119.size);
+            TableMeta _elem1120;
+            for (int _i1121 = 0; _i1121 < _list1119.size; ++_i1121)
             {
-              _elem1112 = new TableMeta();
-              _elem1112.read(iprot);
-              struct.success.add(_elem1112);
+              _elem1120 = new TableMeta();
+              _elem1120.read(iprot);
+              struct.success.add(_elem1120);
             }
           }
           struct.setSuccessIsSet(true);
@@ -72542,13 +72697,13 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1114 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list1114.size);
-                  String _elem1115;
-                  for (int _i1116 = 0; _i1116 < _list1114.size; ++_i1116)
+                  org.apache.thrift.protocol.TList _list1122 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list1122.size);
+                  String _elem1123;
+                  for (int _i1124 = 0; _i1124 < _list1122.size; ++_i1124)
                   {
-                    _elem1115 = iprot.readString();
-                    struct.success.add(_elem1115);
+                    _elem1123 = iprot.readString();
+                    struct.success.add(_elem1123);
                   }
                   iprot.readListEnd();
                 }
@@ -72583,9 +72738,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter1117 : struct.success)
+            for (String _iter1125 : struct.success)
             {
-              oprot.writeString(_iter1117);
+              oprot.writeString(_iter1125);
             }
             oprot.writeListEnd();
           }
@@ -72624,9 +72779,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter1118 : struct.success)
+            for (String _iter1126 : struct.success)
             {
-              oprot.writeString(_iter1118);
+              oprot.writeString(_iter1126);
             }
           }
         }
@@ -72641,13 +72796,13 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1119 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list1119.size);
-            String _elem1120;
-            for (int _i1121 = 0; _i1121 < _list1119.size; ++_i1121)
+            org.apache.thrift.protocol.TList _list1127 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list1127.size);
+            String _elem1128;
+            for (int _i1129 = 0; _i1129 < _list1127.size; ++_i1129)
             {
-              _elem1120 = iprot.readString();
-              struct.success.add(_elem1120);
+              _elem1128 = iprot.readString();
+              struct.success.add(_elem1128);
             }
           }
           struct.setSuccessIsSet(true);
@@ -74100,13 +74255,13 @@ import org.slf4j.LoggerFactory;
             case 2: // TBL_NAMES
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1122 = iprot.readListBegin();
-                  struct.tbl_names = new ArrayList<String>(_list1122.size);
-                  String _elem1123;
-                  for (int _i1124 = 0; _i1124 < _list1122.size; ++_i1124)
+                  org.apache.thrift.protocol.TList _list1130 = iprot.readListBegin();
+                  struct.tbl_names = new ArrayList<String>(_list1130.size);
+                  String _elem1131;
+                  for (int _i1132 = 0; _i1132 < _list1130.size; ++_i1132)
                   {
-                    _elem1123 = iprot.readString();
-                    struct.tbl_names.add(_elem1123);
+                    _elem1131 = iprot.readString();
+                    struct.tbl_names.add(_elem1131);
                   }
                   iprot.readListEnd();
                 }
@@ -74137,9 +74292,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size()));
-            for (String _iter1125 : struct.tbl_names)
+            for (String _iter1133 : struct.tbl_names)
             {
-              oprot.writeString(_iter1125);
+              oprot.writeString(_iter1133);
             }
             oprot.writeListEnd();
           }
@@ -74176,9 +74331,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetTbl_names()) {
           {
             oprot.writeI32(struct.tbl_names.size());
-            for (String _iter1126 : struct.tbl_names)
+            for (String _iter1134 : struct.tbl_names)
             {
-              oprot.writeString(_iter1126);
+              oprot.writeString(_iter1134);
             }
           }
         }
@@ -74194,13 +74349,13 @@ import org.slf4j.LoggerFactory;
         }
         if (incoming.get(1)) {
           {
-            org.apache.thrift.protocol.TList _list1127 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.tbl_names = new ArrayList<String>(_list1127.size);
-            String _elem1128;
-            for (int _i1129 = 0; _i1129 < _list1127.size; ++_i1129)
+            org.apache.thrift.protocol.TList _list1135 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.tbl_names = new ArrayList<String>(_list1135.size);
+            String _elem1136;
+            for (int _i1137 = 0; _i1137 < _list1135.size; ++_i1137)
             {
-              _elem1128 = iprot.readString();
-              struct.tbl_names.add(_elem1128);
+              _elem1136 = iprot.readString();
+              struct.tbl_names.add(_elem1136);
             }
           }
           struct.setTbl_namesIsSet(true);
@@ -74525,14 +74680,14 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1130 = iprot.readListBegin();
-                  struct.success = new ArrayList<Table>(_list1130.size);
-                  Table _elem1131;
-                  for (int _i1132 = 0; _i1132 < _list1130.size; ++_i1132)
+                  org.apache.thrift.protocol.TList _list1138 = iprot.readListBegin();
+                  struct.success = new ArrayList<Table>(_list1138.size);
+                  Table _elem1139;
+                  for (int _i1140 = 0; _i1140 < _list1138.size; ++_i1140)
                   {
-                    _elem1131 = new Table();
-                    _elem1131.read(iprot);
-                    struct.success.add(_elem1131);
+                    _elem1139 = new Table();
+                    _elem1139.read(iprot);
+                    struct.success.add(_elem1139);
                   }
                   iprot.readListEnd();
                 }
@@ -74558,9 +74713,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (Table _iter1133 : struct.success)
+            for (Table _iter1141 : struct.success)
             {
-              _iter1133.write(oprot);
+              _iter1141.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -74591,9 +74746,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (Table _iter1134 : struct.success)
+            for (Table _iter1142 : struct.success)
             {
-              _iter1134.write(oprot);
+              _iter1142.write(oprot);
             }
           }
         }
@@ -74605,14 +74760,14 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1135 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<Table>(_list1135.size);
-            Table _elem1136;
-            for (int _i1137 = 0; _i1137 < _list1135.size; ++_i1137)
+            org.apache.thrift.protocol.TList _list1143 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<Table>(_list1143.size);
+            Table _elem1144;
+            for (int _i1145 = 0; _i1145 < _list1143.size; ++_i1145)
             {
-              _elem1136 = new Table();
-              _elem1136.read(iprot);
-              struct.success.add(_elem1136);
+              _elem1144 = new Table();
+              _elem1144.read(iprot);
+              struct.success.add(_elem1144);
             }
           }
           struct.setSuccessIsSet(true);
@@ -80120,13 +80275,13 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1138 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list1138.size);
-                  String _elem1139;
-                  for (int _i1140 = 0; _i1140 < _list1138.size; ++_i1140)
+                  org.apache.thrift.protocol.TList _list1146 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list1146.size);
+                  String _elem1147;
+                  for (int _i1148 = 0; _i1148 < _list1146.size; ++_i1148)
                   {
-                    _elem1139 = iprot.readString();
-                    struct.success.add(_elem1139);
+                    _elem1147 = iprot.readString();
+                    struct.success.add(_elem1147);
                   }
                   iprot.readListEnd();
                 }
@@ -80179,9 +80334,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter1141 : struct.success)
+            for (String _iter1149 : struct.success)
             {
-              oprot.writeString(_iter1141);
+              oprot.writeString(_iter1149);
             }
             oprot.writeListEnd();
           }
@@ -80236,9 +80391,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter1142 : struct.success)
+            for (String _iter1150 : struct.success)
             {
-              oprot.writeString(_iter1142);
+              oprot.writeString(_iter1150);
             }
           }
         }
@@ -80259,13 +80414,13 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1143 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list1143.size);
-            String _elem1144;
-            for (int _i1145 = 0; _i1145 < _list1143.size; ++_i1145)
+            org.apache.thrift.protocol.TList _list1151 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list1151.size);
+            String _elem1152;
+            for (int _i1153 = 0; _i1153 < _list1151.size; ++_i1153)
             {
-              _elem1144 = iprot.readString();
-              struct.success.add(_elem1144);
+              _elem1152 = iprot.readString();
+              struct.success.add(_elem1152);
             }
           }
           struct.setSuccessIsSet(true);
@@ -87062,14 +87217,14 @@ import org.slf4j.LoggerFactory;
             case 1: // NEW_PARTS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1146 = iprot.readListBegin();
-                  struct.new_parts = new ArrayList<Partition>(_list1146.size);
-                  Partition _elem1147;
-                  for (int _i1148 = 0; _i1148 < _list1146.size; ++_i1148)
+                  org.apache.thrift.protocol.TList _list1154 = iprot.readListBegin();
+                  struct.new_parts = new ArrayList<Partition>(_list1154.size);
+                  Partition _elem1155;
+                  for (int _i1156 = 0; _i1156 < _list1154.size; ++_i1156)
                   {
-                    _elem1147 = new Partition();
-                    _elem1147.read(iprot);
-                    struct.new_parts.add(_elem1147);
+                    _elem1155 = new Partition();
+                    _elem1155.read(iprot);
+                    struct.new_parts.add(_elem1155);
                   }
                   iprot.readListEnd();
                 }
@@ -87095,9 +87250,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size()));
-            for (Partition _iter1149 : struct.new_parts)
+            for (Partition _iter1157 : struct.new_parts)
             {
-              _iter1149.write(oprot);
+              _iter1157.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -87128,9 +87283,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetNew_parts()) {
           {
             oprot.writeI32(struct.new_parts.size());
-            for (Partition _iter1150 : struct.new_parts)
+            for (Partition _iter1158 : struct.new_parts)
             {
-              _iter1150.write(oprot);
+              _iter1158.write(oprot);
             }
           }
         }
@@ -87142,14 +87297,14 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1151 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.new_parts = new ArrayList<Partition>(_list1151.size);
-            Partition _elem1152;
-            for (int _i1153 = 0; _i1153 < _list1151.size; ++_i1153)
+            org.apache.thrift.protocol.TList _list1159 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.new_parts = new ArrayList<Partition>(_list1159.size);
+            Partition _elem1160;
+ 

<TRUNCATED>
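
The hunks above are the usual churn from regenerating the Thrift bindings: the numeric suffixes on the generated _list/_elem/_iter/_i locals are assigned sequentially across ThriftHiveMetastore.java, so adding structs or methods earlier in the IDL shifts every later counter (here by 8). The deserialization logic itself is unchanged; each list field follows the same readListBegin / counted loop / readListEnd pattern. Below is a minimal, self-contained sketch of that pattern for a LIST<STRING> field; it is illustrative only, and the class and method names are not part of the generated code.

import java.util.ArrayList;
import java.util.List;

import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;

public class ThriftListReadSketch {
  // Mirrors the generated scheme code: readListBegin() supplies the element
  // count, each element is read inside a counted loop, and readListEnd()
  // closes the container. Struct-typed lists differ only in that the loop
  // body constructs the element and calls element.read(iprot) instead of
  // iprot.readString().
  static List<String> readStringList(TProtocol iprot) throws TException {
    TList listHeader = iprot.readListBegin();          // element type + size
    List<String> values = new ArrayList<>(listHeader.size);
    for (int i = 0; i < listHeader.size; ++i) {
      values.add(iprot.readString());
    }
    iprot.readListEnd();
    return values;
  }
}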

[02/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query80.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query80.q.out b/ql/src/test/results/clientpositive/perf/tez/query80.q.out
index ee8cdd8..e02f45f 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query80.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query80.q.out
@@ -232,394 +232,400 @@ Stage-0
     limit:100
     Stage-1
       Reducer 10 vectorized
-      File Output Operator [FS_460]
-        Limit [LIM_459] (rows=100 width=108)
+      File Output Operator [FS_465]
+        Limit [LIM_464] (rows=100 width=108)
           Number of rows:100
-          Select Operator [SEL_458] (rows=1217531358 width=108)
+          Select Operator [SEL_463] (rows=1217531358 width=108)
             Output:["_col0","_col1","_col2","_col3","_col4"]
           <-Reducer 9 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_457]
-              Select Operator [SEL_456] (rows=1217531358 width=108)
+            SHUFFLE [RS_462]
+              Select Operator [SEL_461] (rows=1217531358 width=108)
                 Output:["_col0","_col1","_col2","_col3","_col4"]
-                Group By Operator [GBY_455] (rows=1217531358 width=108)
+                Group By Operator [GBY_460] (rows=1217531358 width=108)
                   Output:["_col0","_col1","_col3","_col4","_col5"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1, KEY._col2
                 <-Union 8 [SIMPLE_EDGE]
                   <-Reducer 18 [CONTAINS] vectorized
-                    Reduce Output Operator [RS_484]
+                    Reduce Output Operator [RS_490]
                       PartitionCols:_col0, _col1, _col2
-                      Group By Operator [GBY_483] (rows=2435062716 width=108)
+                      Group By Operator [GBY_489] (rows=2435062716 width=108)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L
-                        Select Operator [SEL_482] (rows=231905279 width=135)
-                          Output:["_col0","_col1","_col2","_col3","_col4"]
-                          Group By Operator [GBY_481] (rows=231905279 width=135)
-                            Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0
-                          <-Reducer 17 [SIMPLE_EDGE]
-                            SHUFFLE [RS_75]
-                              PartitionCols:_col0
-                              Group By Operator [GBY_74] (rows=463810558 width=135)
-                                Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)"],keys:_col0
-                                Select Operator [SEL_72] (rows=463810558 width=135)
-                                  Output:["_col0","_col1","_col2","_col3"]
-                                  Merge Join Operator [MERGEJOIN_368] (rows=463810558 width=135)
-                                    Conds:RS_69._col1=RS_469._col0(Inner),Output:["_col5","_col6","_col9","_col10","_col18"]
-                                  <-Map 39 [SIMPLE_EDGE] vectorized
-                                    PARTITION_ONLY_SHUFFLE [RS_469]
-                                      PartitionCols:_col0
-                                      Select Operator [SEL_468] (rows=46000 width=460)
-                                        Output:["_col0","_col1"]
-                                        Filter Operator [FIL_467] (rows=46000 width=460)
-                                          predicate:cp_catalog_page_sk is not null
-                                          TableScan [TS_54] (rows=46000 width=460)
-                                            default@catalog_page,catalog_page,Tbl:COMPLETE,Col:NONE,Output:["cp_catalog_page_sk","cp_catalog_page_id"]
-                                  <-Reducer 16 [SIMPLE_EDGE]
-                                    SHUFFLE [RS_69]
-                                      PartitionCols:_col1
-                                      Merge Join Operator [MERGEJOIN_367] (rows=421645953 width=135)
-                                        Conds:RS_66._col3=RS_425._col0(Inner),Output:["_col1","_col5","_col6","_col9","_col10"]
-                                      <-Map 30 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_425]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_422] (rows=1150 width=1179)
-                                            Output:["_col0"]
-                                            Filter Operator [FIL_421] (rows=1150 width=1179)
-                                              predicate:((p_channel_tv = 'N') and p_promo_sk is not null)
-                                              TableScan [TS_12] (rows=2300 width=1179)
-                                                default@promotion,promotion,Tbl:COMPLETE,Col:NONE,Output:["p_promo_sk","p_channel_tv"]
-                                      <-Reducer 15 [SIMPLE_EDGE]
-                                        SHUFFLE [RS_66]
-                                          PartitionCols:_col3
-                                          Merge Join Operator [MERGEJOIN_366] (rows=383314495 width=135)
-                                            Conds:RS_63._col2=RS_409._col0(Inner),Output:["_col1","_col3","_col5","_col6","_col9","_col10"]
-                                          <-Map 26 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_409]
-                                              PartitionCols:_col0
-                                              Select Operator [SEL_406] (rows=154000 width=1436)
-                                                Output:["_col0"]
-                                                Filter Operator [FIL_405] (rows=154000 width=1436)
-                                                  predicate:((i_current_price > 50) and i_item_sk is not null)
-                                                  TableScan [TS_9] (rows=462000 width=1436)
-                                                    default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_current_price"]
-                                          <-Reducer 14 [SIMPLE_EDGE]
-                                            SHUFFLE [RS_63]
-                                              PartitionCols:_col2
-                                              Merge Join Operator [MERGEJOIN_365] (rows=348467716 width=135)
-                                                Conds:RS_60._col0=RS_393._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
-                                              <-Map 12 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_393]
-                                                  PartitionCols:_col0
-                                                  Select Operator [SEL_390] (rows=8116 width=1119)
-                                                    Output:["_col0"]
-                                                    Filter Operator [FIL_389] (rows=8116 width=1119)
-                                                      predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00' AND TIMESTAMP'1998-09-03 00:00:00' and d_date_sk is not null)
-                                                      TableScan [TS_6] (rows=73049 width=1119)
-                                                        default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
-                                              <-Reducer 37 [SIMPLE_EDGE]
-                                                SHUFFLE [RS_60]
-                                                  PartitionCols:_col0
-                                                  Merge Join Operator [MERGEJOIN_364] (rows=316788826 width=135)
-                                                    Conds:RS_477._col2, _col4=RS_480._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
-                                                  <-Map 36 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_477]
-                                                      PartitionCols:_col2, _col4
-                                                      Select Operator [SEL_476] (rows=287989836 width=135)
-                                                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                                        Filter Operator [FIL_475] (rows=287989836 width=135)
-                                                          predicate:((cs_catalog_page_sk BETWEEN DynamicValue(RS_70_catalog_page_cp_catalog_page_sk_min) AND DynamicValue(RS_70_catalog_page_cp_catalog_page_sk_max) and in_bloom_filter(cs_catalog_page_sk, DynamicValue(RS_70_catalog_page_cp_catalog_page_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_64_item_i_item_sk_min) AND DynamicValue(RS_64_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_64_item_i_item_sk_bloom_filter))) and (cs_promo_sk BETWEEN DynamicValue(RS_67_promotion_p_promo_sk_min) AND DynamicValue(RS_67_promotion_p_promo_sk_max) and in_bloom_filter(cs_promo_sk, DynamicValue(RS_67_promotion_p_promo_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_61_date_dim_d_date_sk_min) AND DynamicValue(RS_61_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_61_date_dim_d_date_sk_bloom_filter))) and cs_catalog_page_sk is not null and cs_item_sk is not null and cs_promo_sk is not null and cs_sold_date_sk is not null)
-                                                          TableScan [TS_39] (rows=287989836 width=135)
-                                                            default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_catalog_page_sk","cs_item_sk","cs_promo_sk","cs_order_number","cs_ext_sales_price","cs_net_profit"]
-                                                          <-Reducer 19 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_462]
-                                                              Group By Operator [GBY_461] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                              <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_401]
-                                                                  Group By Operator [GBY_398] (rows=1 width=12)
-                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_394] (rows=8116 width=1119)
-                                                                      Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_390]
-                                                          <-Reducer 28 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_464]
-                                                              Group By Operator [GBY_463] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                              <-Map 26 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_417]
-                                                                  Group By Operator [GBY_414] (rows=1 width=12)
-                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_410] (rows=154000 width=1436)
-                                                                      Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_406]
-                                                          <-Reducer 32 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_466]
-                                                              Group By Operator [GBY_465] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                              <-Map 30 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_433]
-                                                                  Group By Operator [GBY_430] (rows=1 width=12)
-                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_426] (rows=1150 width=1179)
-                                                                      Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_422]
-                                                          <-Reducer 40 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_474]
-                                                              Group By Operator [GBY_473] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                              <-Map 39 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                PARTITION_ONLY_SHUFFLE [RS_472]
-                                                                  Group By Operator [GBY_471] (rows=1 width=12)
-                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_470] (rows=46000 width=460)
-                                                                      Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_468]
-                                                  <-Map 38 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_480]
-                                                      PartitionCols:_col0, _col1
-                                                      Select Operator [SEL_479] (rows=28798881 width=106)
-                                                        Output:["_col0","_col1","_col2","_col3"]
-                                                        Filter Operator [FIL_478] (rows=28798881 width=106)
-                                                          predicate:cr_item_sk is not null
-                                                          TableScan [TS_42] (rows=28798881 width=106)
-                                                            default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_item_sk","cr_order_number","cr_return_amount","cr_net_loss"]
+                        Top N Key Operator [TNK_488] (rows=811687572 width=108)
+                          keys:_col0, _col1, 0L,sort order:+++,top n:100
+                          Select Operator [SEL_487] (rows=231905279 width=135)
+                            Output:["_col0","_col1","_col2","_col3","_col4"]
+                            Group By Operator [GBY_486] (rows=231905279 width=135)
+                              Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0
+                            <-Reducer 17 [SIMPLE_EDGE]
+                              SHUFFLE [RS_75]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_74] (rows=463810558 width=135)
+                                  Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)"],keys:_col0
+                                  Select Operator [SEL_72] (rows=463810558 width=135)
+                                    Output:["_col0","_col1","_col2","_col3"]
+                                    Merge Join Operator [MERGEJOIN_369] (rows=463810558 width=135)
+                                      Conds:RS_69._col1=RS_474._col0(Inner),Output:["_col5","_col6","_col9","_col10","_col18"]
+                                    <-Map 39 [SIMPLE_EDGE] vectorized
+                                      PARTITION_ONLY_SHUFFLE [RS_474]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_473] (rows=46000 width=460)
+                                          Output:["_col0","_col1"]
+                                          Filter Operator [FIL_472] (rows=46000 width=460)
+                                            predicate:cp_catalog_page_sk is not null
+                                            TableScan [TS_54] (rows=46000 width=460)
+                                              default@catalog_page,catalog_page,Tbl:COMPLETE,Col:NONE,Output:["cp_catalog_page_sk","cp_catalog_page_id"]
+                                    <-Reducer 16 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_69]
+                                        PartitionCols:_col1
+                                        Merge Join Operator [MERGEJOIN_368] (rows=421645953 width=135)
+                                          Conds:RS_66._col3=RS_429._col0(Inner),Output:["_col1","_col5","_col6","_col9","_col10"]
+                                        <-Map 30 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_429]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_426] (rows=1150 width=1179)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_425] (rows=1150 width=1179)
+                                                predicate:((p_channel_tv = 'N') and p_promo_sk is not null)
+                                                TableScan [TS_12] (rows=2300 width=1179)
+                                                  default@promotion,promotion,Tbl:COMPLETE,Col:NONE,Output:["p_promo_sk","p_channel_tv"]
+                                        <-Reducer 15 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_66]
+                                            PartitionCols:_col3
+                                            Merge Join Operator [MERGEJOIN_367] (rows=383314495 width=135)
+                                              Conds:RS_63._col2=RS_413._col0(Inner),Output:["_col1","_col3","_col5","_col6","_col9","_col10"]
+                                            <-Map 26 [SIMPLE_EDGE] vectorized
+                                              SHUFFLE [RS_413]
+                                                PartitionCols:_col0
+                                                Select Operator [SEL_410] (rows=154000 width=1436)
+                                                  Output:["_col0"]
+                                                  Filter Operator [FIL_409] (rows=154000 width=1436)
+                                                    predicate:((i_current_price > 50) and i_item_sk is not null)
+                                                    TableScan [TS_9] (rows=462000 width=1436)
+                                                      default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_current_price"]
+                                            <-Reducer 14 [SIMPLE_EDGE]
+                                              SHUFFLE [RS_63]
+                                                PartitionCols:_col2
+                                                Merge Join Operator [MERGEJOIN_366] (rows=348467716 width=135)
+                                                  Conds:RS_60._col0=RS_397._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
+                                                <-Map 12 [SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_397]
+                                                    PartitionCols:_col0
+                                                    Select Operator [SEL_394] (rows=8116 width=1119)
+                                                      Output:["_col0"]
+                                                      Filter Operator [FIL_393] (rows=8116 width=1119)
+                                                        predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00' AND TIMESTAMP'1998-09-03 00:00:00' and d_date_sk is not null)
+                                                        TableScan [TS_6] (rows=73049 width=1119)
+                                                          default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
+                                                <-Reducer 37 [SIMPLE_EDGE]
+                                                  SHUFFLE [RS_60]
+                                                    PartitionCols:_col0
+                                                    Merge Join Operator [MERGEJOIN_365] (rows=316788826 width=135)
+                                                      Conds:RS_482._col2, _col4=RS_485._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
+                                                    <-Map 36 [SIMPLE_EDGE] vectorized
+                                                      SHUFFLE [RS_482]
+                                                        PartitionCols:_col2, _col4
+                                                        Select Operator [SEL_481] (rows=287989836 width=135)
+                                                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+                                                          Filter Operator [FIL_480] (rows=287989836 width=135)
+                                                            predicate:((cs_catalog_page_sk BETWEEN DynamicValue(RS_70_catalog_page_cp_catalog_page_sk_min) AND DynamicValue(RS_70_catalog_page_cp_catalog_page_sk_max) and in_bloom_filter(cs_catalog_page_sk, DynamicValue(RS_70_catalog_page_cp_catalog_page_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_64_item_i_item_sk_min) AND DynamicValue(RS_64_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_64_item_i_item_sk_bloom_filter))) and (cs_promo_sk BETWEEN DynamicValue(RS_67_promotion_p_promo_sk_min) AND DynamicValue(RS_67_promotion_p_promo_sk_max) and in_bloom_filter(cs_promo_sk, DynamicValue(RS_67_promotion_p_promo_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_61_date_dim_d_date_sk_min) AND DynamicValue(RS_61_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_61_date_dim_d_date_sk_bloom_filter))) and cs_catalog_page_sk is not null and cs_item_sk is not null and cs_promo_sk is not null and cs_sold_date_sk is not null)
+                                                            TableScan [TS_39] (rows=287989836 width=135)
+                                                              default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_catalog_page_sk","cs_item_sk","cs_promo_sk","cs_order_number","cs_ext_sales_price","cs_net_profit"]
+                                                            <-Reducer 19 [BROADCAST_EDGE] vectorized
+                                                              BROADCAST [RS_467]
+                                                                Group By Operator [GBY_466] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                  SHUFFLE [RS_405]
+                                                                    Group By Operator [GBY_402] (rows=1 width=12)
+                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                      Select Operator [SEL_398] (rows=8116 width=1119)
+                                                                        Output:["_col0"]
+                                                                         Please refer to the previous Select Operator [SEL_394]
+                                                            <-Reducer 28 [BROADCAST_EDGE] vectorized
+                                                              BROADCAST [RS_469]
+                                                                Group By Operator [GBY_468] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                <-Map 26 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                  SHUFFLE [RS_421]
+                                                                    Group By Operator [GBY_418] (rows=1 width=12)
+                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                      Select Operator [SEL_414] (rows=154000 width=1436)
+                                                                        Output:["_col0"]
+                                                                         Please refer to the previous Select Operator [SEL_410]
+                                                            <-Reducer 32 [BROADCAST_EDGE] vectorized
+                                                              BROADCAST [RS_471]
+                                                                Group By Operator [GBY_470] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                <-Map 30 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                  SHUFFLE [RS_437]
+                                                                    Group By Operator [GBY_434] (rows=1 width=12)
+                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                      Select Operator [SEL_430] (rows=1150 width=1179)
+                                                                        Output:["_col0"]
+                                                                         Please refer to the previous Select Operator [SEL_426]
+                                                            <-Reducer 40 [BROADCAST_EDGE] vectorized
+                                                              BROADCAST [RS_479]
+                                                                Group By Operator [GBY_478] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                <-Map 39 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                  PARTITION_ONLY_SHUFFLE [RS_477]
+                                                                    Group By Operator [GBY_476] (rows=1 width=12)
+                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                      Select Operator [SEL_475] (rows=46000 width=460)
+                                                                        Output:["_col0"]
+                                                                         Please refer to the previous Select Operator [SEL_473]
+                                                    <-Map 38 [SIMPLE_EDGE] vectorized
+                                                      SHUFFLE [RS_485]
+                                                        PartitionCols:_col0, _col1
+                                                        Select Operator [SEL_484] (rows=28798881 width=106)
+                                                          Output:["_col0","_col1","_col2","_col3"]
+                                                          Filter Operator [FIL_483] (rows=28798881 width=106)
+                                                            predicate:cr_item_sk is not null
+                                                            TableScan [TS_42] (rows=28798881 width=106)
+                                                              default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_item_sk","cr_order_number","cr_return_amount","cr_net_loss"]
                   <-Reducer 24 [CONTAINS] vectorized
-                    Reduce Output Operator [RS_508]
+                    Reduce Output Operator [RS_515]
                       PartitionCols:_col0, _col1, _col2
-                      Group By Operator [GBY_507] (rows=2435062716 width=108)
+                      Group By Operator [GBY_514] (rows=2435062716 width=108)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L
-                        Select Operator [SEL_506] (rows=115958879 width=135)
-                          Output:["_col0","_col1","_col2","_col3","_col4"]
-                          Group By Operator [GBY_505] (rows=115958879 width=135)
-                            Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0
-                          <-Reducer 23 [SIMPLE_EDGE]
-                            SHUFFLE [RS_115]
-                              PartitionCols:_col0
-                              Group By Operator [GBY_114] (rows=231917759 width=135)
-                                Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)"],keys:_col0
-                                Select Operator [SEL_112] (rows=231917759 width=135)
-                                  Output:["_col0","_col1","_col2","_col3"]
-                                  Merge Join Operator [MERGEJOIN_373] (rows=231917759 width=135)
-                                    Conds:RS_109._col2=RS_493._col0(Inner),Output:["_col5","_col6","_col9","_col10","_col18"]
-                                  <-Map 44 [SIMPLE_EDGE] vectorized
-                                    PARTITION_ONLY_SHUFFLE [RS_493]
-                                      PartitionCols:_col0
-                                      Select Operator [SEL_492] (rows=84 width=1850)
-                                        Output:["_col0","_col1"]
-                                        Filter Operator [FIL_491] (rows=84 width=1850)
-                                          predicate:web_site_sk is not null
-                                          TableScan [TS_94] (rows=84 width=1850)
-                                            default@web_site,web_site,Tbl:COMPLETE,Col:NONE,Output:["web_site_sk","web_site_id"]
-                                  <-Reducer 22 [SIMPLE_EDGE]
-                                    SHUFFLE [RS_109]
-                                      PartitionCols:_col2
-                                      Merge Join Operator [MERGEJOIN_372] (rows=210834322 width=135)
-                                        Conds:RS_106._col3=RS_427._col0(Inner),Output:["_col2","_col5","_col6","_col9","_col10"]
-                                      <-Map 30 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_427]
-                                          PartitionCols:_col0
-                                           Please refer to the previous Select Operator [SEL_422]
-                                      <-Reducer 21 [SIMPLE_EDGE]
-                                        SHUFFLE [RS_106]
-                                          PartitionCols:_col3
-                                          Merge Join Operator [MERGEJOIN_371] (rows=191667562 width=135)
-                                            Conds:RS_103._col1=RS_411._col0(Inner),Output:["_col2","_col3","_col5","_col6","_col9","_col10"]
-                                          <-Map 26 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_411]
-                                              PartitionCols:_col0
-                                               Please refer to the previous Select Operator [SEL_406]
-                                          <-Reducer 20 [SIMPLE_EDGE]
-                                            SHUFFLE [RS_103]
-                                              PartitionCols:_col1
-                                              Merge Join Operator [MERGEJOIN_370] (rows=174243235 width=135)
-                                                Conds:RS_100._col0=RS_395._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
-                                              <-Map 12 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_395]
-                                                  PartitionCols:_col0
-                                                   Please refer to the previous Select Operator [SEL_390]
-                                              <-Reducer 42 [SIMPLE_EDGE]
-                                                SHUFFLE [RS_100]
-                                                  PartitionCols:_col0
-                                                  Merge Join Operator [MERGEJOIN_369] (rows=158402938 width=135)
-                                                    Conds:RS_501._col1, _col4=RS_504._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
-                                                  <-Map 41 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_501]
-                                                      PartitionCols:_col1, _col4
-                                                      Select Operator [SEL_500] (rows=144002668 width=135)
-                                                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                                        Filter Operator [FIL_499] (rows=144002668 width=135)
-                                                          predicate:((ws_item_sk BETWEEN DynamicValue(RS_104_item_i_item_sk_min) AND DynamicValue(RS_104_item_i_item_sk_max) and in_bloom_filter(ws_item_sk, DynamicValue(RS_104_item_i_item_sk_bloom_filter))) and (ws_promo_sk BETWEEN DynamicValue(RS_107_promotion_p_promo_sk_min) AND DynamicValue(RS_107_promotion_p_promo_sk_max) and in_bloom_filter(ws_promo_sk, DynamicValue(RS_107_promotion_p_promo_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_101_date_dim_d_date_sk_min) AND DynamicValue(RS_101_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_101_date_dim_d_date_sk_bloom_filter))) and (ws_web_site_sk BETWEEN DynamicValue(RS_110_web_site_web_site_sk_min) AND DynamicValue(RS_110_web_site_web_site_sk_max) and in_bloom_filter(ws_web_site_sk, DynamicValue(RS_110_web_site_web_site_sk_bloom_filter))) and ws_item_sk is not null and ws_promo_sk is not null and ws_sold_date_sk is not null and ws_web_site_sk is not null)
-                                                          TableScan [TS_79] (rows=144002668 width=135)
-                                                            default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_web_site_sk","ws_promo_sk","ws_order_number","ws_ext_sales_price","ws_net_profit"]
-                                                          <-Reducer 25 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_486]
-                                                              Group By Operator [GBY_485] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                              <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_402]
-                                                                  Group By Operator [GBY_399] (rows=1 width=12)
-                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_396] (rows=8116 width=1119)
-                                                                      Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_390]
-                                                          <-Reducer 29 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_488]
-                                                              Group By Operator [GBY_487] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                              <-Map 26 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_418]
-                                                                  Group By Operator [GBY_415] (rows=1 width=12)
-                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_412] (rows=154000 width=1436)
-                                                                      Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_406]
-                                                          <-Reducer 33 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_490]
-                                                              Group By Operator [GBY_489] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                              <-Map 30 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_434]
-                                                                  Group By Operator [GBY_431] (rows=1 width=12)
-                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_428] (rows=1150 width=1179)
-                                                                      Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_422]
-                                                          <-Reducer 45 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_498]
-                                                              Group By Operator [GBY_497] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                              <-Map 44 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                PARTITION_ONLY_SHUFFLE [RS_496]
-                                                                  Group By Operator [GBY_495] (rows=1 width=12)
-                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_494] (rows=84 width=1850)
-                                                                      Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_492]
-                                                  <-Map 43 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_504]
-                                                      PartitionCols:_col0, _col1
-                                                      Select Operator [SEL_503] (rows=14398467 width=92)
-                                                        Output:["_col0","_col1","_col2","_col3"]
-                                                        Filter Operator [FIL_502] (rows=14398467 width=92)
-                                                          predicate:wr_item_sk is not null
-                                                          TableScan [TS_82] (rows=14398467 width=92)
-                                                            default@web_returns,web_returns,Tbl:COMPLETE,Col:NONE,Output:["wr_item_sk","wr_order_number","wr_return_amt","wr_net_loss"]
+                        Top N Key Operator [TNK_513] (rows=811687572 width=108)
+                          keys:_col0, _col1, 0L,sort order:+++,top n:100
+                          Select Operator [SEL_512] (rows=115958879 width=135)
+                            Output:["_col0","_col1","_col2","_col3","_col4"]
+                            Group By Operator [GBY_511] (rows=115958879 width=135)
+                              Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0
+                            <-Reducer 23 [SIMPLE_EDGE]
+                              SHUFFLE [RS_115]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_114] (rows=231917759 width=135)
+                                  Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)"],keys:_col0
+                                  Select Operator [SEL_112] (rows=231917759 width=135)
+                                    Output:["_col0","_col1","_col2","_col3"]
+                                    Merge Join Operator [MERGEJOIN_374] (rows=231917759 width=135)
+                                      Conds:RS_109._col2=RS_499._col0(Inner),Output:["_col5","_col6","_col9","_col10","_col18"]
+                                    <-Map 44 [SIMPLE_EDGE] vectorized
+                                      PARTITION_ONLY_SHUFFLE [RS_499]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_498] (rows=84 width=1850)
+                                          Output:["_col0","_col1"]
+                                          Filter Operator [FIL_497] (rows=84 width=1850)
+                                            predicate:web_site_sk is not null
+                                            TableScan [TS_94] (rows=84 width=1850)
+                                              default@web_site,web_site,Tbl:COMPLETE,Col:NONE,Output:["web_site_sk","web_site_id"]
+                                    <-Reducer 22 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_109]
+                                        PartitionCols:_col2
+                                        Merge Join Operator [MERGEJOIN_373] (rows=210834322 width=135)
+                                          Conds:RS_106._col3=RS_431._col0(Inner),Output:["_col2","_col5","_col6","_col9","_col10"]
+                                        <-Map 30 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_431]
+                                            PartitionCols:_col0
+                                             Please refer to the previous Select Operator [SEL_426]
+                                        <-Reducer 21 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_106]
+                                            PartitionCols:_col3
+                                            Merge Join Operator [MERGEJOIN_372] (rows=191667562 width=135)
+                                              Conds:RS_103._col1=RS_415._col0(Inner),Output:["_col2","_col3","_col5","_col6","_col9","_col10"]
+                                            <-Map 26 [SIMPLE_EDGE] vectorized
+                                              SHUFFLE [RS_415]
+                                                PartitionCols:_col0
+                                                 Please refer to the previous Select Operator [SEL_410]
+                                            <-Reducer 20 [SIMPLE_EDGE]
+                                              SHUFFLE [RS_103]
+                                                PartitionCols:_col1
+                                                Merge Join Operator [MERGEJOIN_371] (rows=174243235 width=135)
+                                                  Conds:RS_100._col0=RS_399._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
+                                                <-Map 12 [SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_399]
+                                                    PartitionCols:_col0
+                                                     Please refer to the previous Select Operator [SEL_394]
+                                                <-Reducer 42 [SIMPLE_EDGE]
+                                                  SHUFFLE [RS_100]
+                                                    PartitionCols:_col0
+                                                    Merge Join Operator [MERGEJOIN_370] (rows=158402938 width=135)
+                                                      Conds:RS_507._col1, _col4=RS_510._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
+                                                    <-Map 41 [SIMPLE_EDGE] vectorized
+                                                      SHUFFLE [RS_507]
+                                                        PartitionCols:_col1, _col4
+                                                        Select Operator [SEL_506] (rows=144002668 width=135)
+                                                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+                                                          Filter Operator [FIL_505] (rows=144002668 width=135)
+                                                            predicate:((ws_item_sk BETWEEN DynamicValue(RS_104_item_i_item_sk_min) AND DynamicValue(RS_104_item_i_item_sk_max) and in_bloom_filter(ws_item_sk, DynamicValue(RS_104_item_i_item_sk_bloom_filter))) and (ws_promo_sk BETWEEN DynamicValue(RS_107_promotion_p_promo_sk_min) AND DynamicValue(RS_107_promotion_p_promo_sk_max) and in_bloom_filter(ws_promo_sk, DynamicValue(RS_107_promotion_p_promo_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_101_date_dim_d_date_sk_min) AND DynamicValue(RS_101_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_101_date_dim_d_date_sk_bloom_filter))) and (ws_web_site_sk BETWEEN DynamicValue(RS_110_web_site_web_site_sk_min) AND DynamicValue(RS_110_web_site_web_site_sk_max) and in_bloom_filter(ws_web_site_sk, DynamicValue(RS_110_web_site_web_site_sk_bloom_filter))) and ws_item_sk is not null and ws_promo_sk is not null and ws_sold_date_sk is not null and ws_web_site_sk is not null)
+                                                            TableScan [TS_79] (rows=144002668 width=135)
+                                                              default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_web_site_sk","ws_promo_sk","ws_order_number","ws_ext_sales_price","ws_net_profit"]
+                                                            <-Reducer 25 [BROADCAST_EDGE] vectorized
+                                                              BROADCAST [RS_492]
+                                                                Group By Operator [GBY_491] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                  SHUFFLE [RS_406]
+                                                                    Group By Operator [GBY_403] (rows=1 width=12)
+                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                      Select Operator [SEL_400] (rows=8116 width=1119)
+                                                                        Output:["_col0"]
+                                                                         Please refer to the previous Select Operator [SEL_394]
+                                                            <-Reducer 29 [BROADCAST_EDGE] vectorized
+                                                              BROADCAST [RS_494]
+                                                                Group By Operator [GBY_493] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                <-Map 26 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                  SHUFFLE [RS_422]
+                                                                    Group By Operator [GBY_419] (rows=1 width=12)
+                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                      Select Operator [SEL_416] (rows=154000 width=1436)
+                                                                        Output:["_col0"]
+                                                                         Please refer to the previous Select Operator [SEL_410]
+                                                            <-Reducer 33 [BROADCAST_EDGE] vectorized
+                                                              BROADCAST [RS_496]
+                                                                Group By Operator [GBY_495] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                <-Map 30 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                  SHUFFLE [RS_438]
+                                                                    Group By Operator [GBY_435] (rows=1 width=12)
+                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                      Select Operator [SEL_432] (rows=1150 width=1179)
+                                                                        Output:["_col0"]
+                                                                         Please refer to the previous Select Operator [SEL_426]
+                                                            <-Reducer 45 [BROADCAST_EDGE] vectorized
+                                                              BROADCAST [RS_504]
+                                                                Group By Operator [GBY_503] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                <-Map 44 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                  PARTITION_ONLY_SHUFFLE [RS_502]
+                                                                    Group By Operator [GBY_501] (rows=1 width=12)
+                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                      Select Operator [SEL_500] (rows=84 width=1850)
+                                                                        Output:["_col0"]
+                                                                         Please refer to the previous Select Operator [SEL_498]
+                                                    <-Map 43 [SIMPLE_EDGE] vectorized
+                                                      SHUFFLE [RS_510]
+                                                        PartitionCols:_col0, _col1
+                                                        Select Operator [SEL_509] (rows=14398467 width=92)
+                                                          Output:["_col0","_col1","_col2","_col3"]
+                                                          Filter Operator [FIL_508] (rows=14398467 width=92)
+                                                            predicate:wr_item_sk is not null
+                                                            TableScan [TS_82] (rows=14398467 width=92)
+                                                              default@web_returns,web_returns,Tbl:COMPLETE,Col:NONE,Output:["wr_item_sk","wr_order_number","wr_return_amt","wr_net_loss"]
                   <-Reducer 7 [CONTAINS] vectorized
-                    Reduce Output Operator [RS_454]
+                    Reduce Output Operator [RS_459]
                       PartitionCols:_col0, _col1, _col2
-                      Group By Operator [GBY_453] (rows=2435062716 width=108)
+                      Group By Operator [GBY_458] (rows=2435062716 width=108)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L
-                        Select Operator [SEL_452] (rows=463823414 width=88)
-                          Output:["_col0","_col1","_col2","_col3","_col4"]
-                          Group By Operator [GBY_451] (rows=463823414 width=88)
-                            Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0
-                          <-Reducer 6 [SIMPLE_EDGE]
-                            SHUFFLE [RS_36]
-                              PartitionCols:_col0
-                              Group By Operator [GBY_35] (rows=927646829 width=88)
-                                Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)"],keys:_col0
-                                Select Operator [SEL_33] (rows=927646829 width=88)
-                                  Output:["_col0","_col1","_col2","_col3"]
-                                  Merge Join Operator [MERGEJOIN_363] (rows=927646829 width=88)
-                                    Conds:RS_30._col2=RS_439._col0(Inner),Output:["_col5","_col6","_col9","_col10","_col18"]
-                                  <-Map 34 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_439]
-                                      PartitionCols:_col0
-                                      Select Operator [SEL_438] (rows=1704 width=1910)
-                                        Output:["_col0","_col1"]
-                                        Filter Operator [FIL_437] (rows=1704 width=1910)
-                                          predicate:s_store_sk is not null
-                                          TableScan [TS_15] (rows=1704 width=1910)
-                                            default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_id"]
-                                  <-Reducer 5 [SIMPLE_EDGE]
-                                    SHUFFLE [RS_30]
-                                      PartitionCols:_col2
-                                      Merge Join Operator [MERGEJOIN_362] (rows=843315281 width=88)
-                                        Conds:RS_27._col3=RS_423._col0(Inner),Output:["_col2","_col5","_col6","_col9","_col10"]
-                                      <-Map 30 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_423]
-                                          PartitionCols:_col0
-                                           Please refer to the previous Select Operator [SEL_422]
-                                      <-Reducer 4 [SIMPLE_EDGE]
-                                        SHUFFLE [RS_27]
-                                          PartitionCols:_col3
-                                          Merge Join Operator [MERGEJOIN_361] (rows=766650239 width=88)
-                                            Conds:RS_24._col1=RS_407._col0(Inner),Output:["_col2","_col3","_col5","_col6","_col9","_col10"]
-                                          <-Map 26 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_407]
-                                              PartitionCols:_col0
-                                               Please refer to the previous Select Operator [SEL_406]
-                                          <-Reducer 3 [SIMPLE_EDGE]
-                                            SHUFFLE [RS_24]
-                                              PartitionCols:_col1
-                                              Merge Join Operator [MERGEJOIN_360] (rows=696954748 width=88)
-                                                Conds:RS_21._col0=RS_391._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
-                                              <-Map 12 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_391]
-                                                  PartitionCols:_col0
-                                                   Please refer to the previous Select Operator [SEL_390]
-                                              <-Reducer 2 [SIMPLE_EDGE]
-                                                SHUFFLE [RS_21]
-                                                  PartitionCols:_col0
-                                                  Merge Join Operator [MERGEJOIN_359] (rows=633595212 width=88)
-                                                    Conds:RS_447._col1, _col4=RS_450._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
-                                                  <-Map 1 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_447]
-                                                      PartitionCols:_col1, _col4
-                                                      Select Operator [SEL_446] (rows=575995635 width=88)
-                                                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                                        Filter Operator [FIL_445] (rows=575995635 width=88)
-                                                          predicate:((ss_item_sk BETWEEN DynamicValue(RS_25_item_i_item_sk_min) AND DynamicValue(RS_25_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_25_item_i_item_sk_bloom_filter))) and (ss_promo_sk BETWEEN DynamicValue(RS_28_promotion_p_promo_sk_min) AND DynamicValue(RS_28_promotion_p_promo_sk_max) and in_bloom_filter(ss_promo_sk, DynamicValue(RS_28_promotion_p_promo_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_22_date_dim_d_date_sk_min) AND DynamicValue(RS_22_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_22_date_dim_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_31_store_s_store_sk_min) AND DynamicValue(RS_31_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_31_store_s_store_sk_bloom_filter))) and ss_item_sk is not null and ss_promo_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null)
-                                                          TableScan [TS_0] (rows=575995635 width=88)
-                                                            default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_store_sk","ss_promo_sk","ss_ticket_number","ss_ext_sales_price","ss_net_profit"]
-                                                          <-Reducer 13 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_404]
-                                                              Group By Operator [GBY_403] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                              <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_400]
-                                                                  Group By Operator [GBY_397] (rows=1 width=12)
-                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_392] (rows=8116 width=1119)
-                                                                      Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_390]
-                                                          <-Reducer 27 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_420]
-                                                              Group By Operator [GBY_419] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                              <-Map 26 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_416]
-                                                                  Group By Operator [GBY_413] (rows=1 width=12)
-                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_408] (rows=154000 width=1436)
-                                                                      Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_406]
-                                                          <-Reducer 31 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_436]
-                                                              Group By Operator [GBY_435] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                              <-Map 30 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_432]
-                                                                  Group By Operator [GBY_429] (rows=1 width=12)
-                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_424] (rows=1150 width=1179)
-                                                                      Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_422]
-                                                          <-Reducer 35 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_444]
-                                                              Group By Operator [GBY_443] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                              <-Map 34 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_442]
-                                                                  Group By Operator [GBY_441] (rows=1 width=12)
-                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_440] (rows=1704 width=1910)
-                                                                      Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_438]
-                                                  <-Map 11 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_450]
-                                                      PartitionCols:_col0, _col1
-                                                      Select Operator [SEL_449] (rows=57591150 width=77)
-                                                        Output:["_col0","_col1","_col2","_col3"]
-                                                        Filter Operator [FIL_448] (rows=57591150 width=77)
-                                                          predicate:sr_item_sk is not null
-                                                          TableScan [TS_3] (rows=57591150 width=77)
-                                                            default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_item_sk","sr_ticket_number","sr_return_amt","sr_net_loss"]
+                        Top N Key Operator [TNK_457] (rows=811687572 width=108)
+                          keys:_col0, _col1, 0L,sort order:+++,top n:100
+                          Select Operator [SEL_456] (rows=463823414 width=88)
+                            Output:["_col0","_col1","_col2","_col3","_col4"]
+                            Group By Operator [GBY_455] (rows=463823414 width=88)
+                              Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0
+                            <-Reducer 6 [SIMPLE_EDGE]
+                              SHUFFLE [RS_36]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_35] (rows=927646829 width=88)
+                                  Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)"],keys:_col0
+                                  Select Operator [SEL_33] (rows=927646829 width=88)
+                                    Output:["_col0","_col1","_col2","_col3"]
+                                    Merge Join Operator [MERGEJOIN_364] (rows=927646829 width=88)
+                                      Conds:RS_30._col2=RS_443._col0(Inner),Output:["_col5","_col6","_col9","_col10","_col18"]
+                                    <-Map 34 [SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_443]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_442] (rows=1704 width=1910)
+                                          Output:["_col0","_col1"]
+                                          Filter Operator [FIL_441] (rows=1704 width=1910)
+                                            predicate:s_store_sk is not null
+                                            TableScan [TS_15] (rows=1704 width=1910)
+                                              default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_id"]
+                                    <-Reducer 5 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_30]
+                                        PartitionCols:_col2
+                                        Merge Join Operator [MERGEJOIN_363] (rows=843315281 width=88)
+                                          Conds:RS_27._col3=RS_427._col0(Inner),Output:["_col2","_col5","_col6","_col9","_col10"]
+                                        <-Map 30 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_427]
+                                            PartitionCols:_col0
+                                             Please refer to the previous Select Operator [SEL_426]
+                                        <-Reducer 4 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_27]
+                                            PartitionCols:_col3
+                                            Merge Join Operator [MERGEJOIN_362] (rows=766650239 width=88)
+                                              Conds:RS_24._col1=RS_411._col0(Inner),Output:["_col2","_col3","_col5","_col6","_col9","_col10"]
+                                            <-Map 26 [SIMPLE_EDGE] vectorized
+                                              SHUFFLE [RS_411]
+                                                PartitionCols:_col0
+                                                 Please refer to the previous Select Operator [SEL_410]
+                                            <-Reducer 3 [SIMPLE_EDGE]
+                                              SHUFFLE [RS_24]
+                                                PartitionCols:_col1
+                                                Merge Join Operator [MERGEJOIN_361] (rows=696954748 width=88)
+                                                  Conds:RS_21._col0=RS_395._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
+                                                <-Map 12 [SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_395]
+                                                    PartitionCols:_col0
+                                                     Please refer to the previous Select Operator [SEL_394]
+                                                <-Reducer 2 [SIMPLE_EDGE]
+                                                  SHUFFLE [RS_21]
+                                                    PartitionCols:_col0
+                                                    Merge Join Operator [MERGEJOIN_360] (rows=633595212 width=88)
+                                                      Conds:RS_451._col1, _col4=RS_454._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
+                                                    <-Map 1 [SIMPLE_EDGE] vectorized
+                                                      SHUFFLE [RS_451]
+                                                        PartitionCols:_col1, _col4
+                                                        Select Operator [SEL_450] (rows=575995635 width=88)
+                                                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+                                                          Filter Operator [FIL_449] (rows=575995635 width=88)
+                                                            predicate:((ss_item_sk BETWEEN DynamicValue(RS_25_item_i_item_sk_min) AND DynamicValue(RS_25_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_25_item_i_item_sk_bloom_filter))) and (ss_promo_sk BETWEEN DynamicValue(RS_28_promotion_p_promo_sk_min) AND DynamicValue(RS_28_promotion_p_promo_sk_max) and in_bloom_filter(ss_promo_sk, DynamicValue(RS_28_promotion_p_promo_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_22_date_dim_d_date_sk_min) AND DynamicValue(RS_22_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_22_date_dim_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_31_store_s_store_sk_min) AND DynamicValue(RS_31_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_31_store_s_store_sk_bloom_filter))) and ss_item_sk is not null and ss_promo_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null)
+                                                            TableScan [TS_0] (rows=575995635 width=88)
+                                                              default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_store_sk","ss_promo_sk","ss_ticket_number","ss_ext_sales_price","ss_net_profit"]
+                                                            <-Reducer 13 [BROADCAST_EDGE] vectorized
+                                                              BROADCAST [RS_408]
+                                                                Group By Operator [GBY_407] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                  SHUFFLE [RS_404]
+                                                                    Group By Operator [GBY_401] (rows=1 width=12)
+                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                      Select Operator [SEL_396] (rows=8116 width=1119)
+                                                                        Output:["_col0"]
+                                                                         Please refer to the previous Select Operator [SEL_394]
+                                                            <-Reducer 27 [BROADCAST_EDGE] vectorized
+                                                              BROADCAST [RS_424]
+                                                                Group By Operator [GBY_423] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                <-Map 26 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                  SHUFFLE [RS_420]
+                                                                    Group By Operator [GBY_417] (rows=1 width=12)
+                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                      Select Operator [SEL_412] (rows=154000 width=1436)
+                                                                        Output:["_col0"]
+                                                                         Please refer to the previous Select Operator [SEL_410]
+                                                            <-Reducer 31 [BROADCAST_EDGE] vectorized
+                                                              BROADCAST [RS_440]
+                                                                Group By Operator [GBY_439] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                <-Map 30 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                  SHUFFLE [RS_436]
+                                                                    Group By Operator [GBY_433] (rows=1 width=12)
+                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                      Select Operator [SEL_428] (rows=1150 width=1179)
+                                                                        Output:["_col0"]
+                                                                         Please refer to the previous Select Operator [SEL_426]
+                                                            <-Reducer 35 [BROADCAST_EDGE] vectorized
+                                                              BROADCAST [RS_448]
+                                                                Group By Operator [GBY_447] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                <-Map 34 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                  SHUFFLE [RS_446]
+                                                                    Group By Operator [GBY_445] (rows=1 width=12)
+                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                      Select Operator [SEL_444] (rows=1704 width=1910)
+                                                                        Output:["_col0"]
+                                                                         Please refer to the previous Select Operator [SEL_442]
+                                                    <-Map 11 [SIMPLE_EDGE] vectorized
+                                                      SHUFFLE [RS_454]
+                                                        PartitionCols:_col0, _col1
+                                                        Select Operator [SEL_453] (rows=57591150 width=77)
+                                                          Output:["_col0","_col1","_col2","_col3"]
+                                                          Filter Operator [FIL_452] (rows=57591150 width=77)
+                                                            predicate:sr_item_sk is not null
+                                                            TableScan [TS_3] (rows=57591150 width=77)
+                                                              default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_item_sk","sr_ticket_number","sr_return_amt","sr_net_loss"]
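
The Top N Key Operator [TNK_*] entries introduced in the plans above come from this patch's standalone TopNKey operator: it prunes rows whose grouping keys can no longer rank within the query's LIMIT (top n:100 here) before they reach the aggregation, so the downstream group-by and shuffle see far fewer rows. Below is a minimal HiveQL sketch of the query shape that yields such a plan; the table and column names are taken from the scans shown above, the LIMIT mirrors the "top n:100" in the plan, and the hive.optimize.topnkey property name is assumed for illustration rather than quoted from this patch.

  -- Assumed switch for the new optimization (illustrative, not quoted from this patch).
  SET hive.optimize.topnkey=true;

  -- A grouped, ordered, limited query of the shape used in these TPC-DS plans.
  -- With the optimization on, EXPLAIN is expected to show a "Top N Key Operator"
  -- with top n:100 ahead of the Group By Operator, as in the TNK_* nodes above.
  EXPLAIN
  SELECT s.s_store_id,
         SUM(ss.ss_ext_sales_price) AS total_sales
  FROM store_sales ss
  JOIN store s ON ss.ss_store_sk = s.s_store_sk
  GROUP BY s.s_store_id
  ORDER BY s.s_store_id
  LIMIT 100;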
 


[09/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query49.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query49.q.out b/ql/src/test/results/clientpositive/perf/tez/query49.q.out
index 434ed16..6f642ef 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query49.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query49.q.out
@@ -283,253 +283,257 @@ Stage-0
     limit:100
     Stage-1
       Reducer 11 vectorized
-      File Output Operator [FS_306]
-        Limit [LIM_305] (rows=100 width=101)
+      File Output Operator [FS_310]
+        Limit [LIM_309] (rows=100 width=101)
           Number of rows:100
-          Select Operator [SEL_304] (rows=5915494 width=101)
+          Select Operator [SEL_308] (rows=5915494 width=101)
             Output:["_col0","_col1","_col2","_col3","_col4"]
           <-Reducer 10 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_303]
-              Select Operator [SEL_302] (rows=5915494 width=101)
+            SHUFFLE [RS_307]
+              Select Operator [SEL_306] (rows=5915494 width=101)
                 Output:["_col0","_col1","_col2","_col3","_col4"]
-                Group By Operator [GBY_301] (rows=5915494 width=101)
+                Group By Operator [GBY_305] (rows=5915494 width=101)
                   Output:["_col0","_col1","_col2","_col3","_col4"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4
                 <-Union 9 [SIMPLE_EDGE]
                   <-Reducer 24 [CONTAINS] vectorized
-                    Reduce Output Operator [RS_346]
+                    Reduce Output Operator [RS_351]
                       PartitionCols:_col0, _col1, _col2, _col3, _col4
-                      Group By Operator [GBY_345] (rows=11830988 width=101)
+                      Group By Operator [GBY_350] (rows=11830988 width=101)
                         Output:["_col0","_col1","_col2","_col3","_col4"],keys:_col0, _col3, _col4, _col1, _col2
-                        Select Operator [SEL_344] (rows=8604378 width=88)
-                          Output:["_col0","_col1","_col2","_col3","_col4"]
-                          Filter Operator [FIL_343] (rows=8604378 width=88)
-                            predicate:((_col0 <= 10) or (rank_window_1 <= 10))
-                            PTF Operator [PTF_342] (rows=12906568 width=88)
-                              Function definitions:[{},{"name:":"windowingtablefunction","order by:":"(CAST( _col4 AS decimal(15,4)) / CAST( _col5 AS decimal(15,4))) ASC NULLS FIRST","partition by:":"0"}]
-                              Select Operator [SEL_341] (rows=12906568 width=88)
-                                Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                              <-Reducer 23 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_340]
-                                  PartitionCols:0
-                                  Select Operator [SEL_339] (rows=12906568 width=88)
-                                    Output:["rank_window_0","_col0","_col1","_col2","_col3","_col4"]
-                                    PTF Operator [PTF_338] (rows=12906568 width=88)
-                                      Function definitions:[{},{"name:":"windowingtablefunction","order by:":"(CAST( _col1 AS decimal(15,4)) / CAST( _col2 AS decimal(15,4))) ASC NULLS FIRST","partition by:":"0"}]
-                                      Select Operator [SEL_337] (rows=12906568 width=88)
-                                        Output:["_col0","_col1","_col2","_col3","_col4"]
-                                      <-Reducer 22 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_336]
-                                          PartitionCols:0
-                                          Group By Operator [GBY_335] (rows=12906568 width=88)
-                                            Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0
-                                          <-Reducer 21 [SIMPLE_EDGE]
-                                            SHUFFLE [RS_89]
-                                              PartitionCols:_col0
-                                              Group By Operator [GBY_88] (rows=25813137 width=88)
-                                                Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0
-                                                Select Operator [SEL_86] (rows=25813137 width=88)
-                                                  Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                  Merge Join Operator [MERGEJOIN_236] (rows=25813137 width=88)
-                                                    Conds:RS_83._col1, _col2=RS_334._col0, _col1(Inner),Output:["_col1","_col3","_col4","_col11","_col12"]
-                                                  <-Map 30 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_334]
-                                                      PartitionCols:_col0, _col1
-                                                      Select Operator [SEL_333] (rows=19197050 width=77)
-                                                        Output:["_col0","_col1","_col2","_col3"]
-                                                        Filter Operator [FIL_332] (rows=19197050 width=77)
-                                                          predicate:((sr_return_amt > 10000) and sr_item_sk is not null and sr_ticket_number is not null)
-                                                          TableScan [TS_77] (rows=57591150 width=77)
-                                                            default@store_returns,sr,Tbl:COMPLETE,Col:NONE,Output:["sr_item_sk","sr_ticket_number","sr_return_quantity","sr_return_amt"]
-                                                  <-Reducer 20 [SIMPLE_EDGE]
-                                                    SHUFFLE [RS_83]
-                                                      PartitionCols:_col1, _col2
-                                                      Merge Join Operator [MERGEJOIN_235] (rows=23466488 width=88)
-                                                        Conds:RS_331._col0=RS_269._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
-                                                      <-Map 12 [SIMPLE_EDGE] vectorized
-                                                        PARTITION_ONLY_SHUFFLE [RS_269]
-                                                          PartitionCols:_col0
-                                                          Select Operator [SEL_264] (rows=18262 width=1119)
-                                                            Output:["_col0"]
-                                                            Filter Operator [FIL_263] (rows=18262 width=1119)
-                                                              predicate:((d_moy = 12) and (d_year = 2000) and d_date_sk is not null)
-                                                              TableScan [TS_3] (rows=73049 width=1119)
-                                                                default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
-                                                      <-Map 29 [SIMPLE_EDGE] vectorized
-                                                        SHUFFLE [RS_331]
-                                                          PartitionCols:_col0
-                                                          Select Operator [SEL_330] (rows=21333171 width=88)
-                                                            Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                            Filter Operator [FIL_329] (rows=21333171 width=88)
-                                                              predicate:((ss_net_paid > 0) and (ss_net_profit > 1) and (ss_quantity > 0) and (ss_sold_date_sk BETWEEN DynamicValue(RS_81_date_dim_d_date_sk_min) AND DynamicValue(RS_81_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_81_date_dim_d_date_sk_bloom_filter))) and ss_item_sk is not null and ss_sold_date_sk is not null and ss_ticket_number is not null)
-                                                              TableScan [TS_71] (rows=575995635 width=88)
-                                                                default@store_sales,sts,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_ticket_number","ss_quantity","ss_net_paid","ss_net_profit"]
-                                                              <-Reducer 25 [BROADCAST_EDGE] vectorized
-                                                                BROADCAST [RS_328]
-                                                                  Group By Operator [GBY_327] (rows=1 width=12)
-                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                                  <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                    PARTITION_ONLY_SHUFFLE [RS_276]
-                                                                      Group By Operator [GBY_273] (rows=1 width=12)
-                                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                        Select Operator [SEL_270] (rows=18262 width=1119)
-                                                                          Output:["_col0"]
-                                                                           Please refer to the previous Select Operator [SEL_264]
+                        Top N Key Operator [TNK_349] (rows=11830988 width=101)
+                          keys:_col0, _col3, _col4, _col1, _col2,sort order:+++++,top n:100
+                          Select Operator [SEL_348] (rows=8604378 width=88)
+                            Output:["_col0","_col1","_col2","_col3","_col4"]
+                            Filter Operator [FIL_347] (rows=8604378 width=88)
+                              predicate:((_col0 <= 10) or (rank_window_1 <= 10))
+                              PTF Operator [PTF_346] (rows=12906568 width=88)
+                                Function definitions:[{},{"name:":"windowingtablefunction","order by:":"(CAST( _col4 AS decimal(15,4)) / CAST( _col5 AS decimal(15,4))) ASC NULLS FIRST","partition by:":"0"}]
+                                Select Operator [SEL_345] (rows=12906568 width=88)
+                                  Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                <-Reducer 23 [SIMPLE_EDGE] vectorized
+                                  SHUFFLE [RS_344]
+                                    PartitionCols:0
+                                    Select Operator [SEL_343] (rows=12906568 width=88)
+                                      Output:["rank_window_0","_col0","_col1","_col2","_col3","_col4"]
+                                      PTF Operator [PTF_342] (rows=12906568 width=88)
+                                        Function definitions:[{},{"name:":"windowingtablefunction","order by:":"(CAST( _col1 AS decimal(15,4)) / CAST( _col2 AS decimal(15,4))) ASC NULLS FIRST","partition by:":"0"}]
+                                        Select Operator [SEL_341] (rows=12906568 width=88)
+                                          Output:["_col0","_col1","_col2","_col3","_col4"]
+                                        <-Reducer 22 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_340]
+                                            PartitionCols:0
+                                            Group By Operator [GBY_339] (rows=12906568 width=88)
+                                              Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0
+                                            <-Reducer 21 [SIMPLE_EDGE]
+                                              SHUFFLE [RS_89]
+                                                PartitionCols:_col0
+                                                Group By Operator [GBY_88] (rows=25813137 width=88)
+                                                  Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0
+                                                  Select Operator [SEL_86] (rows=25813137 width=88)
+                                                    Output:["_col0","_col1","_col2","_col3","_col4"]
+                                                    Merge Join Operator [MERGEJOIN_237] (rows=25813137 width=88)
+                                                      Conds:RS_83._col1, _col2=RS_338._col0, _col1(Inner),Output:["_col1","_col3","_col4","_col11","_col12"]
+                                                    <-Map 30 [SIMPLE_EDGE] vectorized
+                                                      SHUFFLE [RS_338]
+                                                        PartitionCols:_col0, _col1
+                                                        Select Operator [SEL_337] (rows=19197050 width=77)
+                                                          Output:["_col0","_col1","_col2","_col3"]
+                                                          Filter Operator [FIL_336] (rows=19197050 width=77)
+                                                            predicate:((sr_return_amt > 10000) and sr_item_sk is not null and sr_ticket_number is not null)
+                                                            TableScan [TS_77] (rows=57591150 width=77)
+                                                              default@store_returns,sr,Tbl:COMPLETE,Col:NONE,Output:["sr_item_sk","sr_ticket_number","sr_return_quantity","sr_return_amt"]
+                                                    <-Reducer 20 [SIMPLE_EDGE]
+                                                      SHUFFLE [RS_83]
+                                                        PartitionCols:_col1, _col2
+                                                        Merge Join Operator [MERGEJOIN_236] (rows=23466488 width=88)
+                                                          Conds:RS_335._col0=RS_272._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
+                                                        <-Map 12 [SIMPLE_EDGE] vectorized
+                                                          PARTITION_ONLY_SHUFFLE [RS_272]
+                                                            PartitionCols:_col0
+                                                            Select Operator [SEL_267] (rows=18262 width=1119)
+                                                              Output:["_col0"]
+                                                              Filter Operator [FIL_266] (rows=18262 width=1119)
+                                                                predicate:((d_moy = 12) and (d_year = 2000) and d_date_sk is not null)
+                                                                TableScan [TS_3] (rows=73049 width=1119)
+                                                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
+                                                        <-Map 29 [SIMPLE_EDGE] vectorized
+                                                          SHUFFLE [RS_335]
+                                                            PartitionCols:_col0
+                                                            Select Operator [SEL_334] (rows=21333171 width=88)
+                                                              Output:["_col0","_col1","_col2","_col3","_col4"]
+                                                              Filter Operator [FIL_333] (rows=21333171 width=88)
+                                                                predicate:((ss_net_paid > 0) and (ss_net_profit > 1) and (ss_quantity > 0) and (ss_sold_date_sk BETWEEN DynamicValue(RS_81_date_dim_d_date_sk_min) AND DynamicValue(RS_81_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_81_date_dim_d_date_sk_bloom_filter))) and ss_item_sk is not null and ss_sold_date_sk is not null and ss_ticket_number is not null)
+                                                                TableScan [TS_71] (rows=575995635 width=88)
+                                                                  default@store_sales,sts,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_ticket_number","ss_quantity","ss_net_paid","ss_net_profit"]
+                                                                <-Reducer 25 [BROADCAST_EDGE] vectorized
+                                                                  BROADCAST [RS_332]
+                                                                    Group By Operator [GBY_331] (rows=1 width=12)
+                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                    <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                      PARTITION_ONLY_SHUFFLE [RS_279]
+                                                                        Group By Operator [GBY_276] (rows=1 width=12)
+                                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                          Select Operator [SEL_273] (rows=18262 width=1119)
+                                                                            Output:["_col0"]
+                                                                             Please refer to the previous Select Operator [SEL_267]
                   <-Reducer 8 [CONTAINS] vectorized
-                    Reduce Output Operator [RS_300]
+                    Reduce Output Operator [RS_304]
                       PartitionCols:_col0, _col1, _col2, _col3, _col4
-                      Group By Operator [GBY_299] (rows=11830988 width=101)
+                      Group By Operator [GBY_303] (rows=11830988 width=101)
                         Output:["_col0","_col1","_col2","_col3","_col4"],keys:_col0, _col3, _col4, _col1, _col2
-                        Select Operator [SEL_298] (rows=3226610 width=135)
-                          Output:["_col0","_col1","_col2","_col3","_col4"]
-                          Group By Operator [GBY_297] (rows=3226610 width=135)
-                            Output:["_col0","_col1","_col2","_col3","_col4"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4
-                          <-Union 7 [SIMPLE_EDGE]
-                            <-Reducer 18 [CONTAINS] vectorized
-                              Reduce Output Operator [RS_326]
-                                PartitionCols:_col0, _col1, _col2, _col3, _col4
-                                Group By Operator [GBY_325] (rows=6453220 width=135)
-                                  Output:["_col0","_col1","_col2","_col3","_col4"],keys:_col0, _col3, _col4, _col1, _col2
-                                  Select Operator [SEL_324] (rows=4302070 width=135)
-                                    Output:["_col0","_col1","_col2","_col3","_col4"]
-                                    Filter Operator [FIL_323] (rows=4302070 width=135)
-                                      predicate:((_col0 <= 10) or (rank_window_1 <= 10))
-                                      PTF Operator [PTF_322] (rows=6453105 width=135)
-                                        Function definitions:[{},{"name:":"windowingtablefunction","order by:":"(CAST( _col4 AS decimal(15,4)) / CAST( _col5 AS decimal(15,4))) ASC NULLS FIRST","partition by:":"0"}]
-                                        Select Operator [SEL_321] (rows=6453105 width=135)
-                                          Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                        <-Reducer 17 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_320]
-                                            PartitionCols:0
-                                            Select Operator [SEL_319] (rows=6453105 width=135)
-                                              Output:["rank_window_0","_col0","_col1","_col2","_col3","_col4"]
-                                              PTF Operator [PTF_318] (rows=6453105 width=135)
-                                                Function definitions:[{},{"name:":"windowingtablefunction","order by:":"(CAST( _col1 AS decimal(15,4)) / CAST( _col2 AS decimal(15,4))) ASC NULLS FIRST","partition by:":"0"}]
-                                                Select Operator [SEL_317] (rows=6453105 width=135)
-                                                  Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                <-Reducer 16 [SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_316]
-                                                    PartitionCols:0
-                                                    Group By Operator [GBY_315] (rows=6453105 width=135)
-                                                      Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0
-                                                    <-Reducer 15 [SIMPLE_EDGE]
-                                                      SHUFFLE [RS_50]
-                                                        PartitionCols:_col0
-                                                        Group By Operator [GBY_49] (rows=12906211 width=135)
-                                                          Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0
-                                                          Select Operator [SEL_47] (rows=12906211 width=135)
-                                                            Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                            Merge Join Operator [MERGEJOIN_234] (rows=12906211 width=135)
-                                                              Conds:RS_44._col1, _col2=RS_314._col0, _col1(Inner),Output:["_col1","_col3","_col4","_col11","_col12"]
-                                                            <-Map 28 [SIMPLE_EDGE] vectorized
-                                                              SHUFFLE [RS_314]
-                                                                PartitionCols:_col0, _col1
-                                                                Select Operator [SEL_313] (rows=9599627 width=106)
-                                                                  Output:["_col0","_col1","_col2","_col3"]
-                                                                  Filter Operator [FIL_312] (rows=9599627 width=106)
-                                                                    predicate:((cr_return_amount > 10000) and cr_item_sk is not null and cr_order_number is not null)
-                                                                    TableScan [TS_38] (rows=28798881 width=106)
-                                                                      default@catalog_returns,cr,Tbl:COMPLETE,Col:NONE,Output:["cr_item_sk","cr_order_number","cr_return_quantity","cr_return_amount"]
-                                                            <-Reducer 14 [SIMPLE_EDGE]
-                                                              SHUFFLE [RS_44]
-                                                                PartitionCols:_col1, _col2
-                                                                Merge Join Operator [MERGEJOIN_233] (rows=11732919 width=135)
-                                                                  Conds:RS_311._col0=RS_267._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
-                                                                <-Map 12 [SIMPLE_EDGE] vectorized
-                                                                  PARTITION_ONLY_SHUFFLE [RS_267]
-                                                                    PartitionCols:_col0
-                                                                     Please refer to the previous Select Operator [SEL_264]
-                                                                <-Map 27 [SIMPLE_EDGE] vectorized
-                                                                  SHUFFLE [RS_311]
-                                                                    PartitionCols:_col0
-                                                                    Select Operator [SEL_310] (rows=10666290 width=135)
-                                                                      Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                                      Filter Operator [FIL_309] (rows=10666290 width=135)
-                                                                        predicate:((cs_net_paid > 0) and (cs_net_profit > 1) and (cs_quantity > 0) and (cs_sold_date_sk BETWEEN DynamicValue(RS_42_date_dim_d_date_sk_min) AND DynamicValue(RS_42_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_42_date_dim_d_date_sk_bloom_filter))) and cs_item_sk is not null and cs_order_number is not null and cs_sold_date_sk is not null)
-                                                                        TableScan [TS_32] (rows=287989836 width=135)
-                                                                          default@catalog_sales,cs,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_item_sk","cs_order_number","cs_quantity","cs_net_paid","cs_net_profit"]
-                                                                        <-Reducer 19 [BROADCAST_EDGE] vectorized
-                                                                          BROADCAST [RS_308]
-                                                                            Group By Operator [GBY_307] (rows=1 width=12)
-                                                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                                            <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                              PARTITION_ONLY_SHUFFLE [RS_275]
-                                                                                Group By Operator [GBY_272] (rows=1 width=12)
-                                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                                  Select Operator [SEL_268] (rows=18262 width=1119)
-                                                                                    Output:["_col0"]
-                                                                                     Please refer to the previous Select Operator [SEL_264]
-                            <-Reducer 6 [CONTAINS] vectorized
-                              Reduce Output Operator [RS_296]
-                                PartitionCols:_col0, _col1, _col2, _col3, _col4
-                                Group By Operator [GBY_295] (rows=6453220 width=135)
-                                  Output:["_col0","_col1","_col2","_col3","_col4"],keys:_col0, _col3, _col4, _col1, _col2
-                                  Select Operator [SEL_294] (rows=2151150 width=135)
-                                    Output:["_col0","_col1","_col2","_col3","_col4"]
-                                    Filter Operator [FIL_293] (rows=2151150 width=135)
-                                      predicate:((_col0 <= 10) or (rank_window_1 <= 10))
-                                      PTF Operator [PTF_292] (rows=3226726 width=135)
-                                        Function definitions:[{},{"name:":"windowingtablefunction","order by:":"(CAST( _col4 AS decimal(15,4)) / CAST( _col5 AS decimal(15,4))) ASC NULLS FIRST","partition by:":"0"}]
-                                        Select Operator [SEL_291] (rows=3226726 width=135)
-                                          Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                        <-Reducer 5 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_290]
-                                            PartitionCols:0
-                                            Select Operator [SEL_289] (rows=3226726 width=135)
-                                              Output:["rank_window_0","_col0","_col1","_col2","_col3","_col4"]
-                                              PTF Operator [PTF_288] (rows=3226726 width=135)
-                                                Function definitions:[{},{"name:":"windowingtablefunction","order by:":"(CAST( _col1 AS decimal(15,4)) / CAST( _col2 AS decimal(15,4))) ASC NULLS FIRST","partition by:":"0"}]
-                                                Select Operator [SEL_287] (rows=3226726 width=135)
-                                                  Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                <-Reducer 4 [SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_286]
-                                                    PartitionCols:0
-                                                    Group By Operator [GBY_285] (rows=3226726 width=135)
-                                                      Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0
-                                                    <-Reducer 3 [SIMPLE_EDGE]
-                                                      SHUFFLE [RS_18]
-                                                        PartitionCols:_col0
-                                                        Group By Operator [GBY_17] (rows=6453452 width=135)
-                                                          Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0
-                                                          Select Operator [SEL_15] (rows=6453452 width=135)
-                                                            Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                            Merge Join Operator [MERGEJOIN_232] (rows=6453452 width=135)
-                                                              Conds:RS_12._col1, _col2=RS_284._col0, _col1(Inner),Output:["_col1","_col3","_col4","_col11","_col12"]
-                                                            <-Map 26 [SIMPLE_EDGE] vectorized
-                                                              SHUFFLE [RS_284]
-                                                                PartitionCols:_col0, _col1
-                                                                Select Operator [SEL_283] (rows=4799489 width=92)
-                                                                  Output:["_col0","_col1","_col2","_col3"]
-                                                                  Filter Operator [FIL_282] (rows=4799489 width=92)
-                                                                    predicate:((wr_return_amt > 10000) and wr_item_sk is not null and wr_order_number is not null)
-                                                                    TableScan [TS_6] (rows=14398467 width=92)
-                                                                      default@web_returns,wr,Tbl:COMPLETE,Col:NONE,Output:["wr_item_sk","wr_order_number","wr_return_quantity","wr_return_amt"]
-                                                            <-Reducer 2 [SIMPLE_EDGE]
-                                                              SHUFFLE [RS_12]
-                                                                PartitionCols:_col1, _col2
-                                                                Merge Join Operator [MERGEJOIN_231] (rows=5866775 width=135)
-                                                                  Conds:RS_281._col0=RS_265._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
-                                                                <-Map 12 [SIMPLE_EDGE] vectorized
-                                                                  PARTITION_ONLY_SHUFFLE [RS_265]
-                                                                    PartitionCols:_col0
-                                                                     Please refer to the previous Select Operator [SEL_264]
-                                                                <-Map 1 [SIMPLE_EDGE] vectorized
-                                                                  SHUFFLE [RS_281]
-                                                                    PartitionCols:_col0
-                                                                    Select Operator [SEL_280] (rows=5333432 width=135)
-                                                                      Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                                      Filter Operator [FIL_279] (rows=5333432 width=135)
-                                                                        predicate:((ws_net_paid > 0) and (ws_net_profit > 1) and (ws_quantity > 0) and (ws_sold_date_sk BETWEEN DynamicValue(RS_10_date_dim_d_date_sk_min) AND DynamicValue(RS_10_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_10_date_dim_d_date_sk_bloom_filter))) and ws_item_sk is not null and ws_order_number is not null and ws_sold_date_sk is not null)
-                                                                        TableScan [TS_0] (rows=144002668 width=135)
-                                                                          default@web_sales,ws,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_order_number","ws_quantity","ws_net_paid","ws_net_profit"]
-                                                                        <-Reducer 13 [BROADCAST_EDGE] vectorized
-                                                                          BROADCAST [RS_278]
-                                                                            Group By Operator [GBY_277] (rows=1 width=12)
-                                                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                                            <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                              PARTITION_ONLY_SHUFFLE [RS_274]
-                                                                                Group By Operator [GBY_271] (rows=1 width=12)
-                                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                                  Select Operator [SEL_266] (rows=18262 width=1119)
-                                                                                    Output:["_col0"]
-                                                                                     Please refer to the previous Select Operator [SEL_264]
+                        Top N Key Operator [TNK_302] (rows=11830988 width=101)
+                          keys:_col0, _col3, _col4, _col1, _col2,sort order:+++++,top n:100
+                          Select Operator [SEL_301] (rows=3226610 width=135)
+                            Output:["_col0","_col1","_col2","_col3","_col4"]
+                            Group By Operator [GBY_300] (rows=3226610 width=135)
+                              Output:["_col0","_col1","_col2","_col3","_col4"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4
+                            <-Union 7 [SIMPLE_EDGE]
+                              <-Reducer 18 [CONTAINS] vectorized
+                                Reduce Output Operator [RS_330]
+                                  PartitionCols:_col0, _col1, _col2, _col3, _col4
+                                  Group By Operator [GBY_329] (rows=6453220 width=135)
+                                    Output:["_col0","_col1","_col2","_col3","_col4"],keys:_col0, _col3, _col4, _col1, _col2
+                                    Select Operator [SEL_328] (rows=4302070 width=135)
+                                      Output:["_col0","_col1","_col2","_col3","_col4"]
+                                      Filter Operator [FIL_327] (rows=4302070 width=135)
+                                        predicate:((_col0 <= 10) or (rank_window_1 <= 10))
+                                        PTF Operator [PTF_326] (rows=6453105 width=135)
+                                          Function definitions:[{},{"name:":"windowingtablefunction","order by:":"(CAST( _col4 AS decimal(15,4)) / CAST( _col5 AS decimal(15,4))) ASC NULLS FIRST","partition by:":"0"}]
+                                          Select Operator [SEL_325] (rows=6453105 width=135)
+                                            Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                          <-Reducer 17 [SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_324]
+                                              PartitionCols:0
+                                              Select Operator [SEL_323] (rows=6453105 width=135)
+                                                Output:["rank_window_0","_col0","_col1","_col2","_col3","_col4"]
+                                                PTF Operator [PTF_322] (rows=6453105 width=135)
+                                                  Function definitions:[{},{"name:":"windowingtablefunction","order by:":"(CAST( _col1 AS decimal(15,4)) / CAST( _col2 AS decimal(15,4))) ASC NULLS FIRST","partition by:":"0"}]
+                                                  Select Operator [SEL_321] (rows=6453105 width=135)
+                                                    Output:["_col0","_col1","_col2","_col3","_col4"]
+                                                  <-Reducer 16 [SIMPLE_EDGE] vectorized
+                                                    SHUFFLE [RS_320]
+                                                      PartitionCols:0
+                                                      Group By Operator [GBY_319] (rows=6453105 width=135)
+                                                        Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0
+                                                      <-Reducer 15 [SIMPLE_EDGE]
+                                                        SHUFFLE [RS_50]
+                                                          PartitionCols:_col0
+                                                          Group By Operator [GBY_49] (rows=12906211 width=135)
+                                                            Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0
+                                                            Select Operator [SEL_47] (rows=12906211 width=135)
+                                                              Output:["_col0","_col1","_col2","_col3","_col4"]
+                                                              Merge Join Operator [MERGEJOIN_235] (rows=12906211 width=135)
+                                                                Conds:RS_44._col1, _col2=RS_318._col0, _col1(Inner),Output:["_col1","_col3","_col4","_col11","_col12"]
+                                                              <-Map 28 [SIMPLE_EDGE] vectorized
+                                                                SHUFFLE [RS_318]
+                                                                  PartitionCols:_col0, _col1
+                                                                  Select Operator [SEL_317] (rows=9599627 width=106)
+                                                                    Output:["_col0","_col1","_col2","_col3"]
+                                                                    Filter Operator [FIL_316] (rows=9599627 width=106)
+                                                                      predicate:((cr_return_amount > 10000) and cr_item_sk is not null and cr_order_number is not null)
+                                                                      TableScan [TS_38] (rows=28798881 width=106)
+                                                                        default@catalog_returns,cr,Tbl:COMPLETE,Col:NONE,Output:["cr_item_sk","cr_order_number","cr_return_quantity","cr_return_amount"]
+                                                              <-Reducer 14 [SIMPLE_EDGE]
+                                                                SHUFFLE [RS_44]
+                                                                  PartitionCols:_col1, _col2
+                                                                  Merge Join Operator [MERGEJOIN_234] (rows=11732919 width=135)
+                                                                    Conds:RS_315._col0=RS_270._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
+                                                                  <-Map 12 [SIMPLE_EDGE] vectorized
+                                                                    PARTITION_ONLY_SHUFFLE [RS_270]
+                                                                      PartitionCols:_col0
+                                                                       Please refer to the previous Select Operator [SEL_267]
+                                                                  <-Map 27 [SIMPLE_EDGE] vectorized
+                                                                    SHUFFLE [RS_315]
+                                                                      PartitionCols:_col0
+                                                                      Select Operator [SEL_314] (rows=10666290 width=135)
+                                                                        Output:["_col0","_col1","_col2","_col3","_col4"]
+                                                                        Filter Operator [FIL_313] (rows=10666290 width=135)
+                                                                          predicate:((cs_net_paid > 0) and (cs_net_profit > 1) and (cs_quantity > 0) and (cs_sold_date_sk BETWEEN DynamicValue(RS_42_date_dim_d_date_sk_min) AND DynamicValue(RS_42_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_42_date_dim_d_date_sk_bloom_filter))) and cs_item_sk is not null and cs_order_number is not null and cs_sold_date_sk is not null)
+                                                                          TableScan [TS_32] (rows=287989836 width=135)
+                                                                            default@catalog_sales,cs,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_item_sk","cs_order_number","cs_quantity","cs_net_paid","cs_net_profit"]
+                                                                          <-Reducer 19 [BROADCAST_EDGE] vectorized
+                                                                            BROADCAST [RS_312]
+                                                                              Group By Operator [GBY_311] (rows=1 width=12)
+                                                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                              <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                                PARTITION_ONLY_SHUFFLE [RS_278]
+                                                                                  Group By Operator [GBY_275] (rows=1 width=12)
+                                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                                    Select Operator [SEL_271] (rows=18262 width=1119)
+                                                                                      Output:["_col0"]
+                                                                                       Please refer to the previous Select Operator [SEL_267]
+                              <-Reducer 6 [CONTAINS] vectorized
+                                Reduce Output Operator [RS_299]
+                                  PartitionCols:_col0, _col1, _col2, _col3, _col4
+                                  Group By Operator [GBY_298] (rows=6453220 width=135)
+                                    Output:["_col0","_col1","_col2","_col3","_col4"],keys:_col0, _col3, _col4, _col1, _col2
+                                    Select Operator [SEL_297] (rows=2151150 width=135)
+                                      Output:["_col0","_col1","_col2","_col3","_col4"]
+                                      Filter Operator [FIL_296] (rows=2151150 width=135)
+                                        predicate:((_col0 <= 10) or (rank_window_1 <= 10))
+                                        PTF Operator [PTF_295] (rows=3226726 width=135)
+                                          Function definitions:[{},{"name:":"windowingtablefunction","order by:":"(CAST( _col4 AS decimal(15,4)) / CAST( _col5 AS decimal(15,4))) ASC NULLS FIRST","partition by:":"0"}]
+                                          Select Operator [SEL_294] (rows=3226726 width=135)
+                                            Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                          <-Reducer 5 [SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_293]
+                                              PartitionCols:0
+                                              Select Operator [SEL_292] (rows=3226726 width=135)
+                                                Output:["rank_window_0","_col0","_col1","_col2","_col3","_col4"]
+                                                PTF Operator [PTF_291] (rows=3226726 width=135)
+                                                  Function definitions:[{},{"name:":"windowingtablefunction","order by:":"(CAST( _col1 AS decimal(15,4)) / CAST( _col2 AS decimal(15,4))) ASC NULLS FIRST","partition by:":"0"}]
+                                                  Select Operator [SEL_290] (rows=3226726 width=135)
+                                                    Output:["_col0","_col1","_col2","_col3","_col4"]
+                                                  <-Reducer 4 [SIMPLE_EDGE] vectorized
+                                                    SHUFFLE [RS_289]
+                                                      PartitionCols:0
+                                                      Group By Operator [GBY_288] (rows=3226726 width=135)
+                                                        Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0
+                                                      <-Reducer 3 [SIMPLE_EDGE]
+                                                        SHUFFLE [RS_18]
+                                                          PartitionCols:_col0
+                                                          Group By Operator [GBY_17] (rows=6453452 width=135)
+                                                            Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0
+                                                            Select Operator [SEL_15] (rows=6453452 width=135)
+                                                              Output:["_col0","_col1","_col2","_col3","_col4"]
+                                                              Merge Join Operator [MERGEJOIN_233] (rows=6453452 width=135)
+                                                                Conds:RS_12._col1, _col2=RS_287._col0, _col1(Inner),Output:["_col1","_col3","_col4","_col11","_col12"]
+                                                              <-Map 26 [SIMPLE_EDGE] vectorized
+                                                                SHUFFLE [RS_287]
+                                                                  PartitionCols:_col0, _col1
+                                                                  Select Operator [SEL_286] (rows=4799489 width=92)
+                                                                    Output:["_col0","_col1","_col2","_col3"]
+                                                                    Filter Operator [FIL_285] (rows=4799489 width=92)
+                                                                      predicate:((wr_return_amt > 10000) and wr_item_sk is not null and wr_order_number is not null)
+                                                                      TableScan [TS_6] (rows=14398467 width=92)
+                                                                        default@web_returns,wr,Tbl:COMPLETE,Col:NONE,Output:["wr_item_sk","wr_order_number","wr_return_quantity","wr_return_amt"]
+                                                              <-Reducer 2 [SIMPLE_EDGE]
+                                                                SHUFFLE [RS_12]
+                                                                  PartitionCols:_col1, _col2
+                                                                  Merge Join Operator [MERGEJOIN_232] (rows=5866775 width=135)
+                                                                    Conds:RS_284._col0=RS_268._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
+                                                                  <-Map 12 [SIMPLE_EDGE] vectorized
+                                                                    PARTITION_ONLY_SHUFFLE [RS_268]
+                                                                      PartitionCols:_col0
+                                                                       Please refer to the previous Select Operator [SEL_267]
+                                                                  <-Map 1 [SIMPLE_EDGE] vectorized
+                                                                    SHUFFLE [RS_284]
+                                                                      PartitionCols:_col0
+                                                                      Select Operator [SEL_283] (rows=5333432 width=135)
+                                                                        Output:["_col0","_col1","_col2","_col3","_col4"]
+                                                                        Filter Operator [FIL_282] (rows=5333432 width=135)
+                                                                          predicate:((ws_net_paid > 0) and (ws_net_profit > 1) and (ws_quantity > 0) and (ws_sold_date_sk BETWEEN DynamicValue(RS_10_date_dim_d_date_sk_min) AND DynamicValue(RS_10_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_10_date_dim_d_date_sk_bloom_filter))) and ws_item_sk is not null and ws_order_number is not null and ws_sold_date_sk is not null)
+                                                                          TableScan [TS_0] (rows=144002668 width=135)
+                                                                            default@web_sales,ws,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_order_number","ws_quantity","ws_net_paid","ws_net_profit"]
+                                                                          <-Reducer 13 [BROADCAST_EDGE] vectorized
+                                                                            BROADCAST [RS_281]
+                                                                              Group By Operator [GBY_280] (rows=1 width=12)
+                                                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                              <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                                PARTITION_ONLY_SHUFFLE [RS_277]
+                                                                                  Group By Operator [GBY_274] (rows=1 width=12)
+                                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                                    Select Operator [SEL_269] (rows=18262 width=1119)
+                                                                                      Output:["_col0"]
+                                                                                       Please refer to the previous Select Operator [SEL_267]
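
The new "Top N Key Operator" near the top of this plan (TNK_302, keys _col0, _col3, _col4, _col1, _col2, sort order +++++, top n:100) lets only rows whose keys can still rank among the top 100 continue toward the final sort and limit; the downstream Limit still enforces the exact result, so forwarding a few extra rows is harmless. A minimal, illustrative Java sketch of that key-filtering idea follows. The class and method names are invented for this example and are not the Hive operator's API; the real operator is vectorized and wired into the operator pipeline.

  import java.util.Comparator;
  import java.util.TreeSet;

  // Illustrative sketch only: keep the N smallest distinct keys seen so far and
  // forward a row only if its key can still rank among them. Over-forwarding is
  // safe because the real top N is enforced again downstream.
  final class TopNKeySketch<K> {
    private final int topN;
    private final Comparator<? super K> cmp;
    private final TreeSet<K> smallestKeys;

    TopNKeySketch(int topN, Comparator<? super K> cmp) {
      this.topN = topN;
      this.cmp = cmp;
      this.smallestKeys = new TreeSet<>(cmp);
    }

    // Decide whether a row carrying this key should be passed on.
    boolean canForward(K key) {
      if (smallestKeys.size() < topN || cmp.compare(key, smallestKeys.last()) <= 0) {
        smallestKeys.add(key);        // duplicate keys collapse in the set
        if (smallestKeys.size() > topN) {
          smallestKeys.pollLast();    // evict the key that fell out of the top N
        }
        return true;
      }
      return false;                   // provably outside the top N; drop the row
    }
  }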
 


[29/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
index 0000000,fb4a761..fe64a91
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
@@@ -1,0 -1,471 +1,472 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.Iterator;
+ import java.util.List;
+ import java.util.Map;
+ 
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+ import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+ import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+ import org.apache.hadoop.hive.metastore.events.ListenerEvent;
+ import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreEventContext;
+ import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
+ import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.junit.Test;
+ 
+ import static org.junit.Assert.assertEquals;
+ import static org.junit.Assert.assertFalse;
+ import static org.junit.Assert.assertNotSame;
+ import static org.junit.Assert.assertTrue;
+ 
+ import com.google.common.collect.Lists;
+ 
+ import org.junit.experimental.categories.Category;
+ 
+ /**
+  * TestMetaStoreEventListener. Test case for
+  * {@link org.apache.hadoop.hive.metastore.MetaStoreEventListener} and
+  * {@link org.apache.hadoop.hive.metastore.MetaStorePreEventListener}
+  */
+ @Category(MetastoreUnitTest.class)
+ public class TestMetaStoreEventListener {
+   private Configuration conf;
+   private HiveMetaStoreClient msc;
+ 
+   private static final String dbName = "hive2038";
+   private static final String tblName = "tmptbl";
+   private static final String renamed = "tmptbl2";
+   private static final String metaConfKey = "metastore.partition.name.whitelist.pattern";
+   private static final String metaConfVal = "";
+ 
+   @Before
+   public void setUp() throws Exception {
+     System.setProperty("hive.metastore.event.listeners",
+         DummyListener.class.getName());
+     System.setProperty("hive.metastore.pre.event.listeners",
+         DummyPreListener.class.getName());
+ 
+     conf = MetastoreConf.newMetastoreConf();
+ 
+     MetastoreConf.setVar(conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN, metaConfVal);
+     MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+     MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+     MetaStoreTestUtils.setConfForStandloneMode(conf);
+     MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
+ 
+     msc = new HiveMetaStoreClient(conf);
+ 
+     msc.dropDatabase(dbName, true, true, true);
+     DummyListener.notifyList.clear();
+     DummyPreListener.notifyList.clear();
+   }
+ 
+   private void validateCreateDb(Database expectedDb, Database actualDb) {
+     assertEquals(expectedDb.getName(), actualDb.getName());
+     assertEquals(expectedDb.getLocationUri(), actualDb.getLocationUri());
+   }
+ 
+   private void validateTable(Table expectedTable, Table actualTable) {
+     assertEquals(expectedTable.getTableName(), actualTable.getTableName());
+     assertEquals(expectedTable.getDbName(), actualTable.getDbName());
+     assertEquals(expectedTable.getSd().getLocation(), actualTable.getSd().getLocation());
+   }
+ 
+   private void validateCreateTable(Table expectedTable, Table actualTable) {
+     validateTable(expectedTable, actualTable);
+   }
+ 
+   private void validateAddPartition(Partition expectedPartition, Partition actualPartition) {
+     assertEquals(expectedPartition, actualPartition);
+   }
+ 
+   private void validateTableInAddPartition(Table expectedTable, Table actualTable) {
+     assertEquals(expectedTable, actualTable);
+   }
+ 
+   private void validatePartition(Partition expectedPartition, Partition actualPartition) {
+     assertEquals(expectedPartition.getValues(), actualPartition.getValues());
+     assertEquals(expectedPartition.getDbName(), actualPartition.getDbName());
+     assertEquals(expectedPartition.getTableName(), actualPartition.getTableName());
+   }
+ 
+   private void validateAlterPartition(Partition expectedOldPartition,
+       Partition expectedNewPartition, String actualOldPartitionDbName,
+       String actualOldPartitionTblName,List<String> actualOldPartitionValues,
+       Partition actualNewPartition) {
+     assertEquals(expectedOldPartition.getValues(), actualOldPartitionValues);
+     assertEquals(expectedOldPartition.getDbName(), actualOldPartitionDbName);
+     assertEquals(expectedOldPartition.getTableName(), actualOldPartitionTblName);
+ 
+     validatePartition(expectedNewPartition, actualNewPartition);
+   }
+ 
+   private void validateAlterTable(Table expectedOldTable, Table expectedNewTable,
+       Table actualOldTable, Table actualNewTable) {
+     validateTable(expectedOldTable, actualOldTable);
+     validateTable(expectedNewTable, actualNewTable);
+   }
+ 
+   private void validateAlterTableColumns(Table expectedOldTable, Table expectedNewTable,
+       Table actualOldTable, Table actualNewTable) {
+     validateAlterTable(expectedOldTable, expectedNewTable, actualOldTable, actualNewTable);
+ 
+     assertEquals(expectedOldTable.getSd().getCols(), actualOldTable.getSd().getCols());
+     assertEquals(expectedNewTable.getSd().getCols(), actualNewTable.getSd().getCols());
+   }
+ 
+   private void validateLoadPartitionDone(String expectedTableName,
+       Map<String,String> expectedPartitionName, String actualTableName,
+       Map<String,String> actualPartitionName) {
+     assertEquals(expectedPartitionName, actualPartitionName);
+     assertEquals(expectedTableName, actualTableName);
+   }
+ 
+   private void validateDropPartition(Iterator<Partition> expectedPartitions, Iterator<Partition> actualPartitions) {
+     while (expectedPartitions.hasNext()){
+       assertTrue(actualPartitions.hasNext());
+       validatePartition(expectedPartitions.next(), actualPartitions.next());
+     }
+     assertFalse(actualPartitions.hasNext());
+   }
+ 
+   private void validateTableInDropPartition(Table expectedTable, Table actualTable) {
+     validateTable(expectedTable, actualTable);
+   }
+ 
+   private void validateDropTable(Table expectedTable, Table actualTable) {
+     validateTable(expectedTable, actualTable);
+   }
+ 
+   private void validateDropDb(Database expectedDb, Database actualDb) {
+     assertEquals(expectedDb, actualDb);
+   }
+ 
+   @Test
+   public void testListener() throws Exception {
+     int listSize = 0;
+ 
+     List<ListenerEvent> notifyList = DummyListener.notifyList;
+     List<PreEventContext> preNotifyList = DummyPreListener.notifyList;
+     assertEquals(notifyList.size(), listSize);
+     assertEquals(preNotifyList.size(), listSize);
+ 
+     new DatabaseBuilder()
+         .setName(dbName)
+         .create(msc, conf);
+     listSize++;
+     PreCreateDatabaseEvent preDbEvent = (PreCreateDatabaseEvent)(preNotifyList.get(preNotifyList.size() - 1));
+     Database db = msc.getDatabase(dbName);
+     assertEquals(listSize, notifyList.size());
+     assertEquals(listSize + 1, preNotifyList.size());
+     validateCreateDb(db, preDbEvent.getDatabase());
+ 
+     CreateDatabaseEvent dbEvent = (CreateDatabaseEvent)(notifyList.get(listSize - 1));
+     Assert.assertTrue(dbEvent.getStatus());
+     validateCreateDb(db, dbEvent.getDatabase());
+ 
+     Table table = new TableBuilder()
+         .inDb(db)
+         .setTableName(tblName)
+         .addCol("a", "string")
+         .addPartCol("b", "string")
+         .create(msc, conf);
+     PreCreateTableEvent preTblEvent = (PreCreateTableEvent)(preNotifyList.get(preNotifyList.size() - 1));
+     listSize++;
+     Table tbl = msc.getTable(dbName, tblName);
+     validateCreateTable(tbl, preTblEvent.getTable());
+     assertEquals(notifyList.size(), listSize);
+ 
+     CreateTableEvent tblEvent = (CreateTableEvent)(notifyList.get(listSize - 1));
+     Assert.assertTrue(tblEvent.getStatus());
+     validateCreateTable(tbl, tblEvent.getTable());
+ 
+ 
+     new PartitionBuilder()
+         .inTable(table)
+         .addValue("2011")
+         .addToTable(msc, conf);
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+     PreAddPartitionEvent prePartEvent = (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1));
+ 
+     AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
+     Assert.assertTrue(partEvent.getStatus());
+     Partition part = msc.getPartition("hive2038", "tmptbl", "b=2011");
+     Partition partAdded = partEvent.getPartitionIterator().next();
++    partAdded.setWriteId(part.getWriteId());
+     validateAddPartition(part, partAdded);
+     validateTableInAddPartition(tbl, partEvent.getTable());
+     validateAddPartition(part, prePartEvent.getPartitions().get(0));
+ 
+     // Test adding multiple partitions in a single partition-set, atomically.
+     int currentTime = (int)System.currentTimeMillis();
+     HiveMetaStoreClient hmsClient = new HiveMetaStoreClient(conf);
+     table = hmsClient.getTable(dbName, "tmptbl");
+     Partition partition1 = new Partition(Arrays.asList("20110101"), dbName, "tmptbl", currentTime,
+                                         currentTime, table.getSd(), table.getParameters());
+     Partition partition2 = new Partition(Arrays.asList("20110102"), dbName, "tmptbl", currentTime,
+                                         currentTime, table.getSd(), table.getParameters());
+     Partition partition3 = new Partition(Arrays.asList("20110103"), dbName, "tmptbl", currentTime,
+                                         currentTime, table.getSd(), table.getParameters());
+     hmsClient.add_partitions(Arrays.asList(partition1, partition2, partition3));
+     ++listSize;
+     AddPartitionEvent multiplePartitionEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
+     assertEquals("Unexpected table value.", table, multiplePartitionEvent.getTable());
+     List<Partition> multiParts = Lists.newArrayList(multiplePartitionEvent.getPartitionIterator());
+     assertEquals("Unexpected number of partitions in event!", 3, multiParts.size());
+     assertEquals("Unexpected partition value.", partition1.getValues(), multiParts.get(0).getValues());
+     assertEquals("Unexpected partition value.", partition2.getValues(), multiParts.get(1).getValues());
+     assertEquals("Unexpected partition value.", partition3.getValues(), multiParts.get(2).getValues());
+ 
+     part.setLastAccessTime((int)(System.currentTimeMillis()/1000));
+     msc.alter_partition(dbName, tblName, part);
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+     PreAlterPartitionEvent preAlterPartEvent =
+         (PreAlterPartitionEvent)preNotifyList.get(preNotifyList.size() - 1);
+ 
+     // The partition did not change, so the new
+     // partition should be similar to the original partition.
+     Partition origP = msc.getPartition(dbName, tblName, "b=2011");
+ 
+     AlterPartitionEvent alterPartEvent = (AlterPartitionEvent)notifyList.get(listSize - 1);
+     Assert.assertTrue(alterPartEvent.getStatus());
+     validateAlterPartition(origP, origP, alterPartEvent.getOldPartition().getDbName(),
+         alterPartEvent.getOldPartition().getTableName(),
+         alterPartEvent.getOldPartition().getValues(), alterPartEvent.getNewPartition());
+ 
+ 
+     validateAlterPartition(origP, origP, preAlterPartEvent.getDbName(),
+         preAlterPartEvent.getTableName(), preAlterPartEvent.getNewPartition().getValues(),
+         preAlterPartEvent.getNewPartition());
+ 
+     List<String> part_vals = new ArrayList<>();
+     part_vals.add("c=2012");
+     int preEventListSize;
+     preEventListSize = preNotifyList.size() + 1;
+     Partition newPart = msc.appendPartition(dbName, tblName, part_vals);
+ 
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+     assertEquals(preNotifyList.size(), preEventListSize);
+ 
+     AddPartitionEvent appendPartEvent =
+         (AddPartitionEvent)(notifyList.get(listSize-1));
+     Partition partAppended = appendPartEvent.getPartitionIterator().next();
+     validateAddPartition(newPart, partAppended);
+ 
+     PreAddPartitionEvent preAppendPartEvent =
+         (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1));
+     validateAddPartition(newPart, preAppendPartEvent.getPartitions().get(0));
+ 
+     Table renamedTable = new Table(table);
+     renamedTable.setTableName(renamed);
+     msc.alter_table(dbName, tblName, renamedTable);
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+     PreAlterTableEvent preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
+ 
+     renamedTable = msc.getTable(dbName, renamed);
+ 
+     AlterTableEvent alterTableE = (AlterTableEvent) notifyList.get(listSize-1);
+     Assert.assertTrue(alterTableE.getStatus());
+     validateAlterTable(tbl, renamedTable, alterTableE.getOldTable(), alterTableE.getNewTable());
+     validateAlterTable(tbl, renamedTable, preAlterTableE.getOldTable(),
+         preAlterTableE.getNewTable());
+ 
+     //change the table name back
+     table = new Table(renamedTable);
+     table.setTableName(tblName);
+     msc.alter_table(dbName, renamed, table);
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+ 
+     table = msc.getTable(dbName, tblName);
+     table.getSd().addToCols(new FieldSchema("c", "int", ""));
+     msc.alter_table(dbName, tblName, table);
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+     preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
+ 
+     Table altTable = msc.getTable(dbName, tblName);
+ 
+     alterTableE = (AlterTableEvent) notifyList.get(listSize-1);
+     Assert.assertTrue(alterTableE.getStatus());
+     validateAlterTableColumns(tbl, altTable, alterTableE.getOldTable(), alterTableE.getNewTable());
+     validateAlterTableColumns(tbl, altTable, preAlterTableE.getOldTable(),
+         preAlterTableE.getNewTable());
+ 
+     Map<String,String> kvs = new HashMap<>(1);
+     kvs.put("b", "2011");
+     msc.markPartitionForEvent("hive2038", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+ 
+     LoadPartitionDoneEvent partMarkEvent = (LoadPartitionDoneEvent)notifyList.get(listSize - 1);
+     Assert.assertTrue(partMarkEvent.getStatus());
+     validateLoadPartitionDone("tmptbl", kvs, partMarkEvent.getTable().getTableName(),
+         partMarkEvent.getPartitionName());
+ 
+     PreLoadPartitionDoneEvent prePartMarkEvent =
+         (PreLoadPartitionDoneEvent)preNotifyList.get(preNotifyList.size() - 1);
+     validateLoadPartitionDone("tmptbl", kvs, prePartMarkEvent.getTableName(),
+         prePartMarkEvent.getPartitionName());
+ 
+     msc.dropPartition(dbName, tblName, Collections.singletonList("2011"));
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+     PreDropPartitionEvent preDropPart = (PreDropPartitionEvent) preNotifyList.get(preNotifyList
+         .size() - 1);
+ 
+     DropPartitionEvent dropPart = (DropPartitionEvent)notifyList.get(listSize - 1);
+     Assert.assertTrue(dropPart.getStatus());
+     validateDropPartition(Collections.singletonList(part).iterator(), dropPart.getPartitionIterator());
+     validateTableInDropPartition(tbl, dropPart.getTable());
+ 
+     validateDropPartition(Collections.singletonList(part).iterator(), preDropPart.getPartitionIterator());
+     validateTableInDropPartition(tbl, preDropPart.getTable());
+ 
+     msc.dropTable(dbName, tblName);
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+     PreDropTableEvent preDropTbl = (PreDropTableEvent)preNotifyList.get(preNotifyList.size() - 1);
+ 
+     DropTableEvent dropTbl = (DropTableEvent)notifyList.get(listSize-1);
+     Assert.assertTrue(dropTbl.getStatus());
+     validateDropTable(tbl, dropTbl.getTable());
+     validateDropTable(tbl, preDropTbl.getTable());
+ 
+     msc.dropDatabase(dbName);
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+     PreDropDatabaseEvent preDropDB = (PreDropDatabaseEvent)preNotifyList.get(preNotifyList.size() - 1);
+ 
+     DropDatabaseEvent dropDB = (DropDatabaseEvent)notifyList.get(listSize-1);
+     Assert.assertTrue(dropDB.getStatus());
+     validateDropDb(db, dropDB.getDatabase());
+     validateDropDb(db, preDropDB.getDatabase());
+ 
+     msc.setMetaConf("metastore.try.direct.sql", "false");
+     ConfigChangeEvent event = (ConfigChangeEvent) notifyList.get(notifyList.size() - 1);
+     assertEquals("metastore.try.direct.sql", event.getKey());
+     assertEquals("true", event.getOldValue());
+     assertEquals("false", event.getNewValue());
+   }
+ 
+   @Test
+   public void testMetaConfNotifyListenersClosingClient() throws Exception {
+     HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+     closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+     ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+     assertEquals(event.getOldValue(), metaConfVal);
+     assertEquals(event.getNewValue(), "[test pattern modified]");
+     closingClient.close();
+ 
+     Thread.sleep(2 * 1000);
+ 
+     event = (ConfigChangeEvent) DummyListener.getLastEvent();
+     assertEquals(event.getOldValue(), "[test pattern modified]");
+     assertEquals(event.getNewValue(), metaConfVal);
+   }
+ 
+   @Test
+   public void testMetaConfNotifyListenersNonClosingClient() throws Exception {
+     HiveMetaStoreClient nonClosingClient = new HiveMetaStoreClient(conf, null);
+     nonClosingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+     ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+     assertEquals(event.getOldValue(), metaConfVal);
+     assertEquals(event.getNewValue(), "[test pattern modified]");
+     // This should also trigger meta listener notification via TServerEventHandler#deleteContext
+     nonClosingClient.getTTransport().close();
+ 
+     Thread.sleep(2 * 1000);
+ 
+     event = (ConfigChangeEvent) DummyListener.getLastEvent();
+     assertEquals(event.getOldValue(), "[test pattern modified]");
+     assertEquals(event.getNewValue(), metaConfVal);
+   }
+ 
+   @Test
+   public void testMetaConfDuplicateNotification() throws Exception {
+     HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+     closingClient.setMetaConf(metaConfKey, metaConfVal);
+     int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
+     closingClient.close();
+ 
+     Thread.sleep(2 * 1000);
+ 
+     int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
+     // Setting key to same value, should not trigger configChange event during shutdown
+     assertEquals(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts);
+   }
+ 
+   @Test
+   public void testMetaConfSameHandler() throws Exception {
+     HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+     closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+     ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+     int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
+     IHMSHandler beforeHandler = event.getIHMSHandler();
+     closingClient.close();
+ 
+     Thread.sleep(2 * 1000);
+     event = (ConfigChangeEvent) DummyListener.getLastEvent();
+     int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
+     IHMSHandler afterHandler = event.getIHMSHandler();
+     // Meta-conf cleanup should trigger an event to listener
+     assertNotSame(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts);
+     // Both the handlers should be same
+     assertEquals(beforeHandler, afterHandler);
+   }
+ }
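
The test above registers DummyListener and DummyPreListener (defined elsewhere in the test sources, not shown in this diff) and then asserts on the events they recorded through notifyList and getLastEvent(). As a rough sketch of the shape such a recording listener takes, under the assumption that it simply appends every callback's event to a static list; the class name and the particular callbacks overridden here are illustrative, not the actual DummyListener code:

  import java.util.ArrayList;
  import java.util.Collections;
  import java.util.List;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
  import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
  import org.apache.hadoop.hive.metastore.events.DropTableEvent;
  import org.apache.hadoop.hive.metastore.events.ListenerEvent;

  // Sketch of a recording listener: each callback appends its event to a
  // static list so a test can inspect the ordered sequence afterwards.
  public class RecordingListener extends MetaStoreEventListener {

    public static final List<ListenerEvent> notifyList =
        Collections.synchronizedList(new ArrayList<ListenerEvent>());

    public RecordingListener(Configuration config) {
      super(config);
    }

    @Override
    public void onCreateTable(CreateTableEvent tableEvent) {
      notifyList.add(tableEvent);
    }

    @Override
    public void onDropTable(DropTableEvent tableEvent) {
      notifyList.add(tableEvent);
    }

    // The remaining on*() callbacks (onAddPartition, onAlterTable,
    // onDropDatabase, onConfigChange, ...) would be overridden the same way.

    public static ListenerEvent getLastEvent() {
      return notifyList.get(notifyList.size() - 1);
    }
  }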

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
index 0000000,833e2bd..995271a
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@@ -1,0 -1,904 +1,904 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore;
+ 
+ import com.codahale.metrics.Counter;
+ import com.google.common.base.Supplier;
+ import com.google.common.collect.ImmutableList;
+ import org.apache.hadoop.hive.metastore.ObjectStore.RetryingExecutor;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+ import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
+ import org.apache.hadoop.hive.metastore.api.Role;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.HiveObjectPrivilegeBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.HiveObjectRefBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PrivilegeGrantInfoBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+ import org.apache.hadoop.hive.metastore.metrics.Metrics;
+ import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+ import org.apache.hadoop.hive.metastore.model.MNotificationLog;
+ import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
+ import org.junit.Assert;
+ import org.junit.Assume;
+ import org.junit.Before;
+ import org.junit.Ignore;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.mockito.Mockito;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import javax.jdo.Query;
+ import java.sql.Connection;
+ import java.sql.DriverManager;
+ import java.sql.ResultSet;
+ import java.sql.SQLException;
+ import java.sql.Statement;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.List;
+ import java.util.Set;
+ import java.util.concurrent.BrokenBarrierException;
+ import java.util.concurrent.CyclicBarrier;
+ import java.util.concurrent.ExecutionException;
+ import java.util.concurrent.ExecutorService;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.TimeUnit;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ 
+ @Category(MetastoreUnitTest.class)
+ public class TestObjectStore {
+   private ObjectStore objectStore = null;
+   private Configuration conf;
+ 
+   private static final String DB1 = "testobjectstoredb1";
+   private static final String DB2 = "testobjectstoredb2";
+   private static final String TABLE1 = "testobjectstoretable1";
+   private static final String KEY1 = "testobjectstorekey1";
+   private static final String KEY2 = "testobjectstorekey2";
+   private static final String OWNER = "testobjectstoreowner";
+   private static final String USER1 = "testobjectstoreuser1";
+   private static final String ROLE1 = "testobjectstorerole1";
+   private static final String ROLE2 = "testobjectstorerole2";
+   private static final Logger LOG = LoggerFactory.getLogger(TestObjectStore.class.getName());
+ 
+   private static final class LongSupplier implements Supplier<Long> {
+     public long value = 0;
+ 
+     @Override
+     public Long get() {
+       return value;
+     }
+   }
+ 
+   @Before
+   public void setUp() throws Exception {
+     conf = MetastoreConf.newMetastoreConf();
+     MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
+     MetaStoreTestUtils.setConfForStandloneMode(conf);
+ 
+     objectStore = new ObjectStore();
+     objectStore.setConf(conf);
+     dropAllStoreObjects(objectStore);
+     HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf));
+   }
+ 
+   @Test
+   public void catalogs() throws MetaException, NoSuchObjectException {
+     final String names[] = {"cat1", "cat2"};
+     final String locations[] = {"loc1", "loc2"};
+     final String descriptions[] = {"description 1", "description 2"};
+ 
+     for (int i = 0; i < names.length; i++) {
+       Catalog cat = new CatalogBuilder()
+           .setName(names[i])
+           .setLocation(locations[i])
+           .setDescription(descriptions[i])
+           .build();
+       objectStore.createCatalog(cat);
+     }
+ 
+     List<String> fetchedNames = objectStore.getCatalogs();
+     Assert.assertEquals(3, fetchedNames.size());
+     for (int i = 0; i < names.length - 1; i++) {
+       Assert.assertEquals(names[i], fetchedNames.get(i));
+       Catalog cat = objectStore.getCatalog(fetchedNames.get(i));
+       Assert.assertEquals(names[i], cat.getName());
+       Assert.assertEquals(descriptions[i], cat.getDescription());
+       Assert.assertEquals(locations[i], cat.getLocationUri());
+     }
+     Catalog cat = objectStore.getCatalog(fetchedNames.get(2));
+     Assert.assertEquals(DEFAULT_CATALOG_NAME, cat.getName());
+     Assert.assertEquals(Warehouse.DEFAULT_CATALOG_COMMENT, cat.getDescription());
+     // Location will vary by system.
+ 
+     for (int i = 0; i < names.length; i++) objectStore.dropCatalog(names[i]);
+     fetchedNames = objectStore.getCatalogs();
+     Assert.assertEquals(1, fetchedNames.size());
+   }
+ 
+   @Test(expected = NoSuchObjectException.class)
+   public void getNoSuchCatalog() throws MetaException, NoSuchObjectException {
+     objectStore.getCatalog("no_such_catalog");
+   }
+ 
+   @Test(expected = NoSuchObjectException.class)
+   public void dropNoSuchCatalog() throws MetaException, NoSuchObjectException {
+     objectStore.dropCatalog("no_such_catalog");
+   }
+ 
+   // TODO test dropping non-empty catalog
+ 
+   /**
+    * Test database operations
+    */
+   @Test
+   public void testDatabaseOps() throws MetaException, InvalidObjectException,
+       NoSuchObjectException {
+     String catName = "tdo1_cat";
+     createTestCatalog(catName);
+     Database db1 = new Database(DB1, "description", "locationurl", null);
+     Database db2 = new Database(DB2, "description", "locationurl", null);
+     db1.setCatalogName(catName);
+     db2.setCatalogName(catName);
+     objectStore.createDatabase(db1);
+     objectStore.createDatabase(db2);
+ 
+     List<String> databases = objectStore.getAllDatabases(catName);
+     LOG.info("databases: " + databases);
+     Assert.assertEquals(2, databases.size());
+     Assert.assertEquals(DB1, databases.get(0));
+     Assert.assertEquals(DB2, databases.get(1));
+ 
+     objectStore.dropDatabase(catName, DB1);
+     databases = objectStore.getAllDatabases(catName);
+     Assert.assertEquals(1, databases.size());
+     Assert.assertEquals(DB2, databases.get(0));
+ 
+     objectStore.dropDatabase(catName, DB2);
+   }
+ 
+   /**
+    * Test table operations
+    */
+   @Test
+   public void testTableOps() throws MetaException, InvalidObjectException, NoSuchObjectException,
+       InvalidInputException {
+     Database db1 = new DatabaseBuilder()
+         .setName(DB1)
+         .setDescription("description")
+         .setLocation("locationurl")
+         .build(conf);
+     objectStore.createDatabase(db1);
+     StorageDescriptor sd1 =
+         new StorageDescriptor(ImmutableList.of(new FieldSchema("pk_col", "double", null)),
+             "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null),
+             null, null, null);
+     HashMap<String, String> params = new HashMap<>();
+     params.put("EXTERNAL", "false");
+     Table tbl1 =
+         new Table(TABLE1, DB1, "owner", 1, 2, 3, sd1, null, params, null, null, "MANAGED_TABLE");
+     objectStore.createTable(tbl1);
+ 
+     List<String> tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+     Assert.assertEquals(1, tables.size());
+     Assert.assertEquals(TABLE1, tables.get(0));
+ 
+     StorageDescriptor sd2 =
+         new StorageDescriptor(ImmutableList.of(new FieldSchema("fk_col", "double", null)),
+             "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null),
+             null, null, null);
+     Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd2, null, params, null, null,
+         "MANAGED_TABLE");
+ 
+     // Change different fields and verify they were altered
+     newTbl1.setOwner("role1");
+     newTbl1.setOwnerType(PrincipalType.ROLE);
+ 
 -    objectStore.alterTable(DEFAULT_CATALOG_NAME, DB1, TABLE1, newTbl1);
++    objectStore.alterTable(DEFAULT_CATALOG_NAME, DB1, TABLE1, newTbl1, -1, null);
+     tables = objectStore.getTables(DEFAULT_CATALOG_NAME, DB1, "new*");
+     Assert.assertEquals(1, tables.size());
+     Assert.assertEquals("new" + TABLE1, tables.get(0));
+ 
+     // Verify fields were altered during the alterTable operation
+     Table alteredTable = objectStore.getTable(DEFAULT_CATALOG_NAME, DB1, "new" + TABLE1);
+     Assert.assertEquals("Owner of table was not altered", newTbl1.getOwner(), alteredTable.getOwner());
+     Assert.assertEquals("Owner type of table was not altered", newTbl1.getOwnerType(), alteredTable.getOwnerType());
+ 
+     objectStore.createTable(tbl1);
+     tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+     Assert.assertEquals(2, tables.size());
+ 
+     List<SQLForeignKey> foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, DB1, TABLE1, null, null);
+     Assert.assertEquals(0, foreignKeys.size());
+ 
+     SQLPrimaryKey pk = new SQLPrimaryKey(DB1, TABLE1, "pk_col", 1,
+         "pk_const_1", false, false, false);
+     pk.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPrimaryKeys(ImmutableList.of(pk));
+     SQLForeignKey fk = new SQLForeignKey(DB1, TABLE1, "pk_col",
+         DB1, "new" + TABLE1, "fk_col", 1,
+         0, 0, "fk_const_1", "pk_const_1", false, false, false);
+     objectStore.addForeignKeys(ImmutableList.of(fk));
+ 
+     // Retrieve from PK side
+     foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1);
+     Assert.assertEquals(1, foreignKeys.size());
+ 
+     List<SQLForeignKey> fks = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1);
+     if (fks != null) {
+       for (SQLForeignKey fkcol : fks) {
+         objectStore.dropConstraint(fkcol.getCatName(), fkcol.getFktable_db(), fkcol.getFktable_name(),
+             fkcol.getFk_name());
+       }
+     }
+     // Retrieve from FK side
+     foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, DB1, TABLE1, null, null);
+     Assert.assertEquals(0, foreignKeys.size());
+     // Retrieve from PK side
+     foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1);
+     Assert.assertEquals(0, foreignKeys.size());
+ 
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, TABLE1);
+     tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+     Assert.assertEquals(1, tables.size());
+ 
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, "new" + TABLE1);
+     tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+     Assert.assertEquals(0, tables.size());
+ 
+     objectStore.dropDatabase(db1.getCatalogName(), DB1);
+   }
+ 
+   private StorageDescriptor createFakeSd(String location) {
+     return new StorageDescriptor(null, location, null, null, false, 0,
+         new SerDeInfo("SerDeName", "serializationLib", null), null, null, null);
+   }
+ 
+ 
+   /**
+    * Tests partition operations
+    */
+   @Test
+   public void testPartitionOps() throws MetaException, InvalidObjectException,
+       NoSuchObjectException, InvalidInputException {
+     Database db1 = new DatabaseBuilder()
+         .setName(DB1)
+         .setDescription("description")
+         .setLocation("locationurl")
+         .build(conf);
+     objectStore.createDatabase(db1);
+     StorageDescriptor sd = createFakeSd("location");
+     HashMap<String, String> tableParams = new HashMap<>();
+     tableParams.put("EXTERNAL", "false");
+     FieldSchema partitionKey1 = new FieldSchema("Country", ColumnType.STRING_TYPE_NAME, "");
+     FieldSchema partitionKey2 = new FieldSchema("State", ColumnType.STRING_TYPE_NAME, "");
+     Table tbl1 =
+         new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2),
+             tableParams, null, null, "MANAGED_TABLE");
+     objectStore.createTable(tbl1);
+     HashMap<String, String> partitionParams = new HashMap<>();
+     partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true");
+     List<String> value1 = Arrays.asList("US", "CA");
+     Partition part1 = new Partition(value1, DB1, TABLE1, 111, 111, sd, partitionParams);
+     part1.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPartition(part1);
+     List<String> value2 = Arrays.asList("US", "MA");
+     Partition part2 = new Partition(value2, DB1, TABLE1, 222, 222, sd, partitionParams);
+     part2.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPartition(part2);
+ 
+     Deadline.startTimer("getPartition");
+     List<Partition> partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10);
+     Assert.assertEquals(2, partitions.size());
+     Assert.assertEquals(111, partitions.get(0).getCreateTime());
+     Assert.assertEquals(222, partitions.get(1).getCreateTime());
+ 
+     int numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, "");
+     Assert.assertEquals(partitions.size(), numPartitions);
+ 
+     numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, "country = \"US\"");
+     Assert.assertEquals(2, numPartitions);
+ 
+     objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, value1);
+     partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10);
+     Assert.assertEquals(1, partitions.size());
+     Assert.assertEquals(222, partitions.get(0).getCreateTime());
+ 
+     objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, value2);
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, TABLE1);
+     objectStore.dropDatabase(db1.getCatalogName(), DB1);
+   }
+ 
+   /**
+    * Checks if the JDO cache is able to handle directSQL partition drops in one session.
+    * @throws MetaException
+    * @throws InvalidObjectException
+    * @throws NoSuchObjectException
+    * @throws InvalidInputException
+    */
+   @Test
+   public void testDirectSQLDropPartitionsCacheInSession()
+       throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
+     createPartitionedTable(false, false);
+     // query the partitions with JDO
+     Deadline.startTimer("getPartition");
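+     // Note (assumption): the two trailing booleans on getPartitionsInternal/dropPartitionsInternal
+     // appear to select the access path (directSQL vs. JDO); (false, true) below keeps this query on
+     // the JDO path only, matching the comments in this test.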
+     List<Partition> partitions = objectStore.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         10, false, true);
+     Assert.assertEquals(3, partitions.size());
+ 
+     // drop partitions with directSql
+     objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         Arrays.asList("test_part_col=a0", "test_part_col=a1"), true, false);
+ 
+     // query the partitions with JDO, checking the cache is not causing any problem
+     partitions = objectStore.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         10, false, true);
+     Assert.assertEquals(1, partitions.size());
+   }
+ 
+   /**
+    * Checks if the JDO cache is able to handle directSQL partition drops across sessions.
+    * @throws MetaException
+    * @throws InvalidObjectException
+    * @throws NoSuchObjectException
+    * @throws InvalidInputException
+    */
+   @Test
+   public void testDirectSQLDropPartitionsCacheCrossSession()
+       throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
+     ObjectStore objectStore2 = new ObjectStore();
+     objectStore2.setConf(conf);
+ 
+     createPartitionedTable(false, false);
+     // query the partitions with JDO in the 1st session
+     Deadline.startTimer("getPartition");
+     List<Partition> partitions = objectStore.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         10, false, true);
+     Assert.assertEquals(3, partitions.size());
+ 
+     // query the partitions with JDO in the 2nd session
+     partitions = objectStore2.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10,
+         false, true);
+     Assert.assertEquals(3, partitions.size());
+ 
+     // drop partitions with directSql in the 1st session
+     objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         Arrays.asList("test_part_col=a0", "test_part_col=a1"), true, false);
+ 
+     // query the partitions with JDO in the 2nd session, checking the cache is not causing any
+     // problem
+     partitions = objectStore2.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         10, false, true);
+     Assert.assertEquals(1, partitions.size());
+   }
+ 
+   /**
+    * Checks that the directSQL partition drop removes all connected data from the RDBMS tables.
+    * @throws MetaException
+    * @throws InvalidObjectException
+    * @throws NoSuchObjectException
+    * @throws SQLException
+    */
+   @Test
+   public void testDirectSQLDropParitionsCleanup() throws MetaException, InvalidObjectException,
+       NoSuchObjectException, SQLException, InvalidInputException {
+ 
+     createPartitionedTable(true, true);
+ 
+     // Check that every table is in the expected state before the drop
+     checkBackendTableSize("PARTITIONS", 3);
+     checkBackendTableSize("PART_PRIVS", 3);
+     checkBackendTableSize("PART_COL_PRIVS", 3);
+     checkBackendTableSize("PART_COL_STATS", 3);
+     checkBackendTableSize("PARTITION_PARAMS", 3);
+     checkBackendTableSize("PARTITION_KEY_VALS", 3);
+     checkBackendTableSize("SD_PARAMS", 3);
+     checkBackendTableSize("BUCKETING_COLS", 3);
+     checkBackendTableSize("SKEWED_COL_NAMES", 3);
+     checkBackendTableSize("SDS", 4); // Table has an SDS
+     checkBackendTableSize("SORT_COLS", 3);
+     checkBackendTableSize("SERDE_PARAMS", 3);
+     checkBackendTableSize("SERDES", 4); // Table has a serde
+ 
+     // drop the partitions
+     Deadline.startTimer("dropPartitions");
+     objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         Arrays.asList("test_part_col=a0", "test_part_col=a1", "test_part_col=a2"), true, false);
+ 
+     // Check that all data connected to the partitions has been dropped
+     checkBackendTableSize("PARTITIONS", 0);
+     checkBackendTableSize("PART_PRIVS", 0);
+     checkBackendTableSize("PART_COL_PRIVS", 0);
+     checkBackendTableSize("PART_COL_STATS", 0);
+     checkBackendTableSize("PARTITION_PARAMS", 0);
+     checkBackendTableSize("PARTITION_KEY_VALS", 0);
+     checkBackendTableSize("SD_PARAMS", 0);
+     checkBackendTableSize("BUCKETING_COLS", 0);
+     checkBackendTableSize("SKEWED_COL_NAMES", 0);
+     checkBackendTableSize("SDS", 1); // Table has an SDS
+     checkBackendTableSize("SORT_COLS", 0);
+     checkBackendTableSize("SERDE_PARAMS", 0);
+     checkBackendTableSize("SERDES", 1); // Table has a serde
+   }
+ 
+   /**
+    * Creates the DB1 database and the TABLE1 table with 3 partitions.
+    * @param withPrivileges Should we create privileges as well
+    * @param withStatistics Should we create statistics as well
+    * @throws MetaException
+    * @throws InvalidObjectException
+    */
+   private void createPartitionedTable(boolean withPrivileges, boolean withStatistics)
+       throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
+     Database db1 = new DatabaseBuilder()
+                        .setName(DB1)
+                        .setDescription("description")
+                        .setLocation("locationurl")
+                        .build(conf);
+     objectStore.createDatabase(db1);
+     Table tbl1 =
+         new TableBuilder()
+             .setDbName(DB1)
+             .setTableName(TABLE1)
+             .addCol("test_col1", "int")
+             .addCol("test_col2", "int")
+             .addPartCol("test_part_col", "int")
+             .addCol("test_bucket_col", "int", "test bucket col comment")
+             .addCol("test_skewed_col", "int", "test skewed col comment")
+             .addCol("test_sort_col", "int", "test sort col comment")
+             .build(conf);
+     objectStore.createTable(tbl1);
+ 
+     PrivilegeBag privilegeBag = new PrivilegeBag();
+     // Create partitions for the partitioned table
+     for(int i=0; i < 3; i++) {
+       Partition part = new PartitionBuilder()
+                            .inTable(tbl1)
+                            .addValue("a" + i)
+                            .addSerdeParam("serdeParam", "serdeParamValue")
+                            .addStorageDescriptorParam("sdParam", "sdParamValue")
+                            .addBucketCol("test_bucket_col")
+                            .addSkewedColName("test_skewed_col")
+                            .addSortCol("test_sort_col", 1)
+                            .build(conf);
+       objectStore.addPartition(part);
+ 
+       if (withPrivileges) {
+         HiveObjectRef partitionReference = new HiveObjectRefBuilder().buildPartitionReference(part);
+         HiveObjectRef partitionColumnReference = new HiveObjectRefBuilder()
+             .buildPartitionColumnReference(tbl1, "test_part_col", part.getValues());
+         PrivilegeGrantInfo privilegeGrantInfo = new PrivilegeGrantInfoBuilder()
+             .setPrivilege("a")
+             .build();
+         HiveObjectPrivilege partitionPriv = new HiveObjectPrivilegeBuilder()
+                                                 .setHiveObjectRef(partitionReference)
+                                                 .setPrincipleName("a")
+                                                 .setPrincipalType(PrincipalType.USER)
+                                                 .setGrantInfo(privilegeGrantInfo)
+                                                 .build();
+         privilegeBag.addToPrivileges(partitionPriv);
+         HiveObjectPrivilege partitionColPriv = new HiveObjectPrivilegeBuilder()
+                                                    .setHiveObjectRef(partitionColumnReference)
+                                                    .setPrincipleName("a")
+                                                    .setPrincipalType(PrincipalType.USER)
+                                                    .setGrantInfo(privilegeGrantInfo)
+                                                    .build();
+         privilegeBag.addToPrivileges(partitionColPriv);
+       }
+ 
+       if (withStatistics) {
+         ColumnStatistics stats = new ColumnStatistics();
+         ColumnStatisticsDesc desc = new ColumnStatisticsDesc();
+         desc.setCatName(tbl1.getCatName());
+         desc.setDbName(tbl1.getDbName());
+         desc.setTableName(tbl1.getTableName());
+         desc.setPartName("test_part_col=a" + i);
+         stats.setStatsDesc(desc);
+ 
+         List<ColumnStatisticsObj> statsObjList = new ArrayList<>(1);
+         stats.setStatsObj(statsObjList);
+ 
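+         // The boolean stats values below are dummies; they only ensure a PART_COL_STATS row exists
+         // per partition so the directSQL cleanup test can verify those rows are removed.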
+         ColumnStatisticsData data = new ColumnStatisticsData();
+         BooleanColumnStatsData boolStats = new BooleanColumnStatsData();
+         boolStats.setNumTrues(0);
+         boolStats.setNumFalses(0);
+         boolStats.setNumNulls(0);
+         data.setBooleanStats(boolStats);
+ 
+         ColumnStatisticsObj partStats = new ColumnStatisticsObj("test_part_col", "int", data);
+         statsObjList.add(partStats);
+ 
 -        objectStore.updatePartitionColumnStatistics(stats, part.getValues());
++        objectStore.updatePartitionColumnStatistics(stats, part.getValues(), -1, null, -1);
+       }
+     }
+     if (withPrivileges) {
+       objectStore.grantPrivileges(privilegeBag);
+     }
+   }
+ 
+   /**
+    * Checks that the number of rows in the given HMS backend DB table is as expected. If it is
+    * not, an {@link AssertionError} is thrown.
+    * @param tableName The table in which we count the rows
+    * @param size The expected row number
+    * @throws SQLException If there is a problem connecting to / querying the backend DB
+    */
+   private void checkBackendTableSize(String tableName, int size) throws SQLException {
+     String connectionStr = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY);
+     // Use try-with-resources so the JDBC connection is closed even if the assertion fails
+     try (Connection conn = DriverManager.getConnection(connectionStr);
+          Statement stmt = conn.createStatement();
+          ResultSet rs = stmt.executeQuery("SELECT COUNT(1) FROM " + tableName)) {
+       rs.next();
+       Assert.assertEquals(tableName + " table should contain " + size + " rows", size,
+           rs.getLong(1));
+     }
+   }
+ 
+   /**
+    * Tests master key operations.
+    */
+   @Test
+   public void testMasterKeyOps() throws MetaException, NoSuchObjectException {
+     int id1 = objectStore.addMasterKey(KEY1);
+     int id2 = objectStore.addMasterKey(KEY2);
+ 
+     String[] keys = objectStore.getMasterKeys();
+     Assert.assertEquals(2, keys.length);
+     Assert.assertEquals(KEY1, keys[0]);
+     Assert.assertEquals(KEY2, keys[1]);
+ 
+     objectStore.updateMasterKey(id1, "new" + KEY1);
+     objectStore.updateMasterKey(id2, "new" + KEY2);
+     keys = objectStore.getMasterKeys();
+     Assert.assertEquals(2, keys.length);
+     Assert.assertEquals("new" + KEY1, keys[0]);
+     Assert.assertEquals("new" + KEY2, keys[1]);
+ 
+     objectStore.removeMasterKey(id1);
+     keys = objectStore.getMasterKeys();
+     Assert.assertEquals(1, keys.length);
+     Assert.assertEquals("new" + KEY2, keys[0]);
+ 
+     objectStore.removeMasterKey(id2);
+   }
+ 
+   /**
+    * Tests role operations.
+    */
+   @Test
+   public void testRoleOps() throws InvalidObjectException, MetaException, NoSuchObjectException {
+     objectStore.addRole(ROLE1, OWNER);
+     objectStore.addRole(ROLE2, OWNER);
+     List<String> roles = objectStore.listRoleNames();
+     Assert.assertEquals(2, roles.size());
+     Assert.assertEquals(ROLE2, roles.get(1));
+     Role role1 = objectStore.getRole(ROLE1);
+     Assert.assertEquals(OWNER, role1.getOwnerName());
+     objectStore.grantRole(role1, USER1, PrincipalType.USER, OWNER, PrincipalType.ROLE, true);
+     objectStore.revokeRole(role1, USER1, PrincipalType.USER, false);
+     objectStore.removeRole(ROLE1);
+   }
+ 
+   @Test
+   public void testDirectSqlErrorMetrics() throws Exception {
+     Configuration conf = MetastoreConf.newMetastoreConf();
+     MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, true);
+     Metrics.initialize(conf);
+     MetastoreConf.setVar(conf, MetastoreConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES,
+         "org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " +
+             "org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter"
+     );
+ 
+     // call setUp() again so that we get an object store with the metrics initialized
+     setUp();
+     Counter directSqlErrors =
+         Metrics.getRegistry().getCounters().get(MetricsConstants.DIRECTSQL_ERRORS);
+ 
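+     // First helper below: the directSQL path completes normally, so the error counter should stay at 0.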
+     objectStore.new GetDbHelper(DEFAULT_CATALOG_NAME, "foo", true, true) {
+       @Override
+       protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
+         return null;
+       }
+ 
+       @Override
+       protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx) throws MetaException,
+           NoSuchObjectException {
+         return null;
+       }
+     }.run(false);
+ 
+     Assert.assertEquals(0, directSqlErrors.getCount());
+ 
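+     // Second helper below: the directSQL path throws, so (assuming GetHelper falls back to the JDO
+     // path on error) the DIRECTSQL_ERRORS counter should increase by exactly one.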
+     objectStore.new GetDbHelper(DEFAULT_CATALOG_NAME, "foo", true, true) {
+       @Override
+       protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
+         throw new RuntimeException();
+       }
+ 
+       @Override
+       protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx) throws MetaException,
+           NoSuchObjectException {
+         return null;
+       }
+     }.run(false);
+ 
+     Assert.assertEquals(1, directSqlErrors.getCount());
+   }
+ 
+   private static void dropAllStoreObjects(RawStore store)
+       throws MetaException, InvalidObjectException, InvalidInputException {
+     try {
+       Deadline.registerIfNot(100000);
+       List<Function> functions = store.getAllFunctions(DEFAULT_CATALOG_NAME);
+       for (Function func : functions) {
+         store.dropFunction(DEFAULT_CATALOG_NAME, func.getDbName(), func.getFunctionName());
+       }
+       for (String catName : store.getCatalogs()) {
+         List<String> dbs = store.getAllDatabases(catName);
+         for (String db : dbs) {
+           List<String> tbls = store.getAllTables(DEFAULT_CATALOG_NAME, db);
+           for (String tbl : tbls) {
+             Deadline.startTimer("getPartition");
+             List<Partition> parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100);
+             for (Partition part : parts) {
+               store.dropPartition(DEFAULT_CATALOG_NAME, db, tbl, part.getValues());
+             }
+             // Find any constraints and drop them
+             Set<String> constraints = new HashSet<>();
+             List<SQLPrimaryKey> pk = store.getPrimaryKeys(DEFAULT_CATALOG_NAME, db, tbl);
+             if (pk != null) {
+               for (SQLPrimaryKey pkcol : pk) {
+                 constraints.add(pkcol.getPk_name());
+               }
+             }
+             List<SQLForeignKey> fks = store.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, db, tbl);
+             if (fks != null) {
+               for (SQLForeignKey fkcol : fks) {
+                 constraints.add(fkcol.getFk_name());
+               }
+             }
+             for (String constraint : constraints) {
+               store.dropConstraint(DEFAULT_CATALOG_NAME, db, tbl, constraint);
+             }
+             store.dropTable(DEFAULT_CATALOG_NAME, db, tbl);
+           }
+           store.dropDatabase(catName, db);
+         }
+         store.dropCatalog(catName);
+       }
+       List<String> roles = store.listRoleNames();
+       for (String role : roles) {
+         store.removeRole(role);
+       }
+     } catch (NoSuchObjectException e) {
+     }
+   }
+ 
+   @Test
+   public void testQueryCloseOnError() throws Exception {
+     ObjectStore spy = Mockito.spy(objectStore);
+     spy.getAllDatabases(DEFAULT_CATALOG_NAME);
+     spy.getAllFunctions(DEFAULT_CATALOG_NAME);
+     spy.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+     spy.getPartitionCount();
+     Mockito.verify(spy, Mockito.times(3))
+         .rollbackAndCleanup(Mockito.anyBoolean(), Mockito.<Query>anyObject());
+   }
+ 
+   @Test
+   public void testRetryingExecutorSleep() throws Exception {
+     RetryingExecutor re = new ObjectStore.RetryingExecutor(MetastoreConf.newMetastoreConf(), null);
+     Assert.assertTrue("invalid sleep value", re.getSleepInterval() >= 0);
+   }
+ 
+   @Ignore // See comment in ObjectStore.getDataSourceProps
+   @Test
+   public void testNonConfDatanucleusValueSet() {
+     String key = "datanucleus.no.such.key";
+     String value = "test_value";
+     String key1 = "blabla.no.such.key";
+     String value1 = "another_value";
+     Assume.assumeTrue(System.getProperty(key) == null);
+     Configuration localConf = MetastoreConf.newMetastoreConf();
+     MetaStoreTestUtils.setConfForStandloneMode(localConf);
+     localConf.set(key, value);
+     localConf.set(key1, value1);
+     objectStore = new ObjectStore();
+     objectStore.setConf(localConf);
+     Assert.assertEquals(value, objectStore.getProp().getProperty(key));
+     Assert.assertNull(objectStore.getProp().getProperty(key1));
+   }
+ 
+   /**
+    * Tests notification operations.
+    */
+   // TODO MS-SPLIT uncomment once we move EventMessage over
+   @Test
+   public void testNotificationOps() throws InterruptedException, MetaException {
+     final int NO_EVENT_ID = 0;
+     final int FIRST_EVENT_ID = 1;
+     final int SECOND_EVENT_ID = 2;
+ 
+     NotificationEvent event =
+         new NotificationEvent(0, 0, EventMessage.EventType.CREATE_DATABASE.toString(), "");
+     NotificationEventResponse eventResponse;
+     CurrentNotificationEventId eventId;
+ 
+     // Verify that there are no notifications available yet
+     eventId = objectStore.getCurrentNotificationEventId();
+     Assert.assertEquals(NO_EVENT_ID, eventId.getEventId());
+ 
+     // Verify that addNotificationEvent() updates the NotificationEvent with the new event ID
+     objectStore.addNotificationEvent(event);
+     Assert.assertEquals(FIRST_EVENT_ID, event.getEventId());
+     objectStore.addNotificationEvent(event);
+     Assert.assertEquals(SECOND_EVENT_ID, event.getEventId());
+ 
+     // Verify that objectStore fetches the latest notification event ID
+     eventId = objectStore.getCurrentNotificationEventId();
+     Assert.assertEquals(SECOND_EVENT_ID, eventId.getEventId());
+ 
+     // Verify that getNextNotification() returns all events
+     eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
+     Assert.assertEquals(2, eventResponse.getEventsSize());
+     Assert.assertEquals(FIRST_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
+     Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(1).getEventId());
+ 
+     // Verify that getNextNotification(last) returns events after a specified event
+     eventResponse = objectStore.getNextNotification(new NotificationEventRequest(FIRST_EVENT_ID));
+     Assert.assertEquals(1, eventResponse.getEventsSize());
+     Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
+ 
+     // Verify that getNextNotification(last) returns zero events if there are no more notifications available
+     eventResponse = objectStore.getNextNotification(new NotificationEventRequest(SECOND_EVENT_ID));
+     Assert.assertEquals(0, eventResponse.getEventsSize());
+ 
+     // Verify that cleanNotificationEvents() cleans up all old notifications
+     Thread.sleep(1);
+     objectStore.cleanNotificationEvents(1);
+     eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
+     Assert.assertEquals(0, eventResponse.getEventsSize());
+   }
+ 
+   @Ignore(
+       "This test is here to allow testing with other databases like mysql / postgres etc\n"
+           + " with  user changes to the code. This cannot be run on apache derby because of\n"
+           + " https://db.apache.org/derby/docs/10.10/devguide/cdevconcepts842385.html"
+   )
+   @Test
+   public void testConcurrentAddNotifications() throws ExecutionException, InterruptedException, MetaException {
+ 
+     final int NUM_THREADS = 10;
+     CyclicBarrier cyclicBarrier = new CyclicBarrier(NUM_THREADS,
+         () -> LoggerFactory.getLogger("test")
+             .debug(NUM_THREADS + " threads going to add notification"));
+ 
+     Configuration conf = MetastoreConf.newMetastoreConf();
+     MetaStoreTestUtils.setConfForStandloneMode(conf);
+     /*
+        Below are the properties that need to be set depending on which database this test is going to be run against.
+      */
+ 
+ //    conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver");
+ //    conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY,
+ //        "jdbc:mysql://localhost:3306/metastore_db");
+ //    conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, "");
+ //    conf.setVar(HiveConf.ConfVars.METASTOREPWD, "");
+ 
+     /*
+      We have to add this entry manually: for tests the DB is initialized via MetaStoreDirectSql and we
+      don't run the schema creation SQL, which includes an insert into notification_sequence that can
+      be locked. The notification_sequence entry is otherwise created via notification_event insertion.
+     */
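+     // The no-op queries below presumably make DataNucleus initialize the notification tables before
+     // the concurrent inserts start (an assumption based on the comment above).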
+     objectStore.getPersistenceManager().newQuery(MNotificationLog.class, "eventType==''").execute();
+     objectStore.getPersistenceManager().newQuery(MNotificationNextId.class, "nextEventId==-1").execute();
+ 
+     objectStore.addNotificationEvent(
+         new NotificationEvent(0, 0,
+             EventMessage.EventType.CREATE_DATABASE.toString(),
+             "CREATE DATABASE DB initial"));
+ 
+     ExecutorService executorService = Executors.newFixedThreadPool(NUM_THREADS);
+     for (int i = 0; i < NUM_THREADS; i++) {
+       final int n = i;
+ 
+       executorService.execute(
+           () -> {
+             ObjectStore store = new ObjectStore();
+             store.setConf(conf);
+ 
+             String eventType = EventMessage.EventType.CREATE_DATABASE.toString();
+             NotificationEvent dbEvent =
+                 new NotificationEvent(0, 0, eventType,
+                     "CREATE DATABASE DB" + n);
+             System.out.println("ADDING NOTIFICATION");
+ 
+             try {
+               cyclicBarrier.await();
+               store.addNotificationEvent(dbEvent);
+             } catch (InterruptedException | BrokenBarrierException | MetaException e) {
+               throw new RuntimeException(e);
+             }
+             System.out.println("FINISH NOTIFICATION");
+           });
+     }
+     executorService.shutdown();
+     Assert.assertTrue(executorService.awaitTermination(15, TimeUnit.SECONDS));
+ 
+     // we have to set this up again as the underlying PMF keeps getting reinitialized and the
+     // original reference is closed
+     ObjectStore store = new ObjectStore();
+     store.setConf(conf);
+ 
+     NotificationEventResponse eventResponse = store.getNextNotification(
+         new NotificationEventRequest());
+     Assert.assertEquals(NUM_THREADS + 1, eventResponse.getEventsSize());
+     long previousId = 0;
+     for (NotificationEvent event : eventResponse.getEvents()) {
+       Assert.assertTrue("previous:" + previousId + " current:" + event.getEventId(),
+           previousId < event.getEventId());
+       Assert.assertTrue(previousId + 1 == event.getEventId());
+       previousId = event.getEventId();
+     }
+   }
+ 
+   private void createTestCatalog(String catName) throws MetaException {
+     Catalog cat = new CatalogBuilder()
+         .setName(catName)
+         .setLocation("/tmp")
+         .build();
+     objectStore.createCatalog(cat);
+   }
+ }
+ 

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
index 0000000,717c5ee..01a8f81
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
@@@ -1,0 -1,233 +1,233 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore;
+ 
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.List;
+ 
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
+ import org.junit.After;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.junit.Ignore;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ 
+ @Category(MetastoreUnitTest.class)
+ public class TestOldSchema {
+   private ObjectStore store = null;
+   private Configuration conf;
+ 
+   private static final Logger LOG = LoggerFactory.getLogger(TestOldSchema.class.getName());
+ 
+   public static class MockPartitionExpressionProxy implements PartitionExpressionProxy {
+     @Override
+     public String convertExprToFilter(byte[] expr) throws MetaException {
+       return null;
+     }
+ 
+     @Override
+     public boolean filterPartitionsByExpr(List<FieldSchema> partColumns, byte[] expr,
+                                           String defaultPartitionName,
+                                           List<String> partitionNames) throws MetaException {
+       return false;
+     }
+ 
+     @Override
+     public FileMetadataExprType getMetadataType(String inputFormat) {
+       return null;
+     }
+ 
+     @Override
+     public SearchArgument createSarg(byte[] expr) {
+       return null;
+     }
+ 
+     @Override
+     public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) {
+       return null;
+     }
+   }
+ 
+   private byte[][] bitVectors = new byte[2][];
+ 
+   @Before
+   public void setUp() throws Exception {
+     conf = MetastoreConf.newMetastoreConf();
+     MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.STATS_FETCH_BITVECTOR, false);
+     MetaStoreTestUtils.setConfForStandloneMode(conf);
+ 
+     store = new ObjectStore();
+     store.setConf(conf);
+     dropAllStoreObjects(store);
+     HiveMetaStore.HMSHandler.createDefaultCatalog(store, new Warehouse(conf));
+ 
+     HyperLogLog hll = HyperLogLog.builder().build();
+     hll.addLong(1);
+     bitVectors[1] = hll.serialize();
+     hll = HyperLogLog.builder().build();
+     hll.addLong(2);
+     hll.addLong(3);
+     hll.addLong(3);
+     hll.addLong(4);
+     bitVectors[0] = hll.serialize();
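+     // bitVectors[1] encodes one distinct value (1); bitVectors[0] encodes three distinct values
+     // (2, 3, 4), since the repeated 3 does not increase the HLL's distinct count.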
+   }
+ 
+   @After
+   public void tearDown() {
+   }
+ 
+   /**
+    * Tests partition operations
+    */
+   @Ignore("HIVE-19509: Disable tests that are failing continuously")
+   @Test
+   public void testPartitionOps() throws Exception {
+     String dbName = "default";
+     String tableName = "snp";
+     Database db1 = new DatabaseBuilder()
+         .setName(dbName)
+         .setDescription("description")
+         .setLocation("locationurl")
+         .build(conf);
+     store.createDatabase(db1);
+     long now = System.currentTimeMillis();
+     List<FieldSchema> cols = new ArrayList<>();
+     cols.add(new FieldSchema("col1", "long", "nocomment"));
+     SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+     StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+         serde, null, null, Collections.emptyMap());
+     List<FieldSchema> partCols = new ArrayList<>();
+     partCols.add(new FieldSchema("ds", "string", ""));
+     Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols,
+         Collections.emptyMap(), null, null, null);
+     store.createTable(table);
+ 
+     Deadline.startTimer("getPartition");
+     for (int i = 0; i < 10; i++) {
+       List<String> partVal = new ArrayList<>();
+       partVal.add(String.valueOf(i));
+       StorageDescriptor psd = new StorageDescriptor(sd);
+       psd.setLocation("file:/tmp/default/hit/ds=" + partVal);
+       Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd,
+           Collections.emptyMap());
+       part.setCatName(DEFAULT_CATALOG_NAME);
+       store.addPartition(part);
+       ColumnStatistics cs = new ColumnStatistics();
+       ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName);
+       desc.setLastAnalyzed(now);
+       desc.setPartName("ds=" + String.valueOf(i));
+       cs.setStatsDesc(desc);
+       ColumnStatisticsObj obj = new ColumnStatisticsObj();
+       obj.setColName("col1");
+       obj.setColType("bigint");
+       ColumnStatisticsData data = new ColumnStatisticsData();
+       LongColumnStatsData dcsd = new LongColumnStatsData();
+       dcsd.setHighValue(1000 + i);
+       dcsd.setLowValue(-1000 - i);
+       dcsd.setNumNulls(i);
+       dcsd.setNumDVs(10 * i + 1);
+       dcsd.setBitVectors(bitVectors[0]);
+       data.setLongStats(dcsd);
+       obj.setStatsData(data);
+       cs.addToStatsObj(obj);
 -      store.updatePartitionColumnStatistics(cs, partVal);
++      store.updatePartitionColumnStatistics(cs, partVal, -1, null, -1);
+ 
+     }
+ 
+     Checker statChecker = new Checker() {
+       @Override
+       public void checkStats(AggrStats aggrStats) throws Exception {
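+         // Expected aggregates for the 10 partitions created above: high value 1009 (1000 + 9),
+         // low value -1009, numNulls 45 (0 + 1 + ... + 9) and numDVs 91 (10 * 9 + 1).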
+         Assert.assertEquals(10, aggrStats.getPartsFound());
+         Assert.assertEquals(1, aggrStats.getColStatsSize());
+         ColumnStatisticsObj cso = aggrStats.getColStats().get(0);
+         Assert.assertEquals("col1", cso.getColName());
+         Assert.assertEquals("bigint", cso.getColType());
+         LongColumnStatsData lcsd = cso.getStatsData().getLongStats();
+         Assert.assertEquals(1009, lcsd.getHighValue(), 0.01);
+         Assert.assertEquals(-1009, lcsd.getLowValue(), 0.01);
+         Assert.assertEquals(45, lcsd.getNumNulls());
+         Assert.assertEquals(91, lcsd.getNumDVs());
+       }
+     };
+     List<String> partNames = new ArrayList<>();
+     for (int i = 0; i < 10; i++) {
+       partNames.add("ds=" + i);
+     }
+     AggrStats aggrStats = store.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tableName, partNames,
+         Arrays.asList("col1"));
+     statChecker.checkStats(aggrStats);
+ 
+   }
+ 
+   private interface Checker {
+     void checkStats(AggrStats aggrStats) throws Exception;
+   }
+ 
+   private static void dropAllStoreObjects(RawStore store) throws MetaException,
+       InvalidObjectException, InvalidInputException {
+     try {
+       Deadline.registerIfNot(100000);
+       Deadline.startTimer("getPartition");
+       List<String> dbs = store.getAllDatabases(DEFAULT_CATALOG_NAME);
+       for (int i = 0; i < dbs.size(); i++) {
+         String db = dbs.get(i);
+         List<String> tbls = store.getAllTables(DEFAULT_CATALOG_NAME, db);
+         for (String tbl : tbls) {
+           List<Partition> parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100);
+           for (Partition part : parts) {
+             store.dropPartition(DEFAULT_CATALOG_NAME, db, tbl, part.getValues());
+           }
+           store.dropTable(DEFAULT_CATALOG_NAME, db, tbl);
+         }
+         store.dropDatabase(DEFAULT_CATALOG_NAME, db);
+       }
+     } catch (NoSuchObjectException e) {
+     }
+   }
+ 
+ }


[30/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index 0000000,cb32236..75ab80b
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@@ -1,0 -1,3103 +1,3102 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import java.lang.reflect.Field;
+ import java.io.IOException;
+ import java.sql.Connection;
+ import java.sql.DriverManager;
+ import java.sql.PreparedStatement;
+ import java.sql.SQLException;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.LinkedHashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Set;
+ import java.util.concurrent.Callable;
+ import java.util.concurrent.ExecutorService;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.Future;
+ import java.util.concurrent.TimeUnit;
+ 
+ import com.google.common.collect.Sets;
+ import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.utils.FileUtils;
+ import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+ import org.datanucleus.api.jdo.JDOPersistenceManager;
+ import org.datanucleus.api.jdo.JDOPersistenceManagerFactory;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.fs.permission.FsPermission;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.FunctionType;
+ import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.ResourceType;
+ import org.apache.hadoop.hive.metastore.api.ResourceUri;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.Type;
+ import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.util.StringUtils;
+ import org.apache.thrift.TException;
+ import org.junit.Test;
+ 
+ import com.google.common.collect.Lists;
+ 
+ import static org.junit.Assert.assertEquals;
+ import static org.junit.Assert.assertFalse;
+ import static org.junit.Assert.assertNotNull;
+ import static org.junit.Assert.assertNull;
+ import static org.junit.Assert.assertTrue;
+ import static org.junit.Assert.fail;
+ 
+ public abstract class TestHiveMetaStore {
+   private static final Logger LOG = LoggerFactory.getLogger(TestHiveMetaStore.class);
+   protected static HiveMetaStoreClient client;
+   protected static Configuration conf = MetastoreConf.newMetastoreConf();
+   protected static Warehouse warehouse;
+   protected static boolean isThriftClient = false;
+ 
+   private static final String TEST_DB1_NAME = "testdb1";
+   private static final String TEST_DB2_NAME = "testdb2";
+ 
+   private static final int DEFAULT_LIMIT_PARTITION_REQUEST = 100;
+ 
+   protected abstract HiveMetaStoreClient createClient() throws Exception;
+ 
+   @Before
+   public void setUp() throws Exception {
+     warehouse = new Warehouse(conf);
+ 
+     // set some values to use for getting conf. vars
+     MetastoreConf.setBoolVar(conf, ConfVars.METRICS_ENABLED, true);
+     conf.set("hive.key1", "value1");
+     conf.set("hive.key2", "http://www.example.com");
+     conf.set("hive.key3", "");
+     conf.set("hive.key4", "0");
+     conf.set("datanucleus.autoCreateTables", "false");
+ 
+     MetaStoreTestUtils.setConfForStandloneMode(conf);
+     MetastoreConf.setLongVar(conf, ConfVars.BATCH_RETRIEVE_MAX, 2);
+     MetastoreConf.setLongVar(conf, ConfVars.LIMIT_PARTITION_REQUEST, DEFAULT_LIMIT_PARTITION_REQUEST);
+     MetastoreConf.setVar(conf, ConfVars.STORAGE_SCHEMA_READER_IMPL, "no.such.class");
+   }
+ 
+   @Test
+   public void testNameMethods() {
+     Map<String, String> spec = new LinkedHashMap<>();
+     spec.put("ds", "2008-07-01 14:13:12");
+     spec.put("hr", "14");
+     List<String> vals = new ArrayList<>();
+     vals.addAll(spec.values());
+     String partName = "ds=2008-07-01 14%3A13%3A12/hr=14";
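+     // "%3A" is the escaped form of ':' in the partition name.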
+ 
+     try {
+       List<String> testVals = client.partitionNameToVals(partName);
+       assertTrue("Values from name are incorrect", vals.equals(testVals));
+ 
+       Map<String, String> testSpec = client.partitionNameToSpec(partName);
+       assertTrue("Spec from name is incorrect", spec.equals(testSpec));
+ 
+       List<String> emptyVals = client.partitionNameToVals("");
+       assertTrue("Values should be empty", emptyVals.size() == 0);
+ 
+       Map<String, String> emptySpec =  client.partitionNameToSpec("");
+       assertTrue("Spec should be empty", emptySpec.size() == 0);
+     } catch (Exception e) {
+       fail();
+     }
+   }
+ 
+   /**
+    * Tests create table and partition, and tries to drop the table without
+    * dropping the partition.
+    *
+    */
+   @Test
+   public void testPartition() throws Exception {
+     partitionTester(client, conf);
+   }
+ 
+   private static void partitionTester(HiveMetaStoreClient client, Configuration conf)
+     throws Exception {
+     try {
+       String dbName = "compdb";
+       String tblName = "comptbl";
+       String typeName = "Person";
+       List<String> vals = makeVals("2008-07-01 14:13:12", "14");
+       List<String> vals2 = makeVals("2008-07-01 14:13:12", "15");
+       List<String> vals3 = makeVals("2008-07-02 14:13:12", "15");
+       List<String> vals4 = makeVals("2008-07-03 14:13:12", "151");
+ 
+       client.dropTable(dbName, tblName);
+       silentDropDatabase(dbName);
+       new DatabaseBuilder()
+           .setName(dbName)
+           .create(client, conf);
+       Database db = client.getDatabase(dbName);
+       Path dbPath = new Path(db.getLocationUri());
+       FileSystem fs = FileSystem.get(dbPath.toUri(), conf);
+ 
+       client.dropType(typeName);
+       Type typ1 = new Type();
+       typ1.setName(typeName);
+       typ1.setFields(new ArrayList<>(2));
+       typ1.getFields().add(
+           new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+       typ1.getFields().add(
+           new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+       client.createType(typ1);
+ 
+       List<String> skewedColValue = Collections.singletonList("1");
+       Table tbl = new TableBuilder()
+           .setDbName(dbName)
+           .setTableName(tblName)
+           .setCols(typ1.getFields())
+           .setNumBuckets(1)
+           .addBucketCol("name")
+           .addTableParam("test_param_1", "Use this for comments etc")
+           .addSerdeParam(ColumnType.SERIALIZATION_FORMAT, "1")
+           .addSkewedColName("name")
+           .setSkewedColValues(Collections.singletonList(skewedColValue))
+           .setSkewedColValueLocationMaps(Collections.singletonMap(skewedColValue, "location1"))
+           .addPartCol("ds", ColumnType.STRING_TYPE_NAME)
+           .addPartCol("hr", ColumnType.STRING_TYPE_NAME)
+           .create(client, conf);
+ 
+       if (isThriftClient) {
+         // the createTable() above does not update the location in the 'tbl'
+         // object when the client is a thrift client and the code below relies
+         // on the location being present in the 'tbl' object - so get the table
+         // from the metastore
+         tbl = client.getTable(dbName, tblName);
+       }
+ 
+       Partition part = makePartitionObject(dbName, tblName, vals, tbl, "/part1");
+       Partition part2 = makePartitionObject(dbName, tblName, vals2, tbl, "/part2");
+       Partition part3 = makePartitionObject(dbName, tblName, vals3, tbl, "/part3");
+       Partition part4 = makePartitionObject(dbName, tblName, vals4, tbl, "/part4");
+ 
+       // check if the partition exists (it shouldn't)
+       boolean exceptionThrown = false;
+       try {
+         Partition p = client.getPartition(dbName, tblName, vals);
+       } catch(Exception e) {
+         assertEquals("partition should not have existed",
+             NoSuchObjectException.class, e.getClass());
+         exceptionThrown = true;
+       }
+       assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
+       Partition retp = client.add_partition(part);
+       assertNotNull("Unable to create partition " + part, retp);
+       Partition retp2 = client.add_partition(part2);
+       assertNotNull("Unable to create partition " + part2, retp2);
+       Partition retp3 = client.add_partition(part3);
+       assertNotNull("Unable to create partition " + part3, retp3);
+       Partition retp4 = client.add_partition(part4);
+       assertNotNull("Unable to create partition " + part4, retp4);
+ 
+       Partition part_get = client.getPartition(dbName, tblName, part.getValues());
 -      if(isThriftClient) {
 -        // since we are using thrift, 'part' will not have the create time and
 -        // last DDL time set since it does not get updated in the add_partition()
 -        // call - likewise part2 and part3 - set it correctly so that equals check
 -        // doesn't fail
 -        adjust(client, part, dbName, tblName);
 -        adjust(client, part2, dbName, tblName);
 -        adjust(client, part3, dbName, tblName);
 -      }
++      // since we are using thrift, 'part' will not have the create time and
++      // last DDL time set since it does not get updated in the add_partition()
++      // call - likewise part2 and part3 - set it correctly so that equals check
++      // doesn't fail
++      adjust(client, part, dbName, tblName, isThriftClient);
++      adjust(client, part2, dbName, tblName, isThriftClient);
++      adjust(client, part3, dbName, tblName, isThriftClient);
+       assertTrue("Partitions are not same", part.equals(part_get));
+ 
+       // check null cols schemas for a partition
+       List<String> vals6 = makeVals("2016-02-22 00:00:00", "16");
+       Partition part6 = makePartitionObject(dbName, tblName, vals6, tbl, "/part5");
+       part6.getSd().setCols(null);
+       LOG.info("Creating partition will null field schema");
+       client.add_partition(part6);
+       LOG.info("Listing all partitions for table " + dbName + "." + tblName);
+       final List<Partition> partitions = client.listPartitions(dbName, tblName, (short) -1);
+       boolean foundPart = false;
+       for (Partition p : partitions) {
+         if (p.getValues().equals(vals6)) {
+           assertNull(p.getSd().getCols());
+           LOG.info("Found partition " + p + " having null field schema");
+           foundPart = true;
+         }
+       }
+       assertTrue(foundPart);
+ 
+       String partName = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=14";
+       String part2Name = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=15";
+       String part3Name = "ds=" + FileUtils.escapePathName("2008-07-02 14:13:12") + "/hr=15";
+       String part4Name = "ds=" + FileUtils.escapePathName("2008-07-03 14:13:12") + "/hr=151";
+ 
+       part_get = client.getPartition(dbName, tblName, partName);
+       assertTrue("Partitions are not the same", part.equals(part_get));
+ 
+       // Test partition listing with a partial spec - ds is specified but hr is not
+       List<String> partialVals = new ArrayList<>();
+       partialVals.add(vals.get(0));
+       Set<Partition> parts = new HashSet<>();
+       parts.add(part);
+       parts.add(part2);
+ 
+       List<Partition> partial = client.listPartitions(dbName, tblName, partialVals,
+           (short) -1);
+       assertTrue("Should have returned 2 partitions", partial.size() == 2);
+       assertTrue("Not all parts returned", partial.containsAll(parts));
+ 
+       Set<String> partNames = new HashSet<>();
+       partNames.add(partName);
+       partNames.add(part2Name);
+       List<String> partialNames = client.listPartitionNames(dbName, tblName, partialVals,
+           (short) -1);
+       assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
+       assertTrue("Not all part names returned", partialNames.containsAll(partNames));
+ 
+       partNames.add(part3Name);
+       partNames.add(part4Name);
+       partialVals.clear();
+       partialVals.add("");
+       partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
+       assertTrue("Should have returned 5 partition names", partialNames.size() == 5);
+       assertTrue("Not all part names returned", partialNames.containsAll(partNames));
+ 
+       // Test partition listing with a partial spec - hr is specified but ds is not
+       parts.clear();
+       parts.add(part2);
+       parts.add(part3);
+ 
+       partialVals.clear();
+       partialVals.add("");
+       partialVals.add(vals2.get(1));
+ 
+       partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
+       assertEquals("Should have returned 2 partitions", 2, partial.size());
+       assertTrue("Not all parts returned", partial.containsAll(parts));
+ 
+       partNames.clear();
+       partNames.add(part2Name);
+       partNames.add(part3Name);
+       partialNames = client.listPartitionNames(dbName, tblName, partialVals,
+           (short) -1);
+       assertEquals("Should have returned 2 partition names", 2, partialNames.size());
+       assertTrue("Not all part names returned", partialNames.containsAll(partNames));
+ 
+       // Verify escaped partition names don't return partitions
+       exceptionThrown = false;
+       try {
+         String badPartName = "ds=2008-07-01 14%3A13%3A12/hrs=14";
+         client.getPartition(dbName, tblName, badPartName);
+       } catch(NoSuchObjectException e) {
+         exceptionThrown = true;
+       }
+       assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);
+ 
+       Path partPath = new Path(part.getSd().getLocation());
+ 
+ 
+       assertTrue(fs.exists(partPath));
+       client.dropPartition(dbName, tblName, part.getValues(), true);
+       assertFalse(fs.exists(partPath));
+ 
+       // Test append_partition_by_name
+       client.appendPartition(dbName, tblName, partName);
+       Partition part5 = client.getPartition(dbName, tblName, part.getValues());
+       assertTrue("Append partition by name failed", part5.getValues().equals(vals));
+       Path part5Path = new Path(part5.getSd().getLocation());
+       assertTrue(fs.exists(part5Path));
+ 
+       // Test drop_partition_by_name
+       assertTrue("Drop partition by name failed",
+           client.dropPartition(dbName, tblName, partName, true));
+       assertFalse(fs.exists(part5Path));
+ 
+       // add the partition again so that drop table with a partition can be
+       // tested
+       retp = client.add_partition(part);
+       assertNotNull("Unable to create partition " + part, retp);
+ 
+       // test add_partitions
+ 
+       List<String> mvals1 = makeVals("2008-07-04 14:13:12", "14641");
+       List<String> mvals2 = makeVals("2008-07-04 14:13:12", "14642");
+       List<String> mvals3 = makeVals("2008-07-04 14:13:12", "14643");
+       List<String> mvals4 = makeVals("2008-07-04 14:13:12", "14643"); // equal to 3
+       List<String> mvals5 = makeVals("2008-07-04 14:13:12", "14645");
+ 
+       Exception savedException;
+ 
+       // add_partitions(empty list) : ok, normal operation
+       client.add_partitions(new ArrayList<>());
+ 
+       // add_partitions(1,2,3) : ok, normal operation
+       Partition mpart1 = makePartitionObject(dbName, tblName, mvals1, tbl, "/mpart1");
+       Partition mpart2 = makePartitionObject(dbName, tblName, mvals2, tbl, "/mpart2");
+       Partition mpart3 = makePartitionObject(dbName, tblName, mvals3, tbl, "/mpart3");
+       client.add_partitions(Arrays.asList(mpart1,mpart2,mpart3));
+ 
 -      if(isThriftClient) {
 -        // do DDL time munging if thrift mode
 -        adjust(client, mpart1, dbName, tblName);
 -        adjust(client, mpart2, dbName, tblName);
 -        adjust(client, mpart3, dbName, tblName);
 -      }
++      // do DDL time munging if thrift mode
++      adjust(client, mpart1, dbName, tblName, isThriftClient);
++      adjust(client, mpart2, dbName, tblName, isThriftClient);
++      adjust(client, mpart3, dbName, tblName, isThriftClient);
+       verifyPartitionsPublished(client, dbName, tblName,
+           Arrays.asList(mvals1.get(0)),
+           Arrays.asList(mpart1,mpart2,mpart3));
+ 
+       Partition mpart4 = makePartitionObject(dbName, tblName, mvals4, tbl, "/mpart4");
+       Partition mpart5 = makePartitionObject(dbName, tblName, mvals5, tbl, "/mpart5");
+ 
+       // create dir for /mpart5
+       Path mp5Path = new Path(mpart5.getSd().getLocation());
+       warehouse.mkdirs(mp5Path);
+       assertTrue(fs.exists(mp5Path));
+ 
+       // add_partitions(5,4) : err = duplicate keyvals on mpart4
+       savedException = null;
+       try {
+         client.add_partitions(Arrays.asList(mpart5,mpart4));
+       } catch (Exception e) {
+         savedException = e;
+       } finally {
+         assertNotNull(savedException);
+       }
+ 
+       // check that /mpart4 does not exist, but /mpart5 still does.
+       assertTrue(fs.exists(mp5Path));
+       assertFalse(fs.exists(new Path(mpart4.getSd().getLocation())));
+ 
+       // add_partitions(5) : ok
+       client.add_partitions(Arrays.asList(mpart5));
+ 
 -      if(isThriftClient) {
 -        // do DDL time munging if thrift mode
 -        adjust(client, mpart5, dbName, tblName);
 -      }
++      // do DDL time munging if thrift mode
++      adjust(client, mpart5, dbName, tblName, isThriftClient);
+ 
+       verifyPartitionsPublished(client, dbName, tblName,
+           Arrays.asList(mvals1.get(0)),
+           Arrays.asList(mpart1,mpart2,mpart3,mpart5));
+ 
+       //// end add_partitions tests
+ 
+       client.dropTable(dbName, tblName);
+ 
+       client.dropType(typeName);
+ 
+       // recreate table as external, drop partition and it should
+       // still exist
+       tbl.setParameters(new HashMap<>());
+       tbl.getParameters().put("EXTERNAL", "TRUE");
+       client.createTable(tbl);
+       retp = client.add_partition(part);
+       assertTrue(fs.exists(partPath));
+       client.dropPartition(dbName, tblName, part.getValues(), true);
+       assertTrue(fs.exists(partPath));
+ 
+       for (String tableName : client.getTables(dbName, "*")) {
+         client.dropTable(dbName, tableName);
+       }
+ 
+       client.dropDatabase(dbName);
+ 
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testPartition() failed.");
+       throw e;
+     }
+   }
+ 
+   private static void verifyPartitionsPublished(HiveMetaStoreClient client,
+       String dbName, String tblName, List<String> partialSpec,
+       List<Partition> expectedPartitions) throws TException {
+     // Test partition listing with a partial spec
+ 
+     List<Partition> mpartial = client.listPartitions(dbName, tblName, partialSpec,
+         (short) -1);
+     assertEquals("Should have returned "+expectedPartitions.size()+
+         " partitions, returned " + mpartial.size(),
+         expectedPartitions.size(), mpartial.size());
+     assertTrue("Not all parts returned", mpartial.containsAll(expectedPartitions));
+   }
+ 
+   private static List<String> makeVals(String ds, String id) {
+     List<String> vals4 = new ArrayList<>(2);
+     vals4.add(ds);
+     vals4.add(id);
+     return vals4;
+   }
+ 
+   private static Partition makePartitionObject(String dbName, String tblName,
+       List<String> ptnVals, Table tbl, String ptnLocationSuffix) throws MetaException {
+     Partition part4 = new Partition();
+     part4.setDbName(dbName);
+     part4.setTableName(tblName);
+     part4.setValues(ptnVals);
+     part4.setParameters(new HashMap<>());
+     part4.setSd(tbl.getSd().deepCopy());
+     part4.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo().deepCopy());
+     part4.getSd().setLocation(tbl.getSd().getLocation() + ptnLocationSuffix);
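+     // fill in the partition's fast stats from the warehouse before handing it back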
+     MetaStoreUtils.updatePartitionStatsFast(part4, tbl, warehouse, false, false, null, true);
+     return part4;
+   }
+ 
+   @Test
+   public void testListPartitions() throws Throwable {
+     // create a table with multiple partitions
+     String dbName = "compdb";
+     String tblName = "comptbl";
+     String typeName = "Person";
+ 
+     cleanUp(dbName, tblName, typeName);
+ 
+     List<List<String>> values = new ArrayList<>();
+     values.add(makeVals("2008-07-01 14:13:12", "14"));
+     values.add(makeVals("2008-07-01 14:13:12", "15"));
+     values.add(makeVals("2008-07-02 14:13:12", "15"));
+     values.add(makeVals("2008-07-03 14:13:12", "151"));
+ 
+     createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+ 
+     List<Partition> partitions = client.listPartitions(dbName, tblName, (short)-1);
+     assertNotNull("should have returned partitions", partitions);
+     assertEquals(" should have returned " + values.size() +
+       " partitions", values.size(), partitions.size());
+ 
+     partitions = client.listPartitions(dbName, tblName, (short)(values.size()/2));
+ 
+     assertNotNull("should have returned partitions", partitions);
+     assertEquals(" should have returned " + values.size() / 2 +
+       " partitions",values.size() / 2, partitions.size());
+ 
+ 
+     partitions = client.listPartitions(dbName, tblName, (short) (values.size() * 2));
+ 
+     assertNotNull("should have returned partitions", partitions);
+     assertEquals(" should have returned " + values.size() +
+       " partitions",values.size(), partitions.size());
+ 
+     cleanUp(dbName, tblName, typeName);
+ 
+   }
+ 
+   @Test
+   public void testListPartitionsWithLimitEnabled() throws Throwable {
+     // create a table with multiple partitions
+     String dbName = "compdb";
+     String tblName = "comptbl";
+     String typeName = "Person";
+ 
+     cleanUp(dbName, tblName, typeName);
+ 
+     // Create too many partitions, just enough to validate over limit requests
+     List<List<String>> values = new ArrayList<>();
+     for (int i=0; i<DEFAULT_LIMIT_PARTITION_REQUEST + 1; i++) {
+       values.add(makeVals("2008-07-01 14:13:12", Integer.toString(i)));
+     }
+ 
+     createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+ 
+     List<Partition> partitions;
+     short maxParts;
+ 
+     // Requesting more partitions than allowed should throw an exception
+     try {
+       maxParts = -1;
+       partitions = client.listPartitions(dbName, tblName, maxParts);
+       fail("should have thrown MetaException about partition limit");
+     } catch (MetaException e) {
+       assertTrue(true);
+     }
+ 
+     // Requesting more partitions than allowed should throw an exception
+     try {
+       maxParts = DEFAULT_LIMIT_PARTITION_REQUEST + 1;
+       partitions = client.listPartitions(dbName, tblName, maxParts);
+       fail("should have thrown MetaException about partition limit");
+     } catch (MetaException e) {
+       assertTrue(true);
+     }
+ 
+     // Requesting less partitions than allowed should work
+     maxParts = DEFAULT_LIMIT_PARTITION_REQUEST / 2;
+     partitions = client.listPartitions(dbName, tblName, maxParts);
+     assertNotNull("should have returned partitions", partitions);
+     assertEquals(" should have returned 50 partitions", maxParts, partitions.size());
+   }
+ 
+   @Test
+   public void testAlterTableCascade() throws Throwable {
+     // create a table with multiple partitions
+     String dbName = "compdb";
+     String tblName = "comptbl";
+     String typeName = "Person";
+ 
+     cleanUp(dbName, tblName, typeName);
+ 
+     List<List<String>> values = new ArrayList<>();
+     values.add(makeVals("2008-07-01 14:13:12", "14"));
+     values.add(makeVals("2008-07-01 14:13:12", "15"));
+     values.add(makeVals("2008-07-02 14:13:12", "15"));
+     values.add(makeVals("2008-07-03 14:13:12", "151"));
+ 
+     createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+     Table tbl = client.getTable(dbName, tblName);
+     List<FieldSchema> cols = tbl.getSd().getCols();
+     cols.add(new FieldSchema("new_col", ColumnType.STRING_TYPE_NAME, ""));
+     tbl.getSd().setCols(cols);
+     //add new column with cascade option
+     client.alter_table(dbName, tblName, tbl, true);
+     // verify the table now shows the new column
+     Table tbl2 = client.getTable(dbName, tblName);
+     assertEquals("Unexpected number of cols", 3, tbl2.getSd().getCols().size());
+     assertEquals("Unexpected column name", "new_col", tbl2.getSd().getCols().get(2).getName());
+     //get a partition
+     List<String> pvalues = new ArrayList<>(2);
+     pvalues.add("2008-07-01 14:13:12");
+     pvalues.add("14");
+     Partition partition = client.getPartition(dbName, tblName, pvalues);
+     assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size());
+     assertEquals("Unexpected column name", "new_col", partition.getSd().getCols().get(2).getName());
+ 
+     //add another column
+     cols = tbl.getSd().getCols();
+     cols.add(new FieldSchema("new_col2", ColumnType.STRING_TYPE_NAME, ""));
+     tbl.getSd().setCols(cols);
+     //add new column with no cascade option
+     client.alter_table(dbName, tblName, tbl, false);
+     tbl2 = client.getTable(dbName, tblName);
+     assertEquals("Unexpected number of cols", 4, tbl2.getSd().getCols().size());
+     assertEquals("Unexpected column name", "new_col2", tbl2.getSd().getCols().get(3).getName());
+     //get partition, this partition should not have the newly added column since cascade option
+     //was false
+     partition = client.getPartition(dbName, tblName, pvalues);
+     assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size());
+   }
+ 
+ 
+   @Test
+   public void testListPartitionNames() throws Throwable {
+     // create a table with multiple partitions
+     String dbName = "compdb";
+     String tblName = "comptbl";
+     String typeName = "Person";
+ 
+     cleanUp(dbName, tblName, typeName);
+ 
+     List<List<String>> values = new ArrayList<>();
+     values.add(makeVals("2008-07-01 14:13:12", "14"));
+     values.add(makeVals("2008-07-01 14:13:12", "15"));
+     values.add(makeVals("2008-07-02 14:13:12", "15"));
+     values.add(makeVals("2008-07-03 14:13:12", "151"));
+ 
+     createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+ 
+     List<String> partitions = client.listPartitionNames(dbName, tblName, (short)-1);
+     assertNotNull("should have returned partitions", partitions);
+     assertEquals(" should have returned " + values.size() +
+       " partitions", values.size(), partitions.size());
+ 
+     partitions = client.listPartitionNames(dbName, tblName, (short)(values.size()/2));
+ 
+     assertNotNull("should have returned partitions", partitions);
+     assertEquals(" should have returned " + values.size() / 2 +
+       " partitions",values.size() / 2, partitions.size());
+ 
+ 
+     partitions = client.listPartitionNames(dbName, tblName, (short) (values.size() * 2));
+ 
+     assertNotNull("should have returned partitions", partitions);
+     assertEquals(" should have returned " + values.size() +
+       " partitions",values.size(), partitions.size());
+ 
+     cleanUp(dbName, tblName, typeName);
+ 
+   }
+ 
+ 
+   @Test
+   public void testDropTable() throws Throwable {
+     // create a table with multiple partitions
+     String dbName = "compdb";
+     String tblName = "comptbl";
+     String typeName = "Person";
+ 
+     cleanUp(dbName, tblName, typeName);
+ 
+     List<List<String>> values = new ArrayList<>();
+     values.add(makeVals("2008-07-01 14:13:12", "14"));
+     values.add(makeVals("2008-07-01 14:13:12", "15"));
+     values.add(makeVals("2008-07-02 14:13:12", "15"));
+     values.add(makeVals("2008-07-03 14:13:12", "151"));
+ 
+     createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+ 
+     client.dropTable(dbName, tblName);
+     client.dropType(typeName);
+ 
+     boolean exceptionThrown = false;
+     try {
+       client.getTable(dbName, tblName);
+     } catch(Exception e) {
+       assertEquals("table should not have existed",
+           NoSuchObjectException.class, e.getClass());
+       exceptionThrown = true;
+     }
+     assertTrue("Table " + tblName + " should have been dropped ", exceptionThrown);
+ 
+   }
+ 
+   @Test
+   public void testAlterViewPartition() throws Throwable {
+     String dbName = "compdb";
+     String tblName = "comptbl";
+     String viewName = "compView";
+ 
+     client.dropTable(dbName, tblName);
+     silentDropDatabase(dbName);
+     new DatabaseBuilder()
+         .setName(dbName)
+         .setDescription("Alter Partition Test database")
+         .create(client, conf);
+ 
+     Table tbl = new TableBuilder()
+         .setDbName(dbName)
+         .setTableName(tblName)
+         .addCol("name", ColumnType.STRING_TYPE_NAME)
+         .addCol("income", ColumnType.INT_TYPE_NAME)
+         .create(client, conf);
+ 
+     if (isThriftClient) {
+       // the createTable() above does not update the location in the 'tbl'
+       // object when the client is a thrift client and the code below relies
+       // on the location being present in the 'tbl' object - so get the table
+       // from the metastore
+       tbl = client.getTable(dbName, tblName);
+     }
+ 
+     ArrayList<FieldSchema> viewCols = new ArrayList<>(1);
+     viewCols.add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ 
+     ArrayList<FieldSchema> viewPartitionCols = new ArrayList<>(1);
+     viewPartitionCols.add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+ 
+     Table view = new Table();
+     view.setDbName(dbName);
+     view.setTableName(viewName);
+     view.setTableType(TableType.VIRTUAL_VIEW.name());
+     view.setPartitionKeys(viewPartitionCols);
+     view.setViewOriginalText("SELECT income, name FROM " + tblName);
+     view.setViewExpandedText("SELECT `" + tblName + "`.`income`, `" + tblName +
+         "`.`name` FROM `" + dbName + "`.`" + tblName + "`");
+     view.setRewriteEnabled(false);
+     StorageDescriptor viewSd = new StorageDescriptor();
+     view.setSd(viewSd);
+     viewSd.setCols(viewCols);
+     viewSd.setCompressed(false);
+     viewSd.setParameters(new HashMap<>());
+     viewSd.setSerdeInfo(new SerDeInfo());
+     viewSd.getSerdeInfo().setParameters(new HashMap<>());
+ 
+     client.createTable(view);
+ 
+     if (isThriftClient) {
+       // the createTable() above does not update the location in the 'tbl'
+       // object when the client is a thrift client and the code below relies
+       // on the location being present in the 'tbl' object - so get the table
+       // from the metastore
+       view = client.getTable(dbName, viewName);
+     }
+ 
+     List<String> vals = new ArrayList<>(1);
+     vals.add("abc");
+ 
+     Partition part = new Partition();
+     part.setDbName(dbName);
+     part.setTableName(viewName);
+     part.setValues(vals);
+     part.setParameters(new HashMap<>());
+ 
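+     // a view partition carries only values and parameters; no storage descriptor is set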
+     client.add_partition(part);
+ 
+     Partition part2 = client.getPartition(dbName, viewName, part.getValues());
+ 
+     part2.getParameters().put("a", "b");
+ 
+     client.alter_partition(dbName, viewName, part2, null);
+ 
+     Partition part3 = client.getPartition(dbName, viewName, part.getValues());
+     assertEquals("couldn't view alter partition", part3.getParameters().get(
+         "a"), "b");
+ 
+     client.dropTable(dbName, viewName);
+ 
+     client.dropTable(dbName, tblName);
+ 
+     client.dropDatabase(dbName);
+   }
+ 
+   @Test
+   public void testAlterPartition() throws Throwable {
+ 
+     try {
+       String dbName = "compdb";
+       String tblName = "comptbl";
+       List<String> vals = new ArrayList<>(2);
+       vals.add("2008-07-01");
+       vals.add("14");
+ 
+       client.dropTable(dbName, tblName);
+       silentDropDatabase(dbName);
+       new DatabaseBuilder()
+           .setName(dbName)
+           .setDescription("Alter Partition Test database")
+           .create(client, conf);
+ 
+       Table tbl = new TableBuilder()
+           .setDbName(dbName)
+           .setTableName(tblName)
+           .addCol("name", ColumnType.STRING_TYPE_NAME)
+           .addCol("income", ColumnType.INT_TYPE_NAME)
+           .addTableParam("test_param_1", "Use this for comments etc")
+           .addBucketCol("name")
+           .addSerdeParam(ColumnType.SERIALIZATION_FORMAT, "1")
+           .addPartCol("ds", ColumnType.STRING_TYPE_NAME)
+           .addPartCol("hr", ColumnType.INT_TYPE_NAME)
+           .create(client, conf);
+ 
+       if (isThriftClient) {
+         // the createTable() above does not update the location in the 'tbl'
+         // object when the client is a thrift client and the code below relies
+         // on the location being present in the 'tbl' object - so get the table
+         // from the metastore
+         tbl = client.getTable(dbName, tblName);
+       }
+ 
+       Partition part = new Partition();
+       part.setDbName(dbName);
+       part.setTableName(tblName);
+       part.setValues(vals);
+       part.setParameters(new HashMap<>());
+       part.setSd(tbl.getSd());
+       part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
+       part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
+ 
+       client.add_partition(part);
+ 
+       Partition part2 = client.getPartition(dbName, tblName, part.getValues());
+ 
+       part2.getParameters().put("retention", "10");
+       part2.getSd().setNumBuckets(12);
+       part2.getSd().getSerdeInfo().getParameters().put("abc", "1");
+       client.alter_partition(dbName, tblName, part2, null);
+ 
+       Partition part3 = client.getPartition(dbName, tblName, part.getValues());
+       assertEquals("couldn't alter partition", part3.getParameters().get(
+           "retention"), "10");
+       assertEquals("couldn't alter partition", part3.getSd().getSerdeInfo()
+           .getParameters().get("abc"), "1");
+       assertEquals("couldn't alter partition", part3.getSd().getNumBuckets(),
+           12);
+ 
+       client.dropTable(dbName, tblName);
+ 
+       client.dropDatabase(dbName);
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testPartition() failed.");
+       throw e;
+     }
+   }
+ 
+   @Test
+   public void testRenamePartition() throws Throwable {
+ 
+     try {
+       String dbName = "compdb1";
+       String tblName = "comptbl1";
+       List<String> vals = new ArrayList<>(2);
+       vals.add("2011-07-11");
+       vals.add("8");
+       String part_path = "/ds=2011-07-11/hr=8";
+       List<String> tmp_vals = new ArrayList<>(2);
+       tmp_vals.add("tmp_2011-07-11");
+       tmp_vals.add("-8");
+       String part2_path = "/ds=tmp_2011-07-11/hr=-8";
+ 
+       client.dropTable(dbName, tblName);
+       silentDropDatabase(dbName);
+       new DatabaseBuilder()
+           .setName(dbName)
+           .setDescription("Rename Partition Test database")
+           .create(client, conf);
+ 
+       Table tbl = new TableBuilder()
+           .setDbName(dbName)
+           .setTableName(tblName)
+           .addCol("name", ColumnType.STRING_TYPE_NAME)
+           .addCol("income", ColumnType.INT_TYPE_NAME)
+           .addPartCol("ds", ColumnType.STRING_TYPE_NAME)
+           .addPartCol("hr", ColumnType.INT_TYPE_NAME)
+           .create(client, conf);
+ 
+       if (isThriftClient) {
+         // the createTable() above does not update the location in the 'tbl'
+         // object when the client is a thrift client and the code below relies
+         // on the location being present in the 'tbl' object - so get the table
+         // from the metastore
+         tbl = client.getTable(dbName, tblName);
+       }
+ 
+       Partition part = new Partition();
+       part.setDbName(dbName);
+       part.setTableName(tblName);
+       part.setValues(vals);
+       part.setParameters(new HashMap<>());
+       part.setSd(tbl.getSd().deepCopy());
+       part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
+       part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
+       part.getParameters().put("retention", "10");
+       part.getSd().setNumBuckets(12);
+       part.getSd().getSerdeInfo().getParameters().put("abc", "1");
+ 
+       client.add_partition(part);
+ 
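+       // rename by handing renamePartition a Partition object carrying the new values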
+       part.setValues(tmp_vals);
+       client.renamePartition(dbName, tblName, vals, part);
+ 
+       boolean exceptionThrown = false;
+       try {
+         Partition p = client.getPartition(dbName, tblName, vals);
+       } catch(Exception e) {
+         assertEquals("partition should not have existed",
+             NoSuchObjectException.class, e.getClass());
+         exceptionThrown = true;
+       }
+       assertTrue("Expected NoSuchObjectException", exceptionThrown);
+ 
+       Partition part3 = client.getPartition(dbName, tblName, tmp_vals);
+       assertEquals("couldn't rename partition", part3.getParameters().get(
+           "retention"), "10");
+       assertEquals("couldn't rename partition", part3.getSd().getSerdeInfo()
+           .getParameters().get("abc"), "1");
+       assertEquals("couldn't rename partition", part3.getSd().getNumBuckets(),
+           12);
+       assertEquals("new partition sd matches", part3.getSd().getLocation(),
+           tbl.getSd().getLocation() + part2_path);
+ 
+       part.setValues(vals);
+       client.renamePartition(dbName, tblName, tmp_vals, part);
+ 
+       exceptionThrown = false;
+       try {
+         Partition p = client.getPartition(dbName, tblName, tmp_vals);
+       } catch(Exception e) {
+         assertEquals("partition should not have existed",
+             NoSuchObjectException.class, e.getClass());
+         exceptionThrown = true;
+       }
+       assertTrue("Expected NoSuchObjectException", exceptionThrown);
+ 
+       part3 = client.getPartition(dbName, tblName, vals);
+       assertEquals("couldn't rename partition", part3.getParameters().get(
+           "retention"), "10");
+       assertEquals("couldn't rename partition", part3.getSd().getSerdeInfo()
+           .getParameters().get("abc"), "1");
+       assertEquals("couldn't rename partition", part3.getSd().getNumBuckets(),
+           12);
+       assertEquals("new partition sd matches", part3.getSd().getLocation(),
+           tbl.getSd().getLocation() + part_path);
+ 
+       client.dropTable(dbName, tblName);
+ 
+       client.dropDatabase(dbName);
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testRenamePartition() failed.");
+       throw e;
+     }
+   }
+ 
+   @Test
+   public void testDatabase() throws Throwable {
+     try {
+       // clear up any existing databases
+       silentDropDatabase(TEST_DB1_NAME);
+       silentDropDatabase(TEST_DB2_NAME);
+ 
+       Database db = new DatabaseBuilder()
+           .setName(TEST_DB1_NAME)
+           .setOwnerName(SecurityUtils.getUser())
+           .build(conf);
+       Assert.assertEquals(SecurityUtils.getUser(), db.getOwnerName());
+       client.createDatabase(db);
+ 
+       db = client.getDatabase(TEST_DB1_NAME);
+ 
+       assertEquals("name of returned db is different from that of inserted db",
+           TEST_DB1_NAME, db.getName());
+       assertEquals("location of the returned db is different from that of inserted db",
+           warehouse.getDatabasePath(db).toString(), db.getLocationUri());
+       assertEquals(db.getOwnerName(), SecurityUtils.getUser());
+       assertEquals(db.getOwnerType(), PrincipalType.USER);
+       assertEquals(Warehouse.DEFAULT_CATALOG_NAME, db.getCatalogName());
+       Database db2 = new DatabaseBuilder()
+           .setName(TEST_DB2_NAME)
+           .create(client, conf);
+ 
+       db2 = client.getDatabase(TEST_DB2_NAME);
+ 
+       assertEquals("name of returned db is different from that of inserted db",
+           TEST_DB2_NAME, db2.getName());
+       assertEquals("location of the returned db is different from that of inserted db",
+           warehouse.getDatabasePath(db2).toString(), db2.getLocationUri());
+ 
+       List<String> dbs = client.getDatabases(".*");
+ 
+       assertTrue("first database is not " + TEST_DB1_NAME, dbs.contains(TEST_DB1_NAME));
+       assertTrue("second database is not " + TEST_DB2_NAME, dbs.contains(TEST_DB2_NAME));
+ 
+       client.dropDatabase(TEST_DB1_NAME);
+       client.dropDatabase(TEST_DB2_NAME);
+       silentDropDatabase(TEST_DB1_NAME);
+       silentDropDatabase(TEST_DB2_NAME);
+     } catch (Throwable e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testDatabase() failed.");
+       throw e;
+     }
+   }
+ 
+   @Test
+   public void testDatabaseLocationWithPermissionProblems() throws Exception {
+ 
+     // Note: The following test will fail if you are running this test as root. Setting
+     // permission to '0' on the database folder will not preclude root from being able
+     // to create the necessary files.
+ 
+     if (System.getProperty("user.name").equals("root")) {
+       System.err.println("Skipping test because you are running as root!");
+       return;
+     }
+ 
+     silentDropDatabase(TEST_DB1_NAME);
+ 
+     String dbLocation =
+       MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test/_testDB_create_";
+     FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), conf);
+     fs.mkdirs(
+               new Path(MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test"),
+               new FsPermission((short) 0));
+     Database db = new DatabaseBuilder()
+         .setName(TEST_DB1_NAME)
+         .setLocation(dbLocation)
+         .build(conf);
+ 
+ 
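+     // the parent directory was created with mode 000, so database creation should fail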
+     boolean createFailed = false;
+     try {
+       client.createDatabase(db);
+     } catch (MetaException cantCreateDB) {
+       createFailed = true;
+     } finally {
+       // Cleanup
+       if (!createFailed) {
+         try {
+           client.dropDatabase(TEST_DB1_NAME);
+         } catch(Exception e) {
+           System.err.println("Failed to remove database in cleanup: " + e.getMessage());
+         }
+       }
+ 
+       fs.setPermission(new Path(MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test"),
+                        new FsPermission((short) 0755));
+       fs.delete(new Path(MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test"), true);
+     }
+ 
+     assertTrue("Database creation succeeded even with permission problem", createFailed);
+   }
+ 
+   @Test
+   public void testDatabaseLocation() throws Throwable {
+     try {
+       // clear up any existing databases
+       silentDropDatabase(TEST_DB1_NAME);
+ 
+       String dbLocation =
+           MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/_testDB_create_";
+       new DatabaseBuilder()
+           .setName(TEST_DB1_NAME)
+           .setLocation(dbLocation)
+           .create(client, conf);
+ 
+       Database db = client.getDatabase(TEST_DB1_NAME);
+ 
+       assertEquals("name of returned db is different from that of inserted db",
+           TEST_DB1_NAME, db.getName());
+       assertEquals("location of the returned db is different from that of inserted db",
+           warehouse.getDnsPath(new Path(dbLocation)).toString(), db.getLocationUri());
+ 
+       client.dropDatabase(TEST_DB1_NAME);
+       silentDropDatabase(TEST_DB1_NAME);
+ 
+       boolean objectNotExist = false;
+       try {
+         client.getDatabase(TEST_DB1_NAME);
+       } catch (NoSuchObjectException e) {
+         objectNotExist = true;
+       }
+       assertTrue("Database " + TEST_DB1_NAME + " exists ", objectNotExist);
+ 
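+       // now point the database location at a path that already exists as a plain file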
+       dbLocation =
+           MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/_testDB_file_";
+       FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), conf);
+       fs.createNewFile(new Path(dbLocation));
+       fs.deleteOnExit(new Path(dbLocation));
+       db = new DatabaseBuilder()
+           .setName(TEST_DB1_NAME)
+           .setLocation(dbLocation)
+           .build(conf);
+ 
+       boolean createFailed = false;
+       try {
+         client.createDatabase(db);
+       } catch (MetaException cantCreateDB) {
+         System.err.println(cantCreateDB.getMessage());
+         createFailed = true;
+       }
+       assertTrue("Database creation succeeded even location exists and is a file", createFailed);
+ 
+       objectNotExist = false;
+       try {
+         client.getDatabase(TEST_DB1_NAME);
+       } catch (NoSuchObjectException e) {
+         objectNotExist = true;
+       }
+       assertTrue("Database " + TEST_DB1_NAME + " exists when location is specified and is a file",
+           objectNotExist);
+ 
+     } catch (Throwable e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testDatabaseLocation() failed.");
+       throw e;
+     }
+   }
+ 
+ 
+   @Test
+   public void testSimpleTypeApi() throws Exception {
+     try {
+       client.dropType(ColumnType.INT_TYPE_NAME);
+ 
+       Type typ1 = new Type();
+       typ1.setName(ColumnType.INT_TYPE_NAME);
+       boolean ret = client.createType(typ1);
+       assertTrue("Unable to create type", ret);
+ 
+       Type typ1_2 = client.getType(ColumnType.INT_TYPE_NAME);
+       assertNotNull(typ1_2);
+       assertEquals(typ1.getName(), typ1_2.getName());
+ 
+       ret = client.dropType(ColumnType.INT_TYPE_NAME);
+       assertTrue("unable to drop type integer", ret);
+ 
+       boolean exceptionThrown = false;
+       try {
+         client.getType(ColumnType.INT_TYPE_NAME);
+       } catch (NoSuchObjectException e) {
+         exceptionThrown = true;
+       }
+       assertTrue("Expected NoSuchObjectException", exceptionThrown);
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testSimpleTypeApi() failed.");
+       throw e;
+     }
+   }
+ 
+   // TODO:pc need to enhance this with complex fields and getType_all function
+   @Test
+   public void testComplexTypeApi() throws Exception {
+     try {
+       client.dropType("Person");
+ 
+       Type typ1 = new Type();
+       typ1.setName("Person");
+       typ1.setFields(new ArrayList<>(2));
+       typ1.getFields().add(
+           new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+       typ1.getFields().add(
+           new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+       boolean ret = client.createType(typ1);
+       assertTrue("Unable to create type", ret);
+ 
+       Type typ1_2 = client.getType("Person");
+       assertNotNull("type Person not found", typ1_2);
+       assertEquals(typ1.getName(), typ1_2.getName());
+       assertEquals(typ1.getFields().size(), typ1_2.getFields().size());
+       assertEquals(typ1.getFields().get(0), typ1_2.getFields().get(0));
+       assertEquals(typ1.getFields().get(1), typ1_2.getFields().get(1));
+ 
+       client.dropType("Family");
+ 
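+       // create a second type whose 'members' field is a list of the Person type above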
+       Type fam = new Type();
+       fam.setName("Family");
+       fam.setFields(new ArrayList<>(2));
+       fam.getFields().add(
+           new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+       fam.getFields().add(
+           new FieldSchema("members",
+               ColumnType.getListType(typ1.getName()), ""));
+ 
+       ret = client.createType(fam);
+       assertTrue("Unable to create type " + fam.getName(), ret);
+ 
+       Type fam2 = client.getType("Family");
+       assertNotNull("type Person not found", fam2);
+       assertEquals(fam.getName(), fam2.getName());
+       assertEquals(fam.getFields().size(), fam2.getFields().size());
+       assertEquals(fam.getFields().get(0), fam2.getFields().get(0));
+       assertEquals(fam.getFields().get(1), fam2.getFields().get(1));
+ 
+       ret = client.dropType("Family");
+       assertTrue("unable to drop type Family", ret);
+ 
+       ret = client.dropType("Person");
+       assertTrue("unable to drop type Person", ret);
+ 
+       boolean exceptionThrown = false;
+       try {
+         client.getType("Person");
+       } catch (NoSuchObjectException e) {
+         exceptionThrown = true;
+       }
+       assertTrue("Expected NoSuchObjectException", exceptionThrown);
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testComplexTypeApi() failed.");
+       throw e;
+     }
+   }
+ 
+   @Test
+   public void testSimpleTable() throws Exception {
+     try {
+       String dbName = "simpdb";
+       String tblName = "simptbl";
+       String tblName2 = "simptbl2";
+       String typeName = "Person";
+ 
+       client.dropTable(dbName, tblName);
+       silentDropDatabase(dbName);
+ 
+       new DatabaseBuilder()
+           .setName(dbName)
+           .create(client, conf);
+ 
+       client.dropType(typeName);
+       Type typ1 = new Type();
+       typ1.setName(typeName);
+       typ1.setFields(new ArrayList<>(2));
+       typ1.getFields().add(
+           new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+       typ1.getFields().add(
+           new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+       client.createType(typ1);
+ 
+       Table tbl = new TableBuilder()
+           .setDbName(dbName)
+           .setTableName(tblName)
+           .setCols(typ1.getFields())
+           .setNumBuckets(1)
+           .addBucketCol("name")
+           .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+           .create(client, conf);
+ 
+       if (isThriftClient) {
+         // the createTable() above does not update the location in the 'tbl'
+         // object when the client is a thrift client and the code below relies
+         // on the location being present in the 'tbl' object - so get the table
+         // from the metastore
+         tbl = client.getTable(dbName, tblName);
+       }
+ 
+       Table tbl2 = client.getTable(dbName, tblName);
+       assertNotNull(tbl2);
+       assertEquals(tbl2.getDbName(), dbName);
+       assertEquals(tbl2.getTableName(), tblName);
+       assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size());
+       assertEquals(tbl2.getSd().isCompressed(), false);
+       assertEquals(tbl2.getSd().getNumBuckets(), 1);
+       assertEquals(tbl2.getSd().getLocation(), tbl.getSd().getLocation());
+       assertNotNull(tbl2.getSd().getSerdeInfo());
+       tbl.getSd().getSerdeInfo().setParameters(new HashMap<>());
+       tbl.getSd().getSerdeInfo().getParameters().put(ColumnType.SERIALIZATION_FORMAT, "1");
+ 
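+       // reuse the fetched table object to define a second, external table at a new location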
+       tbl2.setTableName(tblName2);
+       tbl2.setParameters(new HashMap<>());
+       tbl2.getParameters().put("EXTERNAL", "TRUE");
+       tbl2.getSd().setLocation(tbl.getSd().getLocation() + "-2");
+ 
+       List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
+       assertNotNull(fieldSchemas);
+       assertEquals(fieldSchemas.size(), tbl.getSd().getCols().size());
+       for (FieldSchema fs : tbl.getSd().getCols()) {
+         assertTrue(fieldSchemas.contains(fs));
+       }
+ 
+       List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
+       assertNotNull(fieldSchemasFull);
+       assertEquals(fieldSchemasFull.size(), tbl.getSd().getCols().size()
+           + tbl.getPartitionKeys().size());
+       for (FieldSchema fs : tbl.getSd().getCols()) {
+         assertTrue(fieldSchemasFull.contains(fs));
+       }
+       for (FieldSchema fs : tbl.getPartitionKeys()) {
+         assertTrue(fieldSchemasFull.contains(fs));
+       }
+ 
+       client.createTable(tbl2);
+       if (isThriftClient) {
+         tbl2 = client.getTable(tbl2.getDbName(), tbl2.getTableName());
+       }
+ 
+       Table tbl3 = client.getTable(dbName, tblName2);
+       assertNotNull(tbl3);
+       assertEquals(tbl3.getDbName(), dbName);
+       assertEquals(tbl3.getTableName(), tblName2);
+       assertEquals(tbl3.getSd().getCols().size(), typ1.getFields().size());
+       assertEquals(tbl3.getSd().isCompressed(), false);
+       assertEquals(tbl3.getSd().getNumBuckets(), 1);
+       assertEquals(tbl3.getSd().getLocation(), tbl2.getSd().getLocation());
+       assertEquals(tbl3.getParameters(), tbl2.getParameters());
+ 
+       fieldSchemas = client.getFields(dbName, tblName2);
+       assertNotNull(fieldSchemas);
+       assertEquals(fieldSchemas.size(), tbl2.getSd().getCols().size());
+       for (FieldSchema fs : tbl2.getSd().getCols()) {
+         assertTrue(fieldSchemas.contains(fs));
+       }
+ 
+       fieldSchemasFull = client.getSchema(dbName, tblName2);
+       assertNotNull(fieldSchemasFull);
+       assertEquals(fieldSchemasFull.size(), tbl2.getSd().getCols().size()
+           + tbl2.getPartitionKeys().size());
+       for (FieldSchema fs : tbl2.getSd().getCols()) {
+         assertTrue(fieldSchemasFull.contains(fs));
+       }
+       for (FieldSchema fs : tbl2.getPartitionKeys()) {
+         assertTrue(fieldSchemasFull.contains(fs));
+       }
+ 
+       assertEquals("Use this for comments etc", tbl2.getSd().getParameters()
+           .get("test_param_1"));
+       assertEquals("name", tbl2.getSd().getBucketCols().get(0));
+       assertTrue("Partition key list is not empty",
+           (tbl2.getPartitionKeys() == null)
+               || (tbl2.getPartitionKeys().size() == 0));
+ 
+       //test get_table_objects_by_name functionality
+       ArrayList<String> tableNames = new ArrayList<>();
+       tableNames.add(tblName2);
+       tableNames.add(tblName);
+       tableNames.add(tblName2);
+       List<Table> foundTables = client.getTableObjectsByName(dbName, tableNames);
+ 
+       assertEquals(2, foundTables.size());
+       for (Table t: foundTables) {
+         if (t.getTableName().equals(tblName2)) {
+           assertEquals(t.getSd().getLocation(), tbl2.getSd().getLocation());
+         } else {
+           assertEquals(t.getTableName(), tblName);
+           assertEquals(t.getSd().getLocation(), tbl.getSd().getLocation());
+         }
+         assertEquals(t.getSd().getCols().size(), typ1.getFields().size());
+         assertEquals(t.getSd().isCompressed(), false);
+         assertEquals(foundTables.get(0).getSd().getNumBuckets(), 1);
+         assertNotNull(t.getSd().getSerdeInfo());
+         assertEquals(t.getDbName(), dbName);
+       }
+ 
+       tableNames.add(1, "table_that_doesnt_exist");
+       foundTables = client.getTableObjectsByName(dbName, tableNames);
+       assertEquals(foundTables.size(), 2);
+ 
+       InvalidOperationException ioe = null;
+       try {
+         foundTables = client.getTableObjectsByName(dbName, null);
+       } catch (InvalidOperationException e) {
+         ioe = e;
+       }
+       assertNotNull(ioe);
+       assertTrue("Table not found", ioe.getMessage().contains("null tables"));
+ 
+       UnknownDBException udbe = null;
+       try {
+         foundTables = client.getTableObjectsByName("db_that_doesnt_exist", tableNames);
+       } catch (UnknownDBException e) {
+         udbe = e;
+       }
+       assertNotNull(udbe);
+       assertTrue("DB not found",
+           udbe.getMessage().contains("not find database hive.db_that_doesnt_exist"));
+ 
+       udbe = null;
+       try {
+         foundTables = client.getTableObjectsByName("", tableNames);
+       } catch (UnknownDBException e) {
+         udbe = e;
+       }
+       assertNotNull(udbe);
+       assertTrue("DB not found", udbe.getMessage().contains("is null or empty"));
+ 
+       FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), conf);
+       client.dropTable(dbName, tblName);
+       assertFalse(fs.exists(new Path(tbl.getSd().getLocation())));
+ 
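+       // the second table is EXTERNAL, so its data directory should survive the drop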
+       client.dropTable(dbName, tblName2);
+       assertTrue(fs.exists(new Path(tbl2.getSd().getLocation())));
+ 
+       client.dropType(typeName);
+       client.dropDatabase(dbName);
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testSimpleTable() failed.");
+       throw e;
+     }
+   }
+ 
+   // Tests that in the absence of stats for partitions, and/or absence of columns
+   // to get stats for, the metastore does not break. See HIVE-12083 for motivation.
+   @Test
+   public void testStatsFastTrivial() throws Throwable {
+     String dbName = "tstatsfast";
+     String tblName = "t1";
+     String tblOwner = "statstester";
+     String typeName = "Person";
+     int lastAccessed = 12083;
+ 
+     cleanUp(dbName,tblName,typeName);
+ 
+     List<List<String>> values = new ArrayList<>();
+     values.add(makeVals("2008-07-01 14:13:12", "14"));
+     values.add(makeVals("2008-07-01 14:13:12", "15"));
+     values.add(makeVals("2008-07-02 14:13:12", "15"));
+     values.add(makeVals("2008-07-03 14:13:12", "151"));
+ 
+     createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+ 
+     List<String> emptyColNames = new ArrayList<>();
+     List<String> emptyPartNames = new ArrayList<>();
+ 
+     List<String> colNames = new ArrayList<>();
+     colNames.add("name");
+     colNames.add("income");
+     List<String> partNames = client.listPartitionNames(dbName,tblName,(short)-1);
+ 
+     assertEquals(0,emptyColNames.size());
+     assertEquals(0,emptyPartNames.size());
+     assertEquals(2,colNames.size());
+     assertEquals(4,partNames.size());
+ 
+     // Test for both colNames and partNames being empty:
+     AggrStats aggrStatsEmpty = client.getAggrColStatsFor(dbName,tblName,emptyColNames,emptyPartNames);
+     assertNotNull(aggrStatsEmpty); // short-circuited on client-side, verifying that it's an empty object, not null
+     assertEquals(0,aggrStatsEmpty.getPartsFound());
+     assertNotNull(aggrStatsEmpty.getColStats());
+     assert(aggrStatsEmpty.getColStats().isEmpty());
+ 
+     // Test for only colNames being empty
+     AggrStats aggrStatsOnlyParts = client.getAggrColStatsFor(dbName,tblName,emptyColNames,partNames);
+     assertNotNull(aggrStatsOnlyParts); // short-circuited on client-side, verifying that it's an empty object, not null
+     assertEquals(0,aggrStatsOnlyParts.getPartsFound());
+     assertNotNull(aggrStatsOnlyParts.getColStats());
+     assert(aggrStatsOnlyParts.getColStats().isEmpty());
+ 
+     // Test for only partNames being empty
+     AggrStats aggrStatsOnlyCols = client.getAggrColStatsFor(dbName,tblName,colNames,emptyPartNames);
+     assertNotNull(aggrStatsOnlyCols); // short-circuited on client-side, verifying that it's an empty object, not null
+     assertEquals(0,aggrStatsOnlyCols.getPartsFound());
+     assertNotNull(aggrStatsOnlyCols.getColStats());
+     assert(aggrStatsOnlyCols.getColStats().isEmpty());
+ 
+     // Test for valid values for both.
+     AggrStats aggrStatsFull = client.getAggrColStatsFor(dbName,tblName,colNames,partNames);
+     assertNotNull(aggrStatsFull);
+     assertEquals(0,aggrStatsFull.getPartsFound()); // would still be empty, because no stats are actually populated.
+     assertNotNull(aggrStatsFull.getColStats());
+     assert(aggrStatsFull.getColStats().isEmpty());
+ 
+   }
+ 
+   @Test
+   public void testColumnStatistics() throws Throwable {
+ 
+     String dbName = "columnstatstestdb";
+     String tblName = "tbl";
+     String typeName = "Person";
+     String tblOwner = "testowner";
+     int lastAccessed = 6796;
+ 
+     try {
+       cleanUp(dbName, tblName, typeName);
+       new DatabaseBuilder()
+           .setName(dbName)
+           .create(client, conf);
+       createTableForTestFilter(dbName,tblName, tblOwner, lastAccessed, true);
+ 
+       // Create a ColumnStatistics Obj
+       String[] colName = new String[]{"income", "name"};
+       double lowValue = 50000.21;
+       double highValue = 1200000.4525;
+       long numNulls = 3;
+       long numDVs = 22;
+       double avgColLen = 50.30;
+       long maxColLen = 102;
+       String[] colType = new String[] {"double", "string"};
+       boolean isTblLevel = true;
+       String partName = null;
+       List<ColumnStatisticsObj> statsObjs = new ArrayList<>();
+ 
+       ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc();
+       statsDesc.setDbName(dbName);
+       statsDesc.setTableName(tblName);
+       statsDesc.setIsTblLevel(isTblLevel);
+       statsDesc.setPartName(partName);
+ 
+       ColumnStatisticsObj statsObj = new ColumnStatisticsObj();
+       statsObj.setColName(colName[0]);
+       statsObj.setColType(colType[0]);
+ 
+       ColumnStatisticsData statsData = new ColumnStatisticsData();
+       DoubleColumnStatsData numericStats = new DoubleColumnStatsData();
+       statsData.setDoubleStats(numericStats);
+ 
+       statsData.getDoubleStats().setHighValue(highValue);
+       statsData.getDoubleStats().setLowValue(lowValue);
+       statsData.getDoubleStats().setNumDVs(numDVs);
+       statsData.getDoubleStats().setNumNulls(numNulls);
+ 
+       statsObj.setStatsData(statsData);
+       statsObjs.add(statsObj);
+ 
+       statsObj = new ColumnStatisticsObj();
+       statsObj.setColName(colName[1]);
+       statsObj.setColType(colType[1]);
+ 
+       statsData = new ColumnStatisticsData();
+       StringColumnStatsData stringStats = new StringColumnStatsData();
+       statsData.setStringStats(stringStats);
+       statsData.getStringStats().setAvgColLen(avgColLen);
+       statsData.getStringStats().setMaxColLen(maxColLen);
+       statsData.getStringStats().setNumDVs(numDVs);
+       statsData.getStringStats().setNumNulls(numNulls);
+ 
+       statsObj.setStatsData(statsData);
+       statsObjs.add(statsObj);
+ 
+       ColumnStatistics colStats = new ColumnStatistics();
+       colStats.setStatsDesc(statsDesc);
+       colStats.setStatsObj(statsObjs);
+ 
+       // write stats objs persistently
+       client.updateTableColumnStatistics(colStats);
+ 
+       // retrieve the stats obj that was just written
+       ColumnStatisticsObj colStats2 = client.getTableColumnStatistics(
+           dbName, tblName, Lists.newArrayList(colName[0])).get(0);
+ 
+      // compare stats obj to ensure what we get is what we wrote
+       assertNotNull(colStats2);
+       assertEquals(colStats2.getColName(), colName[0]);
+       assertEquals(colStats2.getStatsData().getDoubleStats().getLowValue(), lowValue, 0.01);
+       assertEquals(colStats2.getStatsData().getDoubleStats().getHighValue(), highValue, 0.01);
+       assertEquals(colStats2.getStatsData().getDoubleStats().getNumNulls(), numNulls);
+       assertEquals(colStats2.getStatsData().getDoubleStats().getNumDVs(), numDVs);
+ 
+       // test delete column stats; if no col name is passed all column stats associated with the
+       // table is deleted
+       boolean status = client.deleteTableColumnStatistics(dbName, tblName, null);
+       assertTrue(status);
+       // try to query stats for a column for which stats doesn't exist
+       assertTrue(client.getTableColumnStatistics(
+           dbName, tblName, Lists.newArrayList(colName[1])).isEmpty());
+ 
+       colStats.setStatsDesc(statsDesc);
+       colStats.setStatsObj(statsObjs);
+ 
+       // update table level column stats
+       client.updateTableColumnStatistics(colStats);
+ 
+       // query column stats for column whose stats were updated in the previous call
+       colStats2 = client.getTableColumnStatistics(
+           dbName, tblName, Lists.newArrayList(colName[0])).get(0);
+ 
+       // partition level column statistics test
+       // create a table with multiple partitions
+       cleanUp(dbName, tblName, typeName);
+ 
+       List<List<String>> values = new ArrayList<>();
+       values.add(makeVals("2008-07-01 14:13:12", "14"));
+       values.add(makeVals("2008-07-01 14:13:12", "15"));
+       values.add(makeVals("2008-07-02 14:13:12", "15"));
+       values.add(makeVals("2008-07-03 14:13:12", "151"));
+ 
+       createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+ 
+       List<String> partitions = client.listPartitionNames(dbName, tblName, (short)-1);
+ 
+       partName = partitions.get(0);
+       isTblLevel = false;
+ 
+       // create a new columnstatistics desc to represent partition level column stats
+       statsDesc = new ColumnStatisticsDesc();
+       statsDesc.setDbName(dbName);
+       statsDesc.setTableName(tblName);
+       statsDesc.setPartName(partName);
+       statsDesc.setIsTblLevel(isTblLevel);
+ 
+       colStats = new ColumnStatistics();
+       colStats.setStatsDesc(statsDesc);
+       colStats.setStatsObj(statsObjs);
+ 
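+      // persist the same stats objects, now scoped to the first partition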
+      client.updatePartitionColumnStatistics(colStats);
+ 
+      colStats2 = client.getPartitionColumnStatistics(dbName, tblName,
+          Lists.newArrayList(partName), Lists.newArrayList(colName[1])).get(partName).get(0);
+ 
+      // compare stats obj to ensure what we get is what we wrote
+      assertNotNull(colStats2);
+      assertEquals(colStats.getStatsDesc().getPartName(), partName);
+      assertEquals(colStats2.getColName(), colName[1]);
+      assertEquals(colStats2.getStatsData().getStringStats().getMaxColLen(), maxColLen);
+      assertEquals(colStats2.getStatsData().getStringStats().getAvgColLen(), avgColLen, 0.01);
+      assertEquals(colStats2.getStatsData().getStringStats().getNumNulls(), numNulls);
+      assertEquals(colStats2.getStatsData().getStringStats().getNumDVs(), numDVs);
+ 
+      // test stats deletion at partition level
+      client.deletePartitionColumnStatistics(dbName, tblName, partName, colName[1]);
+ 
+      colStats2 = client.getPartitionColumnStatistics(dbName, tblName,
+          Lists.newArrayList(partName), Lists.newArrayList(colName[0])).get(partName).get(0);
+ 
+      // test get stats on a column for which stats doesn't exist
+      assertTrue(client.getPartitionColumnStatistics(dbName, tblName,
+            Lists.newArrayList(partName), Lists.newArrayList(colName[1])).isEmpty());
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testColumnStatistics() failed.");
+       throw e;
+     } finally {
+       cleanUp(dbName, tblName, typeName);
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testGetSchemaWithNoClassDefFoundError() throws TException {
+     String dbName = "testDb";
+     String tblName = "testTable";
+ 
+     client.dropTable(dbName, tblName);
+     silentDropDatabase(dbName);
+ 
+     new DatabaseBuilder()
+         .setName(dbName)
+         .create(client, conf);
+ 
+     Table tbl = new TableBuilder()
+         .setDbName(dbName)
+         .setTableName(tblName)
+         .addCol("name", ColumnType.STRING_TYPE_NAME, "")
+         .setSerdeLib("no.such.class")
+         .create(client, conf);
+ 
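+     // resolving the bogus serde class should fail and surface as the expected MetaException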
+     client.getSchema(dbName, tblName);
+   }
+ 
+   @Test
+   public void testAlterTable() throws Exception {
+     String dbName = "alterdb";
+     String invTblName = "alter-tbl";
+     String tblName = "altertbl";
+ 
+     try {
+       client.dropTable(dbName, tblName);
+       silentDropDatabase(dbName);
+ 
+       new DatabaseBuilder()
+           .setName(dbName)
+           .create(client, conf);
+ 
+       ArrayList<FieldSchema> invCols = new ArrayList<>(2);
+       invCols.add(new FieldSchema("n-ame", ColumnType.STRING_TYPE_NAME, ""));
+       invCols.add(new FieldSchema("in.come", ColumnType.INT_TYPE_NAME, ""));
+ 
+       Table tbl = new TableBuilder()
+           .setDbName(dbName)
+           .setTableName(invTblName)
+           .setCols(invCols)
+           .build(conf);
+ 
+       boolean failed = false;
+       try {
+         client.createTable(tbl);
+       } catch (InvalidObjectException ex) {
+         failed = true;
+       }
+       if (!failed) {
+         assertTrue("Able to create table with invalid name: " + invTblName,
+             false);
+       }
+ 
+       // create an invalid table which has wrong column type
+       ArrayList<FieldSchema> invColsInvType = new ArrayList<>(2);
+       invColsInvType.add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+       invColsInvType.add(new FieldSchema("income", "xyz", ""));
+       tbl.setTableName(tblName);
+       tbl.getSd().setCols(invColsInvType);
+       boolean failChecker = false;
+       try {
+         client.createTable(tbl);
+       } catch (InvalidObjectException ex) {
+         failChecker = true;
+       }
+       if (!failChecker) {
+         assertTrue("Able to create table with invalid column type: " + invTblName,
+             false);
+       }
+ 
+       ArrayList<FieldSchema> cols = new ArrayList<>(2);
+       cols.add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+       cols.add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ 
+       // create a valid table
+       tbl.setTableName(tblName);
+       tbl.getSd().setCols(cols);
+       client.createTable(tbl);
+ 
+       if (isThriftClient) {
+         tbl = client.getTable(tbl.getDbName(), tbl.getTableName());
+       }
+ 
+       // now try to invalid alter table
+       Table tbl2 = client.getTable(dbName, tblName);
+       failed = false;
+       try {
+         tbl2.setTableName(invTblName);
+         tbl2.getSd().setCols(invCols);
+         client.alter_table(dbName, tblName, tbl2);
+       } catch (InvalidOperationException ex) {
+         failed = true;
+       }
+       if (!failed) {
+         assertTrue("Able to rename table with invalid name: " + invTblName,
+             false);
+       }
+ 
+       //try an invalid alter table with partition key name
+       Table tbl_pk = client.getTable(tbl.getDbName(), tbl.getTableName());
+       List<FieldSchema> partitionKeys = tbl_pk.getPartitionKeys();
+       for (FieldSchema fs : partitionKeys) {
+         fs.setName("invalid_to_change_name");
+         fs.setComment("can_change_comment");
+       }
+       tbl_pk.setPartitionKeys(partitionKeys);
+       try {
+         client.alter_table(dbName, tblName, tbl_pk);
+       } catch (InvalidOperationException ex) {
+         failed = true;
+       }
+       assertTrue("Should not have succeeded in altering partition key name", failed);
+ 
+       //try a valid alter table partition key comment
+       failed = false;
+       tbl_pk = client.getTable(tbl.getDbName(), tbl.getTableName());
+       partitionKeys = tbl_pk.getPartitionKeys();
+       for (FieldSchema fs : partitionKeys) {
+         fs.setComment("can_change_comment");
+       }
+       tbl_pk.setPartitionKeys(partitionKeys);
+       try {
+         client.alter_table(dbName, tblName, tbl_pk);
+       } catch (InvalidOperationException ex) {
+         failed = true;
+       }
+       assertFalse("Should not have failed alter table partition comment", failed);
+       Table newT = client.getTable(tbl.getDbName(), tbl.getTableName());
+       assertEquals(partitionKeys, newT.getPartitionKeys());
+ 
+       // try a valid alter table
+       tbl2.setTableName(tblName + "_renamed");
+       tbl2.getSd().setCols(cols);
+       tbl2.getSd().setNumBuckets(32);
+       client.alter_table(dbName, tblName, tbl2);
+       Table tbl3 = client.getTable(dbName, tbl2.getTableName());
+       assertEquals("Alter table didn't succeed. Num buckets is different ",
+           tbl2.getSd().getNumBuckets(), tbl3.getSd().getNumBuckets());
+       // check that data has moved
+       FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), conf);
+       assertFalse("old table location still exists", fs.exists(new Path(tbl
+           .getSd().getLocation())));
+       assertTrue("data did not move to new location", fs.exists(new Path(tbl3
+           .getSd().getLocation())));
+ 
+       if (!isThriftClient) {
+         assertEquals("alter table didn't move data correct location", tbl3
+             .getSd().getLocation(), tbl2.getSd().getLocation());
+       }
+ 
+       // alter table with invalid column type
+       tbl_pk.getSd().setCols(invColsInvType);
+       failed = false;
+       try {
+         client.alter_table(dbName, tbl2.getTableName(), tbl_pk);
+       } catch (InvalidOperationException ex) {
+         failed = true;
+       }
+       assertTrue("Should not have succeeded in altering column", failed);
+ 
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testSimpleTable() failed.");
+       throw e;
+     } finally {
+       silentDropDatabase(dbName);
+     }
+   }
+ 
+   @Test
+   public void testComplexTable() throws Exception {
+ 
+     String dbName = "compdb";
+     String tblName = "comptbl";
+     String typeName = "Person";
+ 
+     try {
+       client.dropTable(dbName, tblName);
+       silentDropDatabase(dbName);
+       new DatabaseBuilder()
+           .setName(dbName)
+           .create(client, conf);
+ 
+       client.dropType(typeName);
+       Type typ1 = new Type();
+       typ1.setName(typeName);
+       typ1.setFields(new ArrayList<>(2));
+       typ1.getFields().add(
+           new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+       typ1.getFields().add(
+           new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+       client.createType(typ1);
+ 
+       Table tbl = new TableBuilder()
+           .setDbName(dbName)
+           .setTableName(tblName)
+           .setCols(typ1.getFields())
+           .addPartCol("ds", ColumnType.DATE_TYPE_NAME)
+           .addPartCol("hr", ColumnType.INT_TYPE_NAME)
+           .setNumBuckets(1)
+           .addBucketCol("name")
+           .addStorageDescriptorParam("test_param_1","Use this for comments etc")
+           .create(client, conf);
+ 
+       Table tbl2 = client.getTable(dbName, tblName);
+       assertEquals(tbl2.getDbName(), dbName);
+       assertEquals(tbl2.getTableName(), tblName);
+       assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size());
+       assertFalse(tbl2.getSd().isCompressed());
+       assertFalse(tbl2.getSd().isStoredAsSubDirectories());
+       assertEquals(tbl2.getSd().getNumBuckets(), 1);
+ 
+       assertEquals("Use this for comments etc", tbl2.getSd().getParameters()
+           .get("test_param_1"));
+       assertEquals("name", tbl2.getSd().getBucketCols().get(0));
+ 
+       assertNotNull(tbl2.getPartitionKeys());
+       assertEquals(2, tbl2.getPartitionKeys().size());
+       assertEquals(ColumnType.DATE_TYPE_NAME, tbl2.getPartitionKeys().get(0)
+           .getType());
+       assertEquals(ColumnType.INT_TYPE_NAME, tbl2.getPartitionKeys().get(1)
+           .getType());
+       assertEquals("ds", tbl2.getPartitionKeys().get(0).getName());
+       assertEquals("hr", tbl2.getPartitionKeys().get(1).getName());
+ 
+       List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
+       assertNotNull(fieldSchemas);
+       assertEquals(fieldSchemas.size(), tbl.getSd().getCols().size());
+       for (FieldSchema fs : tbl.getSd().getCols()) {
+         assertTrue(fieldSchemas.contains(fs));
+       }
+ 
+       List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
+       assertNotNull(fieldSchemasFull);
+       assertEquals(fieldSchemasFull.size(), tbl.getSd().getCols().size()
+           + tbl.getPartitionKeys().size());
+       for (FieldSchema fs : tbl.getSd().getCols()) {
+         assertTrue(fieldSchemasFull.contains(fs));
+       }
+       for (FieldSchema fs : tbl.getPartitionKeys()) {
+         assertTrue(fieldSchemasFull.contains(fs));
+       }
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testComplexTable() failed.");
+       throw e;
+     } finally {
+       client.dropTable(dbName, tblName);
+       boolean ret = client.dropType(typeName);
+       assertTrue("Unable to drop type " + typeName, ret);
+       client.dropDatabase(dbName);
+     }
+   }
+ 
+   @Test
+   public void testTableDatabase() throws Exception {
+     String dbName = "testDb";
+     String tblName_1 = "testTbl_1";
+     String tblName_2 = "testTbl_2";
+ 
+     try {
+       silentDropDatabase(dbName);
+ 
+       String dbLocation =
+           MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "_testDB_table_create_";
+       new DatabaseBuilder()
+           .setName(dbName)
+           .setLocation(dbLocation)
+           .create(client, conf);
+       Database db = client.getDatabase(dbName);
+ 
+       Table tbl = new TableBuilder()
+           .setDbName(dbName)
+           .setTableName(tblName_1)
+           .addCol("name", ColumnType.STRING_TYPE_NAME)
+           .addCol("income", ColumnType.INT_TYPE_NAME)
+           .create(client, conf);
+ 
+       tbl = client.getTable(dbName, tblName_1);
+ 
+       Path path = new Path(tbl.getSd().getLocation());
+       System.err.println("Table's location " + path + ", Database's location " + db.getLocationUri());
+       assertEquals("Table location is not a subset of the database location",
+           path.getParent().toString(), db.getLocationUri());
+ 
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testTableDatabase() failed.");
+       throw e;
+     } finally {
+       silentDropDatabase(dbName);
+     }
+   }
+ 
+ 
+   @Test
+   public void testGetConfigValue() {
+ 
+     String val = "value";
+ 
+     if (!isThriftClient) {
+       try {
+         assertEquals(client.getConfigValue("hive.key1", val), "value1");
+         assertEquals(client.getConfigValue("hive.key2", val), "http://www.example.com");
+         assertEquals(client.getConfigValue("hive.key3", val), "");
+         assertEquals(client.getConfigValue("hive.key4", val), "0");
+         assertEquals(client.getConfigValue("hive.key5", val), val);
+         assertEquals(client.getConfigValue(null, val), val);
+       } catch (TException e) {
+         e.printStackTrace();
+         fail();
+       }
+     }
+ 
+     boolean threwException = false;
+     try {
+       // Attempting to get the password should throw an exception
+       client.getConfigValue("javax.jdo.option.ConnectionPassword", "password");
+     } catch (ConfigValSecurityException e) {
+       threwException = true;
+     } catch (TException e) {
+       e.printStackTrace();
+       fail();
+     }
+     assert (threwException);
+   }
+ 
+   private static void adjust(HiveMetaStoreClient client, Partition part,
 -      String dbName, String tblName) throws TException {
++      String dbName, String tblName, boolean isThriftClient) throws TException {
+     Partition part_get = client.getPartition(dbName, tblName, part.getValues());
 -    part.setCreateTime(part_get.getCreateTime());
 -    part.putToParameters(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.DDL_TIME, Long.toString(part_get.getCreateTime()));
++    if (isThriftClient) {
++      part.setCreateTime(part_get.getCreateTime());
++      part.putToParameters(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.DDL_TIME, Long.toString(part_get.getCreateTime()));
++    }
++    part.setWriteId(part_get.getWriteId());
+   }
+ 
++
++
+   private static void silentDropDatabase(String dbName) throws TException {
+     try {
+       for (String tableName : client.getTables(dbName, "*")) {
+         client.dropTable(dbName, tableName);
+       }
+       client.dropDatabase(dbName);
+     } catch (NoSuchObjectException|InvalidOperationException e) {
+       // NOP
+     }
+   }
+ 
+   /**
+    * Tests for list partition by filter functionality.
+    */
+ 
+   @Test
+   public void testPartitionFilter() throws Exception {
+     String dbName = "filterdb";
+     String tblName = "filtertbl";
+ 
+     silentDropDatabase(dbName);
+ 
+     new DatabaseBuilder()
+         .setName(dbName)
+         .create(client, conf);
+ 
+     Table tbl = new TableBuilder()
+         .setDbName(dbName)
+         .setTableName(tblName)
+         .addCol("c1", ColumnType.STRING_TYPE_NAME)
+         .addCol("c2", ColumnType.INT_TYPE_NAME)
+         .addPartCol("p1", ColumnType.STRING_TYPE_NAME)
+         .addPartCol("p2", ColumnType.STRING_TYPE_NAME)
+         .addPartCol("p3", ColumnType.INT_TYPE_NAME)
+         .create(client, conf);
+ 
+     tbl = client.getTable(dbName, tblName);
+ 
+     add_partition(client, tbl, Lists.newArrayList("p11", "p21", "31"), "part1");
+     add_partition(client, tbl, Lists.newArrayList("p11", "p22", "32"), "part2");
+     add_partition(client, tbl, Lists.newArrayList("p12", "p21", "31"), "part3");
+     add_partition(client, tbl, Lists.newArrayList("p12", "p23", "32"), "part4");
+     add_partition(client, tbl, Lists.newArrayList("p13", "p24", "31"), "part5");
+     add_partition(client, tbl, Lists.newArrayList("p13", "p25", "-33"), "part6");
+ 
+     // Test equals operator for strings and integers.
+     checkFilter(client, dbName, tblName, "p1 = \"p11\"", 2);
+     checkFilter(client, dbName, tblName, "p1 = \"p12\"", 2);
+     checkFilter(client, dbName, tblName, "p2 = \"p21\"", 2);
+     checkFilter(client, dbName, tblName, "p2 = \"p23\"", 1);
+     checkFilter(client, dbName, tblName, "p3 = 31", 3);
+     checkFilter(client, dbName, tblName, "p3 = 33", 0);
+     checkFilter(client, dbName, tblName, "p3 = -33", 1);
+     checkFilter(client, dbName, tblName, "p1 = \"p11\" and p2=\"p22\"", 1);
+     checkFilter(client, dbName, tblName, "p1 = \"p11\" or p2=\"p23\"", 3);
+     checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4);
+     checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4);
+     checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4);
+     checkFilter(client, dbName, tblName, "p1 = \"p11\" and p3 = 31", 1);
+     checkFilter(client, dbName, tblName, "p3 = -33 or p1 = \"p12\"", 3);
+ 
+     // Test not-equals operator for strings and integers.
+     checkFilter(client, dbName, tblName, "p1 != \"p11\"", 4);
+     checkFilter(client, dbName, tblName, "p2 != \"p23\"", 5);
+     checkFilter(client, dbName, tblName, "p2 != \"p33\"", 6);
+     checkFilter(client, dbName, tblName, "p3 != 32", 4);
+     checkFilter(client, dbName, tblName, "p3 != 8589934592", 6);
+     checkFilter(client, dbName, tblName, "p1 != \"p11\" and p1 != \"p12\"", 2);
+     checkFilter(client, dbName, tblName, "p1 != \"p11\" and p2 != \"p22\"", 4);
+     checkFilter(client, dbName, tblName, "p1 != \"p11\" or p2 != \"p22\"", 5);
+     checkFilter(client, dbName, tblName, "p1 != \"p12\" and p2 != \"p25\"", 3);
+     checkFilter(client, dbName, tblName, "p1 != \"p12\" or p2 != \"p25\"", 6);
+     checkFilter(client, dbName, tblName, "p3 != -33 or p1 != \"p13\"", 5);
+     checkFilter(client, dbName, tblName, "p1 != \"p11\" and p3 = 31", 2);
+     checkFilter(client, dbName, tblName, "p3 != 31 and p1 = \"p12\"", 1);
+ 
+     // Test reverse order.
+     checkFilter(client, dbName, tblName, "31 != p3 and p1 = \"p12\"", 1);
+     checkFilter(client, dbName, tblName, "\"p23\" = p2", 1);
+ 
+     // Test and/or more...
+     checkFilter(client, dbName, tblName,
+         "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\")", 3);
+     checkFilter(client, dbName, tblName,
+        "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\") Or " +
+        "(p1=\"p13\" aNd p2=\"p24\")", 4);
+     // Test and/or precedence
+     checkFilter(client, dbName, tblName,
+        "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1);
+     checkFilter(client, dbName, tblName,
+        "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2);
+ 
+     // Test gt/lt/lte/gte/like for strings.
+     checkFilter(client, dbName, tblName, "p1 > \"p12\"", 2);
+     checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 4);
+     checkFilter(client, dbName, tblName, "p1 < \"p12\"", 2);
+     checkFilter(client, dbName, tblName, "p1 <= \"p12\"", 4);
+     checkFilter(client, dbName, tblName, "p1 like \"p1.*\"", 6);
+     checkFilter(client, dbName, tblName, "p2 like \"p.*3\"", 1);
+ 
+     // Test gt/lt/lte/gte for numbers.
+     checkFilter(client, dbName, tblName, "p3 < 0", 1);
+     checkFilter(client, dbName, tblName, "p3 >= -33", 6);
+     checkFilter(client, dbName, tblName, "p3 > -33", 5);
+     checkFilter(client, dbName, tblName, "p3 > 31 and p3 < 32", 0);
+     checkFilter(client, dbName, tblName, "p3 > 31 or p3 < 31", 3);
+     checkFilter(client, dbName, tblName, "p3 > 30 or p3 < 30", 6);
+     checkFilter(client, dbName, tblName, "p3 >= 31 or p3 < -32", 6);
+     checkFilter(client, dbName, tblName, "p3 >= 32", 2);
+     checkFilter(client, dbName, tblName, "p3 > 32", 0);
+ 
+     // Test between
+     checkFilter(client, dbName, tblName, "p1 between \"p11\" and \"p12\"", 4);
+     checkFilter(client, dbName, tblName, "p1 not between \"p11\" and \"p12\"", 2);
+     checkFilter(client, dbName, tblName, "p3 not between 0 and 2", 6);
+     checkFilter(client, dbName, tblName, "p3 between 31 and 32", 5);
+     checkFilter(client, dbName, tblName, "p3 between 32 and 31", 0);
+     checkFilter(client, dbName, tblName, "p3 between -32 and 34 and p3 not between 31 and 32", 0);
+     checkFilter(client, dbName, tblName, "p3 between 1 and 3 or p3 not between 1 and 3", 6);
+     checkFilter(client, dbName, tblName,
+         "p3 between 31 and 32 and p1 between \"p12\" and \"p14\"", 3);
+ 
+     // Test setting the maximum partition count
+     List<Partition> partitions = client.listPartitionsByFilter(dbName,
+         tblName, "p1 >= \"p12\"", (short) 2);
+     assertEquals("User specified row limit for partitions",
+         2, partitions.size());
+ 
+     // Negative tests
+     Exception me = null;
+     try {
+       client.listPartitionsByFilter(dbName,
+           tblName, "p3 >= \"p12\"", (short) -1);
+     } catch(MetaException e) {
+       me = e;
+     }
+     assertNotNull(me);
+     assertTrue("Filter on int partition key", me.getMessage().contains(
+           "Filtering is supported only on partition keys of type string"));
+ 
+     me = null;
+     try {
+       client.listPartitionsByFilter(dbName,
+           tblName, "c1 >= \"p12\"", (short) -1);
+     } catch(MetaException e) {
+       me = e;
+     }
+     assertNotNull(me);
+     assertTrue("Filter on invalid key", me.getMessage().contains(
+           "<c1> is not a partitioning key for the table"));
+ 
+     me = null;
+     try {
+       client.listPartitionsByFilter(dbName,
+           tblName, "c1 >= ", (short) -1);
+     } catch(MetaException e) {
+       me = e;
+     }
+     assertNotNull(me);
+     assertTrue("Invalid filter string", me.getMessage().contain

<TRUNCATED>
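
The testPartitionFilter test above drives every case through a checkFilter helper whose body falls outside this excerpt. As a minimal sketch, inferred only from the call sites above and assuming the test class's existing imports (HiveMetaStoreClient, Partition, TException, JUnit's assertEquals), such a helper could look like the following; the committed helper may additionally verify partition values:

  private static void checkFilter(HiveMetaStoreClient client, String dbName,
      String tblName, String filter, int expectedCount) throws TException {
    // List all partitions matching the filter ((short) -1 means no limit) and
    // compare the count against the expectation encoded in the test above.
    List<Partition> partitions =
        client.listPartitionsByFilter(dbName, tblName, filter, (short) -1);
    assertEquals("Partition count for filter: " + filter,
        expectedCount, partitions.size());
  }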

[34/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 0000000,2454479..a5e6918
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@@ -1,0 -1,1226 +1,1268 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import org.apache.hadoop.hive.common.TableName;
 -import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 -import org.apache.hadoop.hive.metastore.api.ISchemaName;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
 -import org.apache.hadoop.hive.metastore.api.Catalog;
 -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 -
 -import java.nio.ByteBuffer;
 -import java.util.ArrayList;
 -import java.util.Collections;
 -import java.util.List;
 -import java.util.Map;
 -
 -import org.apache.hadoop.conf.Configurable;
 -import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
++import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
++import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+ import org.apache.hadoop.hive.metastore.api.ISchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+ import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+ import org.apache.hadoop.hive.metastore.api.WMNullablePool;
+ import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMTrigger;
+ import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+ import org.apache.hadoop.hive.metastore.api.Role;
+ import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+ import org.apache.hadoop.hive.metastore.api.RuntimeStat;
+ import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+ import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.TableMeta;
+ import org.apache.hadoop.hive.metastore.api.Type;
+ import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+ import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+ import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+ import org.apache.hadoop.hive.metastore.api.WMMapping;
+ import org.apache.hadoop.hive.metastore.api.WMPool;
++import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
++import org.apache.hadoop.hive.metastore.api.ISchemaName;
++import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
++
++import java.nio.ByteBuffer;
++import java.util.ArrayList;
++import java.util.Collections;
++import java.util.List;
++import java.util.Map;
++
++import org.apache.hadoop.conf.Configurable;
++import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
+ import org.apache.thrift.TException;
+ 
+ /**
+  * A wrapper around {@link org.apache.hadoop.hive.metastore.ObjectStore}
+  * with the ability to control the result of commitTransaction().
+  * All other functions simply delegate to an embedded ObjectStore object.
+  * Ideally, we should have just extended ObjectStore instead of using
+  * delegation.  However, since HiveMetaStore uses a Proxy, this class must
+  * not inherit from any other class.
+  */
+ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
+ 
+   private final ObjectStore objectStore;
+   public DummyRawStoreControlledCommit() {
+     objectStore = new ObjectStore();
+   }
+ 
+  /**
+   * If true, commitTransaction() simply delegates to the underlying ObjectStore.
+   * If false, commitTransaction() immediately returns false without committing.
+   */
+   private static boolean shouldCommitSucceed = true;
+   public static void setCommitSucceed(boolean flag) {
+     shouldCommitSucceed = flag;
+   }
+ 
+   @Override
+   public boolean commitTransaction() {
+     if (shouldCommitSucceed) {
+       return objectStore.commitTransaction();
+     } else {
+       return false;
+     }
+   }
+ 
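  // Usage sketch (an assumption for illustration, not part of this patch): a test
  // that installs this class as the metastore's RawStore implementation can force
  // every commitTransaction() to report failure, then restore the default behavior.
  private static void runWithFailingCommits(Runnable body) {
    setCommitSucceed(false);   // commitTransaction() now returns false
    try {
      body.run();              // exercise the metastore operation under test
    } finally {
      setCommitSucceed(true);  // restore normal delegation to ObjectStore
    }
  }
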
+   @Override
+   public boolean isActiveTransaction() {
+     return false;
+   }
+ 
+   // All remaining functions simply delegate to objectStore
+ 
+   @Override
+   public Configuration getConf() {
+     return objectStore.getConf();
+   }
+ 
+   @Override
+   public void setConf(Configuration conf) {
+     objectStore.setConf(conf);
+   }
+ 
+   @Override
+   public void shutdown() {
+     objectStore.shutdown();
+   }
+ 
+   @Override
+   public boolean openTransaction() {
+     return objectStore.openTransaction();
+   }
+ 
+   @Override
+   public void rollbackTransaction() {
+     objectStore.rollbackTransaction();
+   }
+ 
+   @Override
+   public void createCatalog(Catalog cat) throws MetaException {
+     objectStore.createCatalog(cat);
+   }
+ 
+   @Override
+   public void alterCatalog(String catName, Catalog cat) throws MetaException,
+       InvalidOperationException {
+     objectStore.alterCatalog(catName, cat);
+   }
+ 
+   @Override
+   public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     return objectStore.getCatalog(catalogName);
+   }
+ 
+   @Override
+   public List<String> getCatalogs() throws MetaException {
+     return objectStore.getCatalogs();
+   }
+ 
+   @Override
+   public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     objectStore.dropCatalog(catalogName);
+   }
+ 
+   @Override
+   public void createDatabase(Database db) throws InvalidObjectException, MetaException {
+     objectStore.createDatabase(db);
+   }
+ 
+   @Override
+   public Database getDatabase(String catName, String dbName) throws NoSuchObjectException {
+     return objectStore.getDatabase(catName, dbName);
+   }
+ 
+   @Override
+   public boolean dropDatabase(String catName, String dbName)
+       throws NoSuchObjectException, MetaException {
+     return objectStore.dropDatabase(catName, dbName);
+   }
+ 
+   @Override
+   public boolean alterDatabase(String catName, String dbName, Database db)
+       throws NoSuchObjectException, MetaException {
+ 
+     return objectStore.alterDatabase(catName, dbName, db);
+   }
+ 
+   @Override
+   public List<String> getDatabases(String catName, String pattern) throws MetaException {
+     return objectStore.getDatabases(catName, pattern);
+   }
+ 
+   @Override
+   public List<String> getAllDatabases(String catName) throws MetaException {
+     return objectStore.getAllDatabases(catName);
+   }
+ 
+   @Override
+   public boolean createType(Type type) {
+     return objectStore.createType(type);
+   }
+ 
+   @Override
+   public Type getType(String typeName) {
+     return objectStore.getType(typeName);
+   }
+ 
+   @Override
+   public boolean dropType(String typeName) {
+     return objectStore.dropType(typeName);
+   }
+ 
+   @Override
+   public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+     objectStore.createTable(tbl);
+   }
+ 
+   @Override
+   public boolean dropTable(String catName, String dbName, String tableName)
+       throws MetaException, NoSuchObjectException,
+       InvalidObjectException, InvalidInputException {
+     return objectStore.dropTable(catName, dbName, tableName);
+   }
+ 
+   @Override
+   public Table getTable(String catName, String dbName, String tableName) throws MetaException {
+     return objectStore.getTable(catName, dbName, tableName);
+   }
+ 
+   @Override
++  public Table getTable(String catName, String dbName, String tableName, long txnId, String writeIdList)
++      throws MetaException {
++    return objectStore.getTable(catName, dbName, tableName, txnId, writeIdList);
++  }
++
++  @Override
+   public boolean addPartition(Partition part)
+       throws InvalidObjectException, MetaException {
+     return objectStore.addPartition(part);
+   }
+ 
+   @Override
+   public Partition getPartition(String catName, String dbName, String tableName, List<String> partVals)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.getPartition(catName, dbName, tableName, partVals);
+   }
+ 
+   @Override
++  public Partition getPartition(String catName, String dbName, String tableName,
++                                List<String> partVals, long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return objectStore.getPartition(catName, dbName, tableName, partVals, txnId, writeIdList);
++  }
++
++  @Override
+   public boolean dropPartition(String catName, String dbName, String tableName, List<String> partVals)
+       throws MetaException, NoSuchObjectException,
+       InvalidObjectException, InvalidInputException {
+     return objectStore.dropPartition(catName, dbName, tableName, partVals);
+   }
+ 
+   @Override
+   public List<Partition> getPartitions(String catName, String dbName, String tableName, int max)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.getPartitions(catName, dbName, tableName, max);
+   }
+ 
+   @Override
+   public Map<String, String> getPartitionLocations(String catName, String dbName, String tblName,
+       String baseLocationToNotShow, int max) {
+     return objectStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max);
+   }
+ 
+   @Override
 -  public void alterTable(String catName, String dbName, String name, Table newTable)
++  public void alterTable(String catName, String dbName, String name, Table newTable,
++      long queryTxnId, String queryValidWriteIds)
+       throws InvalidObjectException, MetaException {
 -    objectStore.alterTable(catName, dbName, name, newTable);
++    objectStore.alterTable(catName, dbName, name, newTable, queryTxnId, queryValidWriteIds);
+   }
+ 
+   @Override
+   public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm)
+       throws MetaException {
+     objectStore.updateCreationMetadata(catName, dbname, tablename, cm);
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern) throws MetaException {
+     return objectStore.getTables(catName, dbName, pattern);
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern, TableType tableType) throws MetaException {
+     return objectStore.getTables(catName, dbName, pattern, tableType);
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.getMaterializedViewsForRewriting(catName, dbName);
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames, List<String> tableTypes)
+       throws MetaException {
+     return objectStore.getTableMeta(catName, dbNames, tableNames, tableTypes);
+   }
+ 
+   @Override
+   public List<Table> getTableObjectsByName(String catName, String dbName, List<String> tableNames)
+       throws MetaException, UnknownDBException {
+     return objectStore.getTableObjectsByName(catName, dbName, tableNames);
+   }
+ 
+   @Override
+   public List<String> getAllTables(String catName, String dbName) throws MetaException {
+     return objectStore.getAllTables(catName, dbName);
+   }
+ 
+   @Override
+   public List<String> listTableNamesByFilter(String catName, String dbName, String filter,
+       short maxTables) throws MetaException, UnknownDBException {
+     return objectStore.listTableNamesByFilter(catName, dbName, filter, maxTables);
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String dbName, String tblName, short maxParts)
+       throws MetaException {
+     return objectStore.listPartitionNames(catName, dbName, tblName, maxParts);
+   }
+ 
+   @Override
+   public PartitionValuesResponse listPartitionValues(String catName, String db_name,
+       String tbl_name, List<FieldSchema> cols, boolean applyDistinct, String filter,
+       boolean ascending, List<FieldSchema> order, long maxParts) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void alterPartition(String catName, String dbName, String tblName, List<String> partVals,
 -      Partition newPart) throws InvalidObjectException, MetaException {
 -    objectStore.alterPartition(catName, dbName, tblName, partVals, newPart);
++      Partition newPart, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
++    objectStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryTxnId, queryValidWriteIds);
+   }
+ 
+   @Override
+   public void alterPartitions(String catName, String dbName, String tblName,
 -      List<List<String>> partValsList, List<Partition> newParts)
 -      throws InvalidObjectException, MetaException {
 -    objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts);
++      List<List<String>> partValsList, List<Partition> newParts,
++      long writeId, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
++    objectStore.alterPartitions(
++        catName, dbName, tblName, partValsList, newParts, writeId, queryTxnId, queryValidWriteIds);
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByFilter(String catName, String dbName, String tblName,
+       String filter, short maxParts) throws MetaException, NoSuchObjectException {
+     return objectStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts);
+   }
+ 
+   @Override
+   public int getNumPartitionsByFilter(String catName, String dbName, String tblName,
+                                       String filter) throws MetaException, NoSuchObjectException {
+     return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter);
+   }
+ 
+   @Override
+   public int getNumPartitionsByExpr(String catName, String dbName, String tblName,
+                                       byte[] expr) throws MetaException, NoSuchObjectException {
+     return objectStore.getNumPartitionsByExpr(catName, dbName, tblName, expr);
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
+       List<String> partNames) throws MetaException, NoSuchObjectException {
+     return objectStore.getPartitionsByNames(catName, dbName, tblName, partNames);
+   }
+ 
+   @Override
+   public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr,
+       String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
+     return objectStore.getPartitionsByExpr(catName,
+         dbName, tblName, expr, defaultPartitionName, maxParts, result);
+   }
+ 
+   @Override
+   public Table markPartitionForEvent(String catName, String dbName, String tblName,
+       Map<String, String> partVals, PartitionEventType evtType)
+       throws MetaException, UnknownTableException, InvalidPartitionException,
+       UnknownPartitionException {
+     return objectStore.markPartitionForEvent(catName, dbName, tblName, partVals, evtType);
+   }
+ 
+   @Override
+   public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName,
+       Map<String, String> partName, PartitionEventType evtType)
+       throws MetaException, UnknownTableException, InvalidPartitionException,
+       UnknownPartitionException {
+     return objectStore.isPartitionMarkedForEvent(catName, dbName, tblName, partName, evtType);
+   }
+ 
+   @Override
+   public boolean addRole(String rowName, String ownerName) throws InvalidObjectException,
+       MetaException, NoSuchObjectException {
+     return objectStore.addRole(rowName, ownerName);
+   }
+ 
+   @Override
+   public boolean removeRole(String roleName)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.removeRole(roleName);
+   }
+ 
+   @Override
+   public boolean grantRole(Role role, String userName, PrincipalType principalType,
+       String grantor, PrincipalType grantorType, boolean grantOption)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     return objectStore.grantRole(role, userName, principalType, grantor, grantorType,
+         grantOption);
+   }
+ 
+   @Override
+   public boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.revokeRole(role, userName, principalType, grantOption);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return objectStore.getUserPrivilegeSet(userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return objectStore.getDBPrivilegeSet(catName, dbName, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName,
+       String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+     return objectStore.getTablePrivilegeSet(catName, dbName, tableName, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName,
+       String partition, String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+     return objectStore.getPartitionPrivilegeSet(catName, dbName, tableName, partition,
+         userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName,
+       String partitionName, String columnName, String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+     return objectStore.getColumnPrivilegeSet(catName, dbName, tableName, partitionName,
+         columnName, userName, groupNames);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalGlobalGrants(String principalName,
+       PrincipalType principalType) {
+     return objectStore.listPrincipalGlobalGrants(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName) {
+     return objectStore.listPrincipalDBGrants(principalName, principalType, catName, dbName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName) {
+     return objectStore.listAllTableGrants(principalName, principalType,
+         catName, dbName, tableName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName, List<String> partValues,
+       String partName) {
+     return objectStore.listPrincipalPartitionGrants(principalName, principalType,
+         catName, dbName, tableName, partValues, partName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName, String columnName) {
+     return objectStore.listPrincipalTableColumnGrants(principalName, principalType,
+         catName, dbName, tableName, columnName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName, String tableName,
+       List<String> partVals, String partName, String columnName) {
+     return objectStore.listPrincipalPartitionColumnGrants(principalName, principalType,
+         catName, dbName, tableName, partVals, partName, columnName);
+   }
+ 
+   @Override
+   public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException,
+       MetaException, NoSuchObjectException {
+     return objectStore.grantPrivileges(privileges);
+   }
+ 
+   @Override
+   public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return objectStore.revokePrivileges(privileges, grantOption);
+   }
+ 
+   @Override
+   public boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges)
+           throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return objectStore.refreshPrivileges(objToRefresh, authorizer, grantPrivileges);
+   }
+   @Override
+   public Role getRole(String roleName) throws NoSuchObjectException {
+     return objectStore.getRole(roleName);
+   }
+ 
+   @Override
+   public List<String> listRoleNames() {
+     return objectStore.listRoleNames();
+   }
+ 
+   @Override
+   public List<Role> listRoles(String principalName, PrincipalType principalType) {
+     return objectStore.listRoles(principalName, principalType);
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRolesWithGrants(String principalName,
+                                                       PrincipalType principalType) {
+     return objectStore.listRolesWithGrants(principalName, principalType);
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRoleMembers(String roleName) {
+     return objectStore.listRoleMembers(roleName);
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuth(String catName, String dbName, String tblName,
+       List<String> partVals, String userName, List<String> groupNames)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     return objectStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName,
+         groupNames);
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsWithAuth(String catName, String dbName, String tblName,
+       short maxParts, String userName, List<String> groupNames)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     return objectStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName,
+         groupNames);
+   }
+ 
+   @Override
+   public List<String> listPartitionNamesPs(String catName, String dbName, String tblName,
+       List<String> partVals, short maxParts)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts);
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsPsWithAuth(String catName, String dbName, String tblName,
+       List<String> partVals, short maxParts, String userName, List<String> groupNames)
+       throws MetaException, InvalidObjectException, NoSuchObjectException {
+     return objectStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts,
+         userName, groupNames);
+   }
+ 
+   @Override
+   public long cleanupEvents() {
+     return objectStore.cleanupEvents();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return objectStore.listPrincipalDBGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return objectStore.listPrincipalTableGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return objectStore.listPrincipalPartitionGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return objectStore.listPrincipalTableColumnGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return objectStore.listPrincipalPartitionColumnGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listGlobalGrantsAll() {
+     return objectStore.listGlobalGrantsAll();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listDBGrantsAll(String catName, String dbName) {
+     return objectStore.listDBGrantsAll(catName, dbName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String catName, String dbName, String tableName,
+       String partitionName, String columnName) {
+     return objectStore.listPartitionColumnGrantsAll(catName, dbName, tableName, partitionName, columnName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listTableGrantsAll(String catName, String dbName, String tableName) {
+     return objectStore.listTableGrantsAll(catName, dbName, tableName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPartitionGrantsAll(String catName, String dbName, String tableName,
+       String partitionName) {
+     return objectStore.listPartitionGrantsAll(catName, dbName, tableName, partitionName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listTableColumnGrantsAll(String catName, String dbName, String tableName,
+       String columnName) {
+     return objectStore.listTableColumnGrantsAll(catName, dbName, tableName, columnName);
+   }
+ 
+   @Override
+   public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
+       List<String> colNames) throws MetaException, NoSuchObjectException {
+     return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames);
+   }
+ 
+   @Override
++  public ColumnStatistics getTableColumnStatistics(String catName, String dbName,
++                                                   String tableName, List<String> colNames,
++                                                   long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return objectStore.getTableColumnStatistics(
++        catName, dbName, tableName, colNames, txnId, writeIdList);
++  }
++
++  @Override
+   public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
+                                              String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+     return objectStore.deleteTableColumnStatistics(catName, dbName, tableName, colName);
+   }
+ 
+   @Override
+   public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName,
+       String partName, List<String> partVals, String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException,
+       InvalidInputException {
+     return objectStore.deletePartitionColumnStatistics(catName, dbName, tableName, partName,
+         partVals, colName);
+   }
+ 
+   @Override
 -  public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
++  public boolean updateTableColumnStatistics(ColumnStatistics statsObj, long txnId, String validWriteIds, long writeId)
+       throws NoSuchObjectException, MetaException, InvalidObjectException,
+       InvalidInputException {
 -    return objectStore.updateTableColumnStatistics(statsObj);
++    return objectStore.updateTableColumnStatistics(statsObj, txnId, validWriteIds, writeId);
+   }
+ 
+   @Override
+   public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
 -      List<String> partVals)
++      List<String> partVals, long txnId, String validWriteIds, long writeId)
+       throws NoSuchObjectException, MetaException, InvalidObjectException,
+       InvalidInputException {
 -    return objectStore.updatePartitionColumnStatistics(statsObj, partVals);
++    return objectStore.updatePartitionColumnStatistics(statsObj, partVals, txnId, validWriteIds, writeId);
+   }
+ 
+   @Override
+   public boolean addToken(String tokenIdentifier, String delegationToken) {
+     return false;
+   }
+ 
+   @Override
+   public boolean removeToken(String tokenIdentifier) {
+     return false;
+   }
+ 
+   @Override
+   public String getToken(String tokenIdentifier) {
+     return "";
+   }
+ 
+   @Override
+   public List<String> getAllTokenIdentifiers() {
+     return new ArrayList<>();
+   }
+ 
+   @Override
+   public int addMasterKey(String key) throws MetaException {
+     return -1;
+   }
+ 
+   @Override
+   public void updateMasterKey(Integer seqNo, String key)
+     throws NoSuchObjectException, MetaException {}
+ 
+   @Override
+   public boolean removeMasterKey(Integer keySeq) {
+     return false;
+   }
+ 
+   @Override
+   public String[] getMasterKeys() {
+     return new String[0];
+   }
+ 
+   @Override
+   public void verifySchema() throws MetaException {
+   }
+ 
+   @Override
+   public String getMetaStoreSchemaVersion() throws MetaException {
+     return objectStore.getMetaStoreSchemaVersion();
+   }
+ 
+   @Override
+   public void setMetaStoreSchemaVersion(String schemaVersion, String comment) throws MetaException {
+     objectStore.setMetaStoreSchemaVersion(schemaVersion, comment);
+ 
+   }
+ 
+   @Override
+   public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName,
+       String tblName, List<String> colNames, List<String> partNames)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.getPartitionColumnStatistics(catName, dbName, tblName, colNames, partNames);
+   }
+ 
+   @Override
++  public List<ColumnStatistics> getPartitionColumnStatistics(
++      String catName, String dbName, String tblName, List<String> partNames,
++      List<String> colNames, long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return objectStore.getPartitionColumnStatistics(
++             catName, dbName, tblName, colNames, partNames, txnId, writeIdList);
++  }
++
++  @Override
+   public boolean doesPartitionExist(String catName, String dbName, String tableName,
+       List<FieldSchema> partKeys, List<String> partVals)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.doesPartitionExist(catName, dbName, tableName, partKeys, partVals);
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, List<Partition> parts)
+       throws InvalidObjectException, MetaException {
+     return objectStore.addPartitions(catName, dbName, tblName, parts);
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException {
+     return false;
+   }
+ 
+   @Override
+   public void dropPartitions(String catName, String dbName, String tblName, List<String> partNames)
+       throws MetaException, NoSuchObjectException {
+     objectStore.dropPartitions(catName, dbName, tblName, partNames);
+   }
+ 
+   @Override
+   public void createFunction(Function func) throws InvalidObjectException,
+       MetaException {
+     objectStore.createFunction(func);
+   }
+ 
+   @Override
+   public void alterFunction(String catName, String dbName, String funcName, Function newFunction)
+       throws InvalidObjectException, MetaException {
+     objectStore.alterFunction(catName, dbName, funcName, newFunction);
+   }
+ 
+   @Override
+   public void dropFunction(String catName, String dbName, String funcName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException,
+       InvalidInputException {
+     objectStore.dropFunction(catName, dbName, funcName);
+   }
+ 
+   @Override
+   public Function getFunction(String catName, String dbName, String funcName)
+       throws MetaException {
+     return objectStore.getFunction(catName, dbName, funcName);
+   }
+ 
+   @Override
+   public List<Function> getAllFunctions(String catName)
+           throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getFunctions(String catName, String dbName, String pattern)
+       throws MetaException {
+     return objectStore.getFunctions(catName, dbName, pattern);
+   }
+ 
+   @Override
+   public AggrStats get_aggr_stats_for(String catName, String dbName,
+       String tblName, List<String> partNames, List<String> colNames)
+       throws MetaException {
+     return null;
+   }
+ 
+   @Override
++  public AggrStats get_aggr_stats_for(String catName, String dbName,
++                                      String tblName, List<String> partNames,
++                                      List<String> colNames,
++                                      long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return null;
++  }
++
++  @Override
+   public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
+     return objectStore.getNextNotification(rqst);
+   }
+ 
+   @Override
+   public void addNotificationEvent(NotificationEvent event) throws MetaException {
+     objectStore.addNotificationEvent(event);
+   }
+ 
+   @Override
+   public void cleanNotificationEvents(int olderThan) {
+     objectStore.cleanNotificationEvents(olderThan);
+   }
+ 
+   @Override
+   public CurrentNotificationEventId getCurrentNotificationEventId() {
+     return objectStore.getCurrentNotificationEventId();
+   }
+ 
+   @Override
+   public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) {
+     return  objectStore.getNotificationEventsCount(rqst);
+   }
+ 
+   @Override
+   public void flushCache() {
+     objectStore.flushCache();
+   }
+ 
+   @Override
+   public ByteBuffer[] getFileMetadata(List<Long> fileIds) {
+     return null;
+   }
+ 
+   @Override
+   public void putFileMetadata(
+       List<Long> fileIds, List<ByteBuffer> metadata, FileMetadataExprType type) {
+   }
+ 
+   @Override
+   public boolean isFileMetadataSupported() {
+     return false;
+   }
+ 
+ 
+   @Override
+   public void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[] expr,
+       ByteBuffer[] metadatas, ByteBuffer[] stripeBitsets, boolean[] eliminated) {
+   }
+ 
+   @Override
+   public int getTableCount() throws MetaException {
+     return objectStore.getTableCount();
+   }
+ 
+   @Override
+   public int getPartitionCount() throws MetaException {
+     return objectStore.getPartitionCount();
+   }
+ 
+   @Override
+   public int getDatabaseCount() throws MetaException {
+     return objectStore.getDatabaseCount();
+   }
+ 
+   @Override
+   public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
+     return null;
+   }
+ 
+   @Override
+   public List<SQLPrimaryKey> getPrimaryKeys(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLForeignKey> getForeignKeys(String catName, String parent_db_name,
+     String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLUniqueConstraint> getUniqueConstraints(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLNotNullConstraint> getNotNullConstraints(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLDefaultConstraint> getDefaultConstraints(String catName, String db_name, String tbl_name)
+       throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLCheckConstraint> getCheckConstraints(String catName, String db_name, String tbl_name)
+       throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> createTableWithConstraints(Table tbl,
+     List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+     List<SQLUniqueConstraint> uniqueConstraints,
+     List<SQLNotNullConstraint> notNullConstraints,
+     List<SQLDefaultConstraint> defaultConstraints,
+     List<SQLCheckConstraint> checkConstraints)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public void dropConstraint(String catName, String dbName, String tableName,
+    String constraintName, boolean missingOk) throws NoSuchObjectException {
+    // TODO Auto-generated method stub
+   }
+ 
+   @Override
+   public List<String> addPrimaryKeys(List<SQLPrimaryKey> pks)
+     throws InvalidObjectException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public List<String> addForeignKeys(List<SQLForeignKey> fks)
+     throws InvalidObjectException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public List<String> addUniqueConstraints(List<SQLUniqueConstraint> uks)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addNotNullConstraints(List<SQLNotNullConstraint> nns)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addDefaultConstraints(List<SQLDefaultConstraint> nns)
+       throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addCheckConstraints(List<SQLCheckConstraint> nns)
+       throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public String getMetastoreDbUuid() throws MetaException {
+     throw new MetaException("Get metastore uuid is not implemented");
+   }
+ 
+   @Override
+   public void createResourcePlan(WMResourcePlan resourcePlan, String copyFrom, int defaultPoolSize)
+       throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException {
+     objectStore.createResourcePlan(resourcePlan, copyFrom, defaultPoolSize);
+   }
+ 
+   @Override
+   public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException,
+       MetaException {
+     return objectStore.getResourcePlan(name);
+   }
+ 
+   @Override
+   public List<WMResourcePlan> getAllResourcePlans() throws MetaException {
+     return objectStore.getAllResourcePlans();
+   }
+ 
+   @Override
+   public WMFullResourcePlan alterResourcePlan(String name, WMNullableResourcePlan resourcePlan,
+       boolean canActivateDisabled, boolean canDeactivate, boolean isReplace)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+           MetaException {
+     return objectStore.alterResourcePlan(
+       name, resourcePlan, canActivateDisabled, canDeactivate, isReplace);
+   }
+ 
+   @Override
+   public WMFullResourcePlan getActiveResourcePlan() throws MetaException {
+     return objectStore.getActiveResourcePlan();
+   }
+ 
+   @Override
+   public WMValidateResourcePlanResponse validateResourcePlan(String name)
+       throws NoSuchObjectException, InvalidObjectException, MetaException {
+     return objectStore.validateResourcePlan(name);
+   }
+ 
+   @Override
+   public void dropResourcePlan(String name) throws NoSuchObjectException, MetaException {
+     objectStore.dropResourcePlan(name);
+   }
+ 
+   @Override
+   public void createWMTrigger(WMTrigger trigger)
+       throws AlreadyExistsException, MetaException, NoSuchObjectException,
+           InvalidOperationException {
+     objectStore.createWMTrigger(trigger);
+   }
+ 
+   @Override
+   public void alterWMTrigger(WMTrigger trigger)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.alterWMTrigger(trigger);
+   }
+ 
+   @Override
+   public void dropWMTrigger(String resourcePlanName, String triggerName)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.dropWMTrigger(resourcePlanName, triggerName);
+   }
+ 
+   @Override
+   public List<WMTrigger> getTriggersForResourcePlan(String resourcePlanName)
+       throws NoSuchObjectException, MetaException {
+     return objectStore.getTriggersForResourcePlan(resourcePlanName);
+   }
+ 
+   @Override
+   public void createPool(WMPool pool) throws AlreadyExistsException, NoSuchObjectException,
+       InvalidOperationException, MetaException {
+     objectStore.createPool(pool);
+   }
+ 
+   @Override
+   public void alterPool(WMNullablePool pool, String poolPath) throws AlreadyExistsException,
+       NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.alterPool(pool, poolPath);
+   }
+ 
+   @Override
+   public void dropWMPool(String resourcePlanName, String poolPath)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.dropWMPool(resourcePlanName, poolPath);
+   }
+ 
+   @Override
+   public void createOrUpdateWMMapping(WMMapping mapping, boolean update)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+       MetaException {
+     objectStore.createOrUpdateWMMapping(mapping, update);
+   }
+ 
+   @Override
+   public void dropWMMapping(WMMapping mapping)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.dropWMMapping(mapping);
+   }
+ 
+   @Override
+   public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+       String poolPath) throws AlreadyExistsException, NoSuchObjectException,
+       InvalidOperationException, MetaException {
+     objectStore.createWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
+   }
+ 
+   @Override
+   public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+       String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
+   }
+ 
+ 
+   @Override
+   public List<ColStatsObjWithSourceInfo> getPartitionColStatsForDatabase(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException,
+       NoSuchObjectException {
+     objectStore.createISchema(schema);
+   }
+ 
+   @Override
+   public void alterISchema(ISchemaName schemaName, ISchema newSchema) throws NoSuchObjectException,
+       MetaException {
+     objectStore.alterISchema(schemaName, newSchema);
+   }
+ 
+   @Override
+   public ISchema getISchema(ISchemaName schemaName) throws MetaException {
+     return objectStore.getISchema(schemaName);
+   }
+ 
+   @Override
+   public void dropISchema(ISchemaName schemaName) throws NoSuchObjectException, MetaException {
+     objectStore.dropISchema(schemaName);
+   }
+ 
+   @Override
+   public void addSchemaVersion(SchemaVersion schemaVersion) throws
+       AlreadyExistsException, InvalidObjectException, NoSuchObjectException, MetaException {
+     objectStore.addSchemaVersion(schemaVersion);
+   }
+ 
+   @Override
+   public void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion newVersion) throws
+       NoSuchObjectException, MetaException {
+     objectStore.alterSchemaVersion(version, newVersion);
+   }
+ 
+   @Override
+   public SchemaVersion getSchemaVersion(SchemaVersionDescriptor version) throws MetaException {
+     return objectStore.getSchemaVersion(version);
+   }
+ 
+   @Override
+   public SchemaVersion getLatestSchemaVersion(ISchemaName schemaName) throws MetaException {
+     return objectStore.getLatestSchemaVersion(schemaName);
+   }
+ 
+   @Override
+   public List<SchemaVersion> getAllSchemaVersion(ISchemaName schemaName) throws MetaException {
+     return objectStore.getAllSchemaVersion(schemaName);
+   }
+ 
+   @Override
+   public List<SchemaVersion> getSchemaVersionsByColumns(String colName, String colNamespace,
+                                                         String type) throws MetaException {
+     return objectStore.getSchemaVersionsByColumns(colName, colNamespace, type);
+   }
+ 
+   @Override
+   public void dropSchemaVersion(SchemaVersionDescriptor version) throws NoSuchObjectException,
+       MetaException {
+     objectStore.dropSchemaVersion(version);
+   }
+ 
+   @Override
+   public SerDeInfo getSerDeInfo(String serDeName) throws NoSuchObjectException, MetaException {
+     return objectStore.getSerDeInfo(serDeName);
+   }
+ 
+   @Override
+   public void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException {
+     objectStore.addSerde(serde);
+   }
+ 
+   @Override
+   public void addRuntimeStat(RuntimeStat stat) throws MetaException {
+     objectStore.addRuntimeStat(stat);
+   }
+ 
+   @Override
+   public List<RuntimeStat> getRuntimeStats(int maxEntries, int maxCreateTime) throws MetaException {
+     return objectStore.getRuntimeStats(maxEntries, maxCreateTime);
+   }
+ 
+   @Override
+   public int deleteRuntimeStats(int maxRetainSecs) throws MetaException {
+     return objectStore.deleteRuntimeStats(maxRetainSecs);
+   }
+ 
+   @Override
+   public void cleanWriteNotificationEvents(int olderThan) {
+     objectStore.cleanWriteNotificationEvents(olderThan);
+   }
+ 
+   @Override
+   public List<WriteEventInfo> getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException {
+     return objectStore.getAllWriteEventInfo(txnId, dbName, tableName);
+   }
+ 
+   @Override
+   public List<TableName> getTableNamesWithStats() throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public List<TableName> getAllTableNamesForStats() throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public Map<String, List<String>> getPartitionColsWithStats(String catName,
+       String dbName, String tableName) throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ }


[33/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 0000000,9b79446..8270f6a
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@@ -1,0 -1,1212 +1,1249 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import org.apache.hadoop.hive.common.TableName;
+ import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+ import org.apache.hadoop.hive.metastore.api.ISchemaName;
+ import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+ 
+ import java.nio.ByteBuffer;
+ import java.util.Collections;
+ import java.util.List;
+ import java.util.Map;
+ 
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+ import org.apache.hadoop.hive.metastore.api.ISchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+ import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+ import org.apache.hadoop.hive.metastore.api.WMNullablePool;
+ import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMTrigger;
+ import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+ import org.apache.hadoop.hive.metastore.api.Role;
+ import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+ import org.apache.hadoop.hive.metastore.api.RuntimeStat;
+ import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+ import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.TableMeta;
+ import org.apache.hadoop.hive.metastore.api.Type;
+ import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+ import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+ import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+ import org.apache.hadoop.hive.metastore.api.WMMapping;
+ import org.apache.hadoop.hive.metastore.api.WMPool;
+ import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.thrift.TException;
+ import org.junit.Assert;
+ 
+ /**
+  *
+  * DummyRawStoreForJdoConnection.
+  *
+  * An implementation of RawStore that verifies the DummyJdoConnectionUrlHook has already been
+  * applied by the time this class's setConf method is called, by checking that the value of the
+  * MetastoreConf CONNECT_URL_KEY ConfVar has been updated to the hook's URL.
+  *
+  * All non-void methods return default values.
+  */
+ public class DummyRawStoreForJdoConnection implements RawStore {
+ 
+   @Override
+   public Configuration getConf() {
+ 
+     return null;
+   }
+ 
+   @Override
+   public void setConf(Configuration arg0) {
+     String expected = DummyJdoConnectionUrlHook.newUrl;
+     String actual = MetastoreConf.getVar(arg0, MetastoreConf.ConfVars.CONNECT_URL_KEY);
+ 
+     Assert.assertEquals("The expected URL used by JDO to connect to the metastore: " + expected +
+         " did not match the actual value when the Raw Store was initialized: " + actual,
+         expected, actual);
+   }
+ 
+   @Override
+   public void shutdown() {
+ 
+ 
+   }
+ 
+   @Override
+   public boolean openTransaction() {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean commitTransaction() {
+     return false;
+   }
+ 
+   @Override
+   public boolean isActiveTransaction() {
+     return false;
+   }
+ 
+   @Override
+   public void rollbackTransaction() {
+   }
+ 
+   @Override
+   public void createCatalog(Catalog cat) throws MetaException {
+ 
+   }
+ 
+   @Override
+   public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void alterCatalog(String catName, Catalog cat) throws MetaException,
+       InvalidOperationException {
+ 
+   }
+ 
+   @Override
+   public List<String> getCatalogs() throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+ 
+   }
+ 
+   @Override
+   public void createDatabase(Database db) throws InvalidObjectException, MetaException {
+ 
+ 
+   }
+ 
+   @Override
+   public Database getDatabase(String catName, String name) throws NoSuchObjectException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public boolean dropDatabase(String catName, String dbname) throws NoSuchObjectException, MetaException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean alterDatabase(String catName, String dbname, Database db) throws NoSuchObjectException,
+       MetaException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public List<String> getDatabases(String catName, String pattern) throws MetaException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getAllDatabases(String catName) throws MetaException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public boolean createType(Type type) {
+ 
+     return false;
+   }
+ 
+   @Override
+   public Type getType(String typeName) {
+ 
+     return null;
+   }
+ 
+   @Override
+   public boolean dropType(String typeName) {
+ 
+     return false;
+   }
+ 
+   @Override
+   public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+ 
+ 
+   }
+ 
+   @Override
+   public boolean dropTable(String catName, String dbName, String tableName) throws MetaException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public Table getTable(String catName, String dbName, String tableName) throws MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
++  public Table getTable(String catalogName, String dbName, String tableName,
++                        long txnid, String writeIdList) throws MetaException {
++    return null;
++  }
++
++  @Override
+   public boolean addPartition(Partition part) throws InvalidObjectException, MetaException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public Partition getPartition(String catName, String dbName, String tableName, List<String> part_vals)
+       throws MetaException, NoSuchObjectException {
+ 
+     return null;
+   }
+ 
+   @Override
++  public Partition getPartition(String catName, String dbName, String tableName, List<String> part_vals,
++                                long txnid, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return null;
++  }
++
++  @Override
+   public boolean dropPartition(String catName, String dbName, String tableName, List<String> part_vals)
+       throws MetaException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public List<Partition> getPartitions(String catName, String dbName, String tableName, int max)
+       throws MetaException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public Map<String, String> getPartitionLocations(String catName, String dbName, String tblName,
+       String baseLocationToNotShow, int max) {
+     return Collections.emptyMap();
+   }
+ 
+   @Override
 -  public void alterTable(String catName, String dbname, String name, Table newTable)
++  public void alterTable(String catName, String dbname, String name, Table newTable, long queryTxnId, String queryValidWriteIds)
+       throws InvalidObjectException, MetaException {
+   }
+ 
+   @Override
+   public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm)
+       throws MetaException {
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern) throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern, TableType tableType) throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames, List<String> tableTypes)
+       throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<Table> getTableObjectsByName(String catName, String dbname, List<String> tableNames)
+       throws MetaException, UnknownDBException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getAllTables(String catName, String dbName) throws MetaException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> listTableNamesByFilter(String catName, String dbName, String filter, short max_tables)
+       throws MetaException, UnknownDBException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String db_name, String tbl_name, short max_parts)
+       throws MetaException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public PartitionValuesResponse listPartitionValues(String catName, String db_name,
+                                                      String tbl_name, List<FieldSchema> cols,
+                                                      boolean applyDistinct, String filter,
+                                                      boolean ascending, List<FieldSchema> order,
+                                                      long maxParts) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void alterPartition(String catName, String db_name, String tbl_name, List<String> part_vals,
 -      Partition new_part) throws InvalidObjectException, MetaException {
++      Partition new_part, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
+   }
+ 
+   @Override
+   public void alterPartitions(String catName, String db_name, String tbl_name,
 -                              List<List<String>> part_vals_list, List<Partition> new_parts)
 -      throws InvalidObjectException, MetaException {
 -
 -
++                              List<List<String>> part_vals_list, List<Partition> new_parts,
++                              long writeId, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByFilter(String catName, String dbName, String tblName,
+                                                String filter, short maxParts)
+       throws MetaException, NoSuchObjectException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
+       List<String> partNames) throws MetaException, NoSuchObjectException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr,
+       String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
+     return false;
+   }
+ 
+   @Override
+   public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter)
+     throws MetaException, NoSuchObjectException {
+     return -1;
+   }
+ 
+   @Override
+   public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr)
+       throws MetaException, NoSuchObjectException {
+     return -1;
+   }
+ 
+   @Override
+   public Table markPartitionForEvent(String catName, String dbName, String tblName, Map<String, String> partVals,
+       PartitionEventType evtType) throws MetaException, UnknownTableException,
+       InvalidPartitionException, UnknownPartitionException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName,
+       Map<String, String> partName, PartitionEventType evtType) throws MetaException,
+       UnknownTableException, InvalidPartitionException, UnknownPartitionException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean addRole(String rowName, String ownerName) throws InvalidObjectException,
+       MetaException, NoSuchObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean removeRole(String roleName) throws MetaException, NoSuchObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor,
+       PrincipalType grantorType, boolean grantOption) throws MetaException, NoSuchObjectException,
+       InvalidObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption)
+       throws MetaException, NoSuchObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName,
+       String userName, List<String> groupNames) throws InvalidObjectException, MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName,
+       String partition, String userName, List<String> groupNames) throws InvalidObjectException,
+       MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName,
+       String partitionName, String columnName, String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalGlobalGrants(String principalName,
+       PrincipalType principalType) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName, List<String> partValues,
+       String partName) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName, String columnName) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName, List<String> partVals,
+       String partName, String columnName) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException,
+       MetaException, NoSuchObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return false;
+   }
+ 
+   @Override
+   public Role getRole(String roleName) throws NoSuchObjectException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public List<String> listRoleNames() {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<Role> listRoles(String principalName, PrincipalType principalType) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRolesWithGrants(String principalName,
+                                                       PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRoleMembers(String roleName) {
+     return null;
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List<String> partVals,
+       String user_name, List<String> group_names) throws MetaException, NoSuchObjectException,
+       InvalidObjectException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsWithAuth(String catName, String dbName, String tblName, short maxParts,
+       String userName, List<String> groupNames) throws MetaException, NoSuchObjectException,
+       InvalidObjectException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> listPartitionNamesPs(String catName, String db_name, String tbl_name, List<String> part_vals,
+       short max_parts) throws MetaException, NoSuchObjectException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsPsWithAuth(String catName, String db_name, String tbl_name,
+       List<String> part_vals, short max_parts, String userName, List<String> groupNames)
+       throws MetaException, InvalidObjectException, NoSuchObjectException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public long cleanupEvents() {
+ 
+     return 0;
+   }
+ 
+   @Override
+   public boolean addToken(String tokenIdentifier, String delegationToken) {
+     return false;
+   }
+ 
+   @Override
+   public boolean removeToken(String tokenIdentifier) {
+     return false;
+   }
+ 
+   @Override
+   public String getToken(String tokenIdentifier) {
+     return null;
+   }
+ 
+   @Override
+   public List<String> getAllTokenIdentifiers() {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public int addMasterKey(String key) {
+     return 0;
+   }
+ 
+   @Override
+   public void updateMasterKey(Integer seqNo, String key) {
+   }
+ 
+   @Override
+   public boolean removeMasterKey(Integer keySeq) {
+     return false;
+   }
+ 
+   @Override
+   public String[] getMasterKeys() {
+     return new String[0];
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listGlobalGrantsAll() {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listDBGrantsAll(String catName, String dbName) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String catName, String dbName, String tableName, String partitionName, String columnName) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listTableGrantsAll(String catName, String dbName, String tableName) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPartitionGrantsAll(String catName, String dbName, String tableName, String partitionName) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listTableColumnGrantsAll(String catName, String dbName, String tableName, String columnName) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public  ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
+       List<String> colName) throws MetaException, NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
++  public ColumnStatistics getTableColumnStatistics(
++      String catName, String dbName, String tableName, List<String> colName,
++      long txnid, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return null;
++  }
++
++  @Override
+   public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
+                                              String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException {
+     return false;
+   }
+ 
+ 
+   @Override
+   public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName,
+     String partName, List<String> partVals, String colName)
+     throws NoSuchObjectException, MetaException, InvalidObjectException,
+     InvalidInputException {
+     return false;
+ 
+   }
+ 
+   @Override
 -  public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
++  public boolean updateTableColumnStatistics(ColumnStatistics statsObj,
++      long txnId, String validWriteIds, long writeId)
+       throws NoSuchObjectException, MetaException, InvalidObjectException {
+     return false;
+   }
+ 
+   @Override
 -  public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,List<String> partVals)
++  public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,List<String> partVals,
++      long txnId, String validWriteIds, long writeId)
+     throws NoSuchObjectException, MetaException, InvalidObjectException {
+     return false;
+   }
+ 
+   @Override
+   public void verifySchema() throws MetaException {
+   }
+ 
+   @Override
+   public String getMetaStoreSchemaVersion() throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void setMetaStoreSchemaVersion(String version, String comment) throws MetaException {
+   }
+ 
+   @Override
+   public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName,
+       String tblName, List<String> colNames, List<String> partNames)
+       throws MetaException, NoSuchObjectException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
++  public List<ColumnStatistics> getPartitionColumnStatistics(
++      String catName, String dbName, String tblName, List<String> partNames,
++      List<String> colNames, long txnid, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return Collections.emptyList();
++  }
++
++  @Override
+   public boolean doesPartitionExist(String catName, String dbName, String tableName,
+       List<FieldSchema> partKeys, List<String> partVals)
+       throws MetaException, NoSuchObjectException {
+     return false;
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, List<Partition> parts)
+       throws InvalidObjectException, MetaException {
+     return false;
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException {
+     return false;
+   }
+ 
+   @Override
+   public void dropPartitions(String catName, String dbName, String tblName, List<String> partNames) {
+   }
+ 
+   @Override
+   public void createFunction(Function func) throws InvalidObjectException,
+       MetaException {
+   }
+ 
+   @Override
+   public void alterFunction(String catName, String dbName, String funcName, Function newFunction)
+       throws InvalidObjectException, MetaException {
+   }
+ 
+   @Override
+   public void dropFunction(String catName, String dbName, String funcName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException,
+       InvalidInputException {
+   }
+ 
+   @Override
+   public Function getFunction(String catName, String dbName, String funcName)
+       throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public List<Function> getAllFunctions(String catName)
+           throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getFunctions(String catName, String dbName, String pattern)
+       throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public AggrStats get_aggr_stats_for(String catName, String dbName,
+       String tblName, List<String> partNames, List<String> colNames)
+       throws MetaException {
+     return null;
+   }
+ 
+   @Override
++  public AggrStats get_aggr_stats_for(
++      String catName, String dbName, String tblName, List<String> partNames,
++      List<String> colNames, long txnid, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return null;
++  }
++
++  @Override
+   public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
+     return null;
+   }
+ 
+   @Override
+   public void addNotificationEvent(NotificationEvent event) throws MetaException {
+ 
+   }
+ 
+   @Override
+   public void cleanNotificationEvents(int olderThan) {
+ 
+   }
+ 
+   @Override
+   public CurrentNotificationEventId getCurrentNotificationEventId() {
+     return null;
+   }
+ 
+   @Override
+   public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) {
+     return null;
+   }
+ 
+   @Override
+   public void flushCache() {
+ 
+   }
+ 
+   @Override
+   public ByteBuffer[] getFileMetadata(List<Long> fileIds) {
+     return null;
+   }
+ 
+   @Override
+   public void putFileMetadata(
+       List<Long> fileIds, List<ByteBuffer> metadata, FileMetadataExprType type) {
+   }
+ 
+   @Override
+   public boolean isFileMetadataSupported() {
+     return false;
+   }
+ 
+   @Override
+   public void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[] expr,
+       ByteBuffer[] metadatas, ByteBuffer[] stripeBitsets, boolean[] eliminated) {
+   }
+ 
+   @Override
+   public int getTableCount() throws MetaException {
+     return 0;
+   }
+ 
+   @Override
+   public int getPartitionCount() throws MetaException {
+     return 0;
+   }
+ 
+   @Override
+   public int getDatabaseCount() throws MetaException {
+     return 0;
+   }
+ 
+   @Override
+   public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
+     return null;
+   }
+ 
+   @Override
+   public List<SQLPrimaryKey> getPrimaryKeys(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLForeignKey> getForeignKeys(String catName, String parent_db_name,
+     String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLUniqueConstraint> getUniqueConstraints(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLNotNullConstraint> getNotNullConstraints(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLDefaultConstraint> getDefaultConstraints(String catName, String db_name, String tbl_name)
+       throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLCheckConstraint> getCheckConstraints(String catName, String db_name, String tbl_name)
+       throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> createTableWithConstraints(Table tbl,
+     List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+     List<SQLUniqueConstraint> uniqueConstraints,
+     List<SQLNotNullConstraint> notNullConstraints,
+     List<SQLDefaultConstraint> defaultConstraints,
+     List<SQLCheckConstraint> checkConstraints)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public void dropConstraint(String catName, String dbName, String tableName,
+       String constraintName, boolean missingOk) throws NoSuchObjectException {
+     // TODO Auto-generated method stub
+   }
+ 
+   @Override
+   public List<String> addPrimaryKeys(List<SQLPrimaryKey> pks)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addForeignKeys(List<SQLForeignKey> fks)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addUniqueConstraints(List<SQLUniqueConstraint> uks)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addNotNullConstraints(List<SQLNotNullConstraint> nns)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addDefaultConstraints(List<SQLDefaultConstraint> nns)
+       throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addCheckConstraints(List<SQLCheckConstraint> nns)
+       throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public String getMetastoreDbUuid() throws MetaException {
+     throw new MetaException("Get metastore uuid is not implemented");
+   }
+ 
+   @Override
+   public void createResourcePlan(
+       WMResourcePlan resourcePlan, String copyFrom, int defaultPoolSize) throws MetaException {
+   }
+ 
+   @Override
+   public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public List<WMResourcePlan> getAllResourcePlans() throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public WMFullResourcePlan alterResourcePlan(
+       String name, WMNullableResourcePlan resourcePlan, boolean canActivateDisabled, boolean canDeactivate,
+       boolean isReplace)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public WMFullResourcePlan getActiveResourcePlan() throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public WMValidateResourcePlanResponse validateResourcePlan(String name)
+       throws NoSuchObjectException, InvalidObjectException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void dropResourcePlan(String name) throws NoSuchObjectException, MetaException {
+   }
+ 
+   @Override
+   public void createWMTrigger(WMTrigger trigger) throws MetaException {
+   }
+ 
+   @Override
+   public void alterWMTrigger(WMTrigger trigger)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void dropWMTrigger(String resourcePlanName, String triggerName)
+       throws NoSuchObjectException, MetaException {
+   }
+ 
+   @Override
+   public List<WMTrigger> getTriggersForResourcePlan(String resourcePlanName)
+       throws NoSuchObjectException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void createPool(WMPool pool) throws AlreadyExistsException, NoSuchObjectException,
+       InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void alterPool(WMNullablePool pool, String poolPath) throws AlreadyExistsException,
+       NoSuchObjectException, InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void dropWMPool(String resourcePlanName, String poolPath)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void createOrUpdateWMMapping(WMMapping mapping, boolean update)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+       MetaException {
+   }
+ 
+   @Override
+   public void dropWMMapping(WMMapping mapping)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+       String poolPath) throws AlreadyExistsException, NoSuchObjectException,
+       InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+       String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public List<MetaStoreUtils.ColStatsObjWithSourceInfo> getPartitionColStatsForDatabase(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException {
+ 
+   }
+ 
+   @Override
+   public void alterISchema(ISchemaName schemaName, ISchema newSchema) throws NoSuchObjectException,
+       MetaException {
+ 
+   }
+ 
+   @Override
+   public ISchema getISchema(ISchemaName schemaName) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void dropISchema(ISchemaName schemaName) throws NoSuchObjectException, MetaException {
+ 
+   }
+ 
+   @Override
+   public void addSchemaVersion(SchemaVersion schemaVersion) throws
+       AlreadyExistsException, InvalidObjectException, NoSuchObjectException, MetaException {
+ 
+   }
+ 
+   @Override
+   public void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion newVersion) throws
+       NoSuchObjectException, MetaException {
+ 
+   }
+ 
+   @Override
+   public SchemaVersion getSchemaVersion(SchemaVersionDescriptor version) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public SchemaVersion getLatestSchemaVersion(ISchemaName schemaName) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public List<SchemaVersion> getAllSchemaVersion(ISchemaName schemaName) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public List<SchemaVersion> getSchemaVersionsByColumns(String colName, String colNamespace,
+                                                         String type) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void dropSchemaVersion(SchemaVersionDescriptor version) throws NoSuchObjectException,
+       MetaException {
+ 
+   }
+ 
+   @Override
+   public SerDeInfo getSerDeInfo(String serDeName) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException {
+ 
+   }
+ 
+   @Override
+   public void addRuntimeStat(RuntimeStat stat) throws MetaException {
+   }
+ 
+   @Override
+   public List<RuntimeStat> getRuntimeStats(int maxEntries, int maxCreateTime) throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public int deleteRuntimeStats(int maxRetainSecs) throws MetaException {
+     return 0;
+   }
+ 
+   @Override
+   public List<TableName> getTableNamesWithStats() throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public List<TableName> getAllTableNamesForStats() throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public Map<String, List<String>> getPartitionColsWithStats(String catName,
+       String dbName, String tableName) throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public void cleanWriteNotificationEvents(int olderThan) {
+   }
+ 
+   @Override
+   public List<WriteEventInfo> getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException {
+     return null;
+   }
+ }
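
For context only, and not part of the commit above: a minimal sketch of how the setConf verification in DummyRawStoreForJdoConnection can be exercised directly. It assumes JUnit 4 on the test classpath and the existing DummyJdoConnectionUrlHook class whose static newUrl field the diff references; the sketch's class and method names are illustrative and do not exist in the Hive source tree.

package org.apache.hadoop.hive.metastore;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.junit.Test;

// Hypothetical test class, shown only to illustrate the contract checked in setConf above.
public class DummyRawStoreForJdoConnectionSketch {

  @Test
  public void setConfAcceptsUrlRewrittenByHook() {
    // Simulate the state DummyJdoConnectionUrlHook is expected to leave behind:
    // the JDO connection URL ConfVar already points at the hook's replacement URL.
    Configuration conf = MetastoreConf.newMetastoreConf();
    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY,
        DummyJdoConnectionUrlHook.newUrl);

    // setConf asserts that CONNECT_URL_KEY equals DummyJdoConnectionUrlHook.newUrl,
    // so this call passes only if the hook's rewrite has already been applied.
    new DummyRawStoreForJdoConnection().setConf(conf);
  }
}

In the real test setup the hook performs the URL rewrite before the RawStore is constructed; the sketch sets that precondition by hand so the assertion path in setConf is visible in isolation.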


[03/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query8.q.out b/ql/src/test/results/clientpositive/perf/tez/query8.q.out
index 26c7d8b..ee20e61 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query8.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query8.q.out
@@ -234,150 +234,152 @@ Stage-0
     limit:100
     Stage-1
       Reducer 5 vectorized
-      File Output Operator [FS_150]
-        Limit [LIM_149] (rows=100 width=88)
+      File Output Operator [FS_151]
+        Limit [LIM_150] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_148] (rows=348477374 width=88)
+          Select Operator [SEL_149] (rows=348477374 width=88)
             Output:["_col0","_col1"]
           <-Reducer 4 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_147]
-              Group By Operator [GBY_146] (rows=348477374 width=88)
+            SHUFFLE [RS_148]
+              Group By Operator [GBY_147] (rows=348477374 width=88)
                 Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
               <-Reducer 3 [SIMPLE_EDGE]
                 SHUFFLE [RS_57]
                   PartitionCols:_col0
                   Group By Operator [GBY_56] (rows=696954748 width=88)
                     Output:["_col0","_col1"],aggregations:["sum(_col2)"],keys:_col8
-                    Merge Join Operator [MERGEJOIN_117] (rows=696954748 width=88)
-                      Conds:RS_52._col1=RS_53._col1(Inner),Output:["_col2","_col8"]
-                    <-Reducer 12 [SIMPLE_EDGE]
-                      SHUFFLE [RS_53]
-                        PartitionCols:_col1
-                        Merge Join Operator [MERGEJOIN_116] (rows=1874 width=1911)
-                          Conds:RS_137.substr(_col0, 1, 2)=RS_140.substr(_col2, 1, 2)(Inner),Output:["_col1","_col2"]
-                        <-Map 19 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_140]
-                            PartitionCols:substr(_col2, 1, 2)
-                            Select Operator [SEL_139] (rows=1704 width=1910)
-                              Output:["_col0","_col1","_col2"]
-                              Filter Operator [FIL_138] (rows=1704 width=1910)
-                                predicate:(s_store_sk is not null and substr(s_zip, 1, 2) is not null)
-                                TableScan [TS_42] (rows=1704 width=1910)
-                                  default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name","s_zip"]
-                        <-Reducer 11 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_137]
-                            PartitionCols:substr(_col0, 1, 2)
-                            Select Operator [SEL_136] (rows=1 width=1014)
-                              Output:["_col0"]
-                              Filter Operator [FIL_135] (rows=1 width=1014)
-                                predicate:(_col1 = 2L)
-                                Group By Operator [GBY_134] (rows=6833333 width=1014)
-                                  Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0
-                                <-Union 10 [SIMPLE_EDGE]
-                                  <-Reducer 17 [CONTAINS] vectorized
-                                    Reduce Output Operator [RS_171]
-                                      PartitionCols:_col0
-                                      Group By Operator [GBY_170] (rows=13666666 width=1014)
-                                        Output:["_col0","_col1"],aggregations:["count(_col1)"],keys:_col0
-                                        Group By Operator [GBY_169] (rows=3666666 width=1014)
-                                          Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0
-                                        <-Reducer 16 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_168]
-                                            PartitionCols:_col0
-                                            Group By Operator [GBY_167] (rows=7333333 width=1014)
-                                              Output:["_col0","_col1"],aggregations:["count()"],keys:_col0
-                                              Select Operator [SEL_166] (rows=7333333 width=1014)
-                                                Output:["_col0"]
-                                                Filter Operator [FIL_165] (rows=7333333 width=1014)
-                                                  predicate:(_col1 > 10L)
-                                                  Group By Operator [GBY_164] (rows=22000000 width=1014)
-                                                    Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0
-                                                  <-Reducer 15 [SIMPLE_EDGE]
-                                                    SHUFFLE [RS_25]
-                                                      PartitionCols:_col0
-                                                      Group By Operator [GBY_24] (rows=44000000 width=1014)
-                                                        Output:["_col0","_col1"],aggregations:["count()"],keys:_col1
-                                                        Merge Join Operator [MERGEJOIN_115] (rows=44000000 width=1014)
-                                                          Conds:RS_160._col0=RS_163._col0(Inner),Output:["_col1"]
-                                                        <-Map 14 [SIMPLE_EDGE] vectorized
-                                                          SHUFFLE [RS_160]
-                                                            PartitionCols:_col0
-                                                            Select Operator [SEL_159] (rows=40000000 width=1014)
-                                                              Output:["_col0","_col1"]
-                                                              Filter Operator [FIL_158] (rows=40000000 width=1014)
-                                                                predicate:(ca_address_sk is not null and substr(substr(ca_zip, 1, 5), 1, 2) is not null)
-                                                                TableScan [TS_14] (rows=40000000 width=1014)
-                                                                  default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_zip"]
-                                                        <-Map 18 [SIMPLE_EDGE] vectorized
-                                                          SHUFFLE [RS_163]
-                                                            PartitionCols:_col0
-                                                            Select Operator [SEL_162] (rows=40000000 width=860)
-                                                              Output:["_col0"]
-                                                              Filter Operator [FIL_161] (rows=40000000 width=860)
-                                                                predicate:((c_preferred_cust_flag = 'Y') and c_current_addr_sk is not null)
-                                                                TableScan [TS_17] (rows=80000000 width=860)
-                                                                  default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_current_addr_sk","c_preferred_cust_flag"]
-                                  <-Reducer 9 [CONTAINS] vectorized
-                                    Reduce Output Operator [RS_157]
-                                      PartitionCols:_col0
-                                      Group By Operator [GBY_156] (rows=13666666 width=1014)
-                                        Output:["_col0","_col1"],aggregations:["count(_col1)"],keys:_col0
-                                        Group By Operator [GBY_155] (rows=10000000 width=1014)
-                                          Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0
-                                        <-Map 8 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_154]
-                                            PartitionCols:_col0
-                                            Group By Operator [GBY_153] (rows=20000000 width=1014)
-                                              Output:["_col0","_col1"],aggregations:["count()"],keys:_col0
-                                              Select Operator [SEL_152] (rows=20000000 width=1014)
-                                                Output:["_col0"]
-                                                Filter Operator [FIL_151] (rows=20000000 width=1014)
-                                                  predicate:((substr(ca_zip, 1, 5)) IN ('89436', '30868', '65085', '22977', '83927', '77557', '58429', '40697', '80614', '10502', '32779', '91137', '61265', '98294', '17921', '18427', '21203', '59362', '87291', '84093', '21505', '17184', '10866', '67898', '25797', '28055', '18377', '80332', '74535', '21757', '29742', '90885', '29898', '17819', '40811', '25990', '47513', '89531', '91068', '10391', '18846', '99223', '82637', '41368', '83658', '86199', '81625', '26696', '89338', '88425', '32200', '81427', '19053', '77471', '36610', '99823', '43276', '41249', '48584', '83550', '82276', '18842', '78890', '14090', '38123', '40936', '34425', '19850', '43286', '80072', '79188', '54191', '11395', '50497', '84861', '90733', '21068', '57666', '37119', '25004', '57835', '70067', '62878', '95806', '19303', '18840', '19124', '29785', '16737', '16022', '49613', '89977', '68310', '60069', '98360', '48649', '39050', '41793', '25002', '27413', '39736', 
 '47208', '16515', '94808', '57648', '15009', '80015', '42961', '63982', '21744', '71853', '81087', '67468', '34175', '64008', '20261', '11201', '51799', '48043', '45645', '61163', '48375', '36447', '57042', '21218', '41100', '89951', '22745', '35851', '83326', '61125', '78298', '80752', '49858', '52940', '96976', '63792', '11376', '53582', '18717', '90226', '50530', '94203', '99447', '27670', '96577', '57856', '56372', '16165', '23427', '54561', '28806', '44439', '22926', '30123', '61451', '92397', '56979', '92309', '70873', '13355', '21801', '46346', '37562', '56458', '28286', '47306', '99555', '69399', '26234', '47546', '49661', '88601', '35943', '39936', '25632', '24611', '44166', '56648', '30379', '59785', '11110', '14329', '93815', '52226', '71381', '13842', '25612', '63294', '14664', '21077', '82626', '18799', '60915', '81020', '56447', '76619', '11433', '13414', '42548', '92713', '70467', '30884', '47484', '16072', '38936', '13036', '88376', '45539', '35901', '19506', '65690'
 , '73957', '71850', '49231', '14276', '20005', '18384', '76615', '11635', '38177', '55607', '41369', '95447', '58581', '58149', '91946', '33790', '76232', '75692', '95464', '22246', '51061', '56692', '53121', '77209', '15482', '10688', '14868', '45907', '73520', '72666', '25734', '17959', '24677', '66446', '94627', '53535', '15560', '41967', '69297', '11929', '59403', '33283', '52232', '57350', '43933', '40921', '36635', '10827', '71286', '19736', '80619', '25251', '95042', '15526', '36496', '55854', '49124', '81980', '35375', '49157', '63512', '28944', '14946', '36503', '54010', '18767', '23969', '43905', '66979', '33113', '21286', '58471', '59080', '13395', '79144', '70373', '67031', '38360', '26705', '50906', '52406', '26066', '73146', '15884', '31897', '30045', '61068', '45550', '92454', '13376', '14354', '19770', '22928', '97790', '50723', '46081', '30202', '14410', '20223', '88500', '67298', '13261', '14172', '81410', '93578', '83583', '46047', '94167', '82564', '21156', '1579
 9', '86709', '37931', '74703', '83103', '23054', '70470', '72008', '49247', '91911', '69998', '20961', '70070', '63197', '54853', '88191', '91830', '49521', '19454', '81450', '89091', '62378', '25683', '61869', '51744', '36580', '85778', '36871', '48121', '28810', '83712', '45486', '67393', '26935', '42393', '20132', '55349', '86057', '21309', '80218', '10094', '11357', '48819', '39734', '40758', '30432', '21204', '29467', '30214', '61024', '55307', '74621', '11622', '68908', '33032', '52868', '99194', '99900', '84936', '69036', '99149', '45013', '32895', '59004', '32322', '14933', '32936', '33562', '72550', '27385', '58049', '58200', '16808', '21360', '32961', '18586', '79307', '15492') and substr(substr(ca_zip, 1, 5), 1, 2) is not null)
-                                                  TableScan [TS_6] (rows=40000000 width=1014)
-                                                    default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_zip"]
-                    <-Reducer 2 [SIMPLE_EDGE]
-                      SHUFFLE [RS_52]
-                        PartitionCols:_col1
-                        Merge Join Operator [MERGEJOIN_114] (rows=633595212 width=88)
-                          Conds:RS_145._col0=RS_128._col0(Inner),Output:["_col1","_col2"]
-                        <-Map 6 [SIMPLE_EDGE] vectorized
-                          PARTITION_ONLY_SHUFFLE [RS_128]
-                            PartitionCols:_col0
-                            Select Operator [SEL_127] (rows=18262 width=1119)
-                              Output:["_col0"]
-                              Filter Operator [FIL_126] (rows=18262 width=1119)
-                                predicate:((d_qoy = 1) and (d_year = 2002) and d_date_sk is not null)
-                                TableScan [TS_3] (rows=73049 width=1119)
-                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
-                        <-Map 1 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_145]
-                            PartitionCols:_col0
-                            Select Operator [SEL_144] (rows=575995635 width=88)
-                              Output:["_col0","_col1","_col2"]
-                              Filter Operator [FIL_143] (rows=575995635 width=88)
-                                predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_50_date_dim_d_date_sk_min) AND DynamicValue(RS_50_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_50_date_dim_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_53_store_s_store_sk_min) AND DynamicValue(RS_53_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_53_store_s_store_sk_bloom_filter))) and ss_sold_date_sk is not null and ss_store_sk is not null)
-                                TableScan [TS_0] (rows=575995635 width=88)
-                                  default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_store_sk","ss_net_profit"]
-                                <-Reducer 13 [BROADCAST_EDGE] vectorized
-                                  BROADCAST [RS_142]
-                                    Group By Operator [GBY_141] (rows=1 width=12)
-                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                    <-Reducer 12 [CUSTOM_SIMPLE_EDGE]
-                                      SHUFFLE [RS_91]
-                                        Group By Operator [GBY_90] (rows=1 width=12)
-                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                          Select Operator [SEL_89] (rows=1874 width=1911)
-                                            Output:["_col0"]
-                                             Please refer to the previous Merge Join Operator [MERGEJOIN_116]
-                                <-Reducer 7 [BROADCAST_EDGE] vectorized
-                                  BROADCAST [RS_133]
-                                    Group By Operator [GBY_132] (rows=1 width=12)
-                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                    <-Map 6 [CUSTOM_SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_131]
-                                        Group By Operator [GBY_130] (rows=1 width=12)
-                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                          Select Operator [SEL_129] (rows=18262 width=1119)
-                                            Output:["_col0"]
-                                             Please refer to the previous Select Operator [SEL_127]
+                    Top N Key Operator [TNK_84] (rows=696954748 width=88)
+                      keys:_col8,sort order:+,top n:100
+                      Merge Join Operator [MERGEJOIN_118] (rows=696954748 width=88)
+                        Conds:RS_52._col1=RS_53._col1(Inner),Output:["_col2","_col8"]
+                      <-Reducer 12 [SIMPLE_EDGE]
+                        SHUFFLE [RS_53]
+                          PartitionCols:_col1
+                          Merge Join Operator [MERGEJOIN_117] (rows=1874 width=1911)
+                            Conds:RS_138.substr(_col0, 1, 2)=RS_141.substr(_col2, 1, 2)(Inner),Output:["_col1","_col2"]
+                          <-Map 19 [SIMPLE_EDGE] vectorized
+                            SHUFFLE [RS_141]
+                              PartitionCols:substr(_col2, 1, 2)
+                              Select Operator [SEL_140] (rows=1704 width=1910)
+                                Output:["_col0","_col1","_col2"]
+                                Filter Operator [FIL_139] (rows=1704 width=1910)
+                                  predicate:(s_store_sk is not null and substr(s_zip, 1, 2) is not null)
+                                  TableScan [TS_42] (rows=1704 width=1910)
+                                    default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name","s_zip"]
+                          <-Reducer 11 [SIMPLE_EDGE] vectorized
+                            SHUFFLE [RS_138]
+                              PartitionCols:substr(_col0, 1, 2)
+                              Select Operator [SEL_137] (rows=1 width=1014)
+                                Output:["_col0"]
+                                Filter Operator [FIL_136] (rows=1 width=1014)
+                                  predicate:(_col1 = 2L)
+                                  Group By Operator [GBY_135] (rows=6833333 width=1014)
+                                    Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0
+                                  <-Union 10 [SIMPLE_EDGE]
+                                    <-Reducer 17 [CONTAINS] vectorized
+                                      Reduce Output Operator [RS_172]
+                                        PartitionCols:_col0
+                                        Group By Operator [GBY_171] (rows=13666666 width=1014)
+                                          Output:["_col0","_col1"],aggregations:["count(_col1)"],keys:_col0
+                                          Group By Operator [GBY_170] (rows=3666666 width=1014)
+                                            Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0
+                                          <-Reducer 16 [SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_169]
+                                              PartitionCols:_col0
+                                              Group By Operator [GBY_168] (rows=7333333 width=1014)
+                                                Output:["_col0","_col1"],aggregations:["count()"],keys:_col0
+                                                Select Operator [SEL_167] (rows=7333333 width=1014)
+                                                  Output:["_col0"]
+                                                  Filter Operator [FIL_166] (rows=7333333 width=1014)
+                                                    predicate:(_col1 > 10L)
+                                                    Group By Operator [GBY_165] (rows=22000000 width=1014)
+                                                      Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0
+                                                    <-Reducer 15 [SIMPLE_EDGE]
+                                                      SHUFFLE [RS_25]
+                                                        PartitionCols:_col0
+                                                        Group By Operator [GBY_24] (rows=44000000 width=1014)
+                                                          Output:["_col0","_col1"],aggregations:["count()"],keys:_col1
+                                                          Merge Join Operator [MERGEJOIN_116] (rows=44000000 width=1014)
+                                                            Conds:RS_161._col0=RS_164._col0(Inner),Output:["_col1"]
+                                                          <-Map 14 [SIMPLE_EDGE] vectorized
+                                                            SHUFFLE [RS_161]
+                                                              PartitionCols:_col0
+                                                              Select Operator [SEL_160] (rows=40000000 width=1014)
+                                                                Output:["_col0","_col1"]
+                                                                Filter Operator [FIL_159] (rows=40000000 width=1014)
+                                                                  predicate:(ca_address_sk is not null and substr(substr(ca_zip, 1, 5), 1, 2) is not null)
+                                                                  TableScan [TS_14] (rows=40000000 width=1014)
+                                                                    default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_zip"]
+                                                          <-Map 18 [SIMPLE_EDGE] vectorized
+                                                            SHUFFLE [RS_164]
+                                                              PartitionCols:_col0
+                                                              Select Operator [SEL_163] (rows=40000000 width=860)
+                                                                Output:["_col0"]
+                                                                Filter Operator [FIL_162] (rows=40000000 width=860)
+                                                                  predicate:((c_preferred_cust_flag = 'Y') and c_current_addr_sk is not null)
+                                                                  TableScan [TS_17] (rows=80000000 width=860)
+                                                                    default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_current_addr_sk","c_preferred_cust_flag"]
+                                    <-Reducer 9 [CONTAINS] vectorized
+                                      Reduce Output Operator [RS_158]
+                                        PartitionCols:_col0
+                                        Group By Operator [GBY_157] (rows=13666666 width=1014)
+                                          Output:["_col0","_col1"],aggregations:["count(_col1)"],keys:_col0
+                                          Group By Operator [GBY_156] (rows=10000000 width=1014)
+                                            Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0
+                                          <-Map 8 [SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_155]
+                                              PartitionCols:_col0
+                                              Group By Operator [GBY_154] (rows=20000000 width=1014)
+                                                Output:["_col0","_col1"],aggregations:["count()"],keys:_col0
+                                                Select Operator [SEL_153] (rows=20000000 width=1014)
+                                                  Output:["_col0"]
+                                                  Filter Operator [FIL_152] (rows=20000000 width=1014)
+                                                    predicate:((substr(ca_zip, 1, 5)) IN ('89436', '30868', '65085', '22977', '83927', '77557', '58429', '40697', '80614', '10502', '32779', '91137', '61265', '98294', '17921', '18427', '21203', '59362', '87291', '84093', '21505', '17184', '10866', '67898', '25797', '28055', '18377', '80332', '74535', '21757', '29742', '90885', '29898', '17819', '40811', '25990', '47513', '89531', '91068', '10391', '18846', '99223', '82637', '41368', '83658', '86199', '81625', '26696', '89338', '88425', '32200', '81427', '19053', '77471', '36610', '99823', '43276', '41249', '48584', '83550', '82276', '18842', '78890', '14090', '38123', '40936', '34425', '19850', '43286', '80072', '79188', '54191', '11395', '50497', '84861', '90733', '21068', '57666', '37119', '25004', '57835', '70067', '62878', '95806', '19303', '18840', '19124', '29785', '16737', '16022', '49613', '89977', '68310', '60069', '98360', '48649', '39050', '41793', '25002', '27413', '39736'
 , '47208', '16515', '94808', '57648', '15009', '80015', '42961', '63982', '21744', '71853', '81087', '67468', '34175', '64008', '20261', '11201', '51799', '48043', '45645', '61163', '48375', '36447', '57042', '21218', '41100', '89951', '22745', '35851', '83326', '61125', '78298', '80752', '49858', '52940', '96976', '63792', '11376', '53582', '18717', '90226', '50530', '94203', '99447', '27670', '96577', '57856', '56372', '16165', '23427', '54561', '28806', '44439', '22926', '30123', '61451', '92397', '56979', '92309', '70873', '13355', '21801', '46346', '37562', '56458', '28286', '47306', '99555', '69399', '26234', '47546', '49661', '88601', '35943', '39936', '25632', '24611', '44166', '56648', '30379', '59785', '11110', '14329', '93815', '52226', '71381', '13842', '25612', '63294', '14664', '21077', '82626', '18799', '60915', '81020', '56447', '76619', '11433', '13414', '42548', '92713', '70467', '30884', '47484', '16072', '38936', '13036', '88376', '45539', '35901', '19506', '6569
 0', '73957', '71850', '49231', '14276', '20005', '18384', '76615', '11635', '38177', '55607', '41369', '95447', '58581', '58149', '91946', '33790', '76232', '75692', '95464', '22246', '51061', '56692', '53121', '77209', '15482', '10688', '14868', '45907', '73520', '72666', '25734', '17959', '24677', '66446', '94627', '53535', '15560', '41967', '69297', '11929', '59403', '33283', '52232', '57350', '43933', '40921', '36635', '10827', '71286', '19736', '80619', '25251', '95042', '15526', '36496', '55854', '49124', '81980', '35375', '49157', '63512', '28944', '14946', '36503', '54010', '18767', '23969', '43905', '66979', '33113', '21286', '58471', '59080', '13395', '79144', '70373', '67031', '38360', '26705', '50906', '52406', '26066', '73146', '15884', '31897', '30045', '61068', '45550', '92454', '13376', '14354', '19770', '22928', '97790', '50723', '46081', '30202', '14410', '20223', '88500', '67298', '13261', '14172', '81410', '93578', '83583', '46047', '94167', '82564', '21156', '15
 799', '86709', '37931', '74703', '83103', '23054', '70470', '72008', '49247', '91911', '69998', '20961', '70070', '63197', '54853', '88191', '91830', '49521', '19454', '81450', '89091', '62378', '25683', '61869', '51744', '36580', '85778', '36871', '48121', '28810', '83712', '45486', '67393', '26935', '42393', '20132', '55349', '86057', '21309', '80218', '10094', '11357', '48819', '39734', '40758', '30432', '21204', '29467', '30214', '61024', '55307', '74621', '11622', '68908', '33032', '52868', '99194', '99900', '84936', '69036', '99149', '45013', '32895', '59004', '32322', '14933', '32936', '33562', '72550', '27385', '58049', '58200', '16808', '21360', '32961', '18586', '79307', '15492') and substr(substr(ca_zip, 1, 5), 1, 2) is not null)
+                                                    TableScan [TS_6] (rows=40000000 width=1014)
+                                                      default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_zip"]
+                      <-Reducer 2 [SIMPLE_EDGE]
+                        SHUFFLE [RS_52]
+                          PartitionCols:_col1
+                          Merge Join Operator [MERGEJOIN_115] (rows=633595212 width=88)
+                            Conds:RS_146._col0=RS_129._col0(Inner),Output:["_col1","_col2"]
+                          <-Map 6 [SIMPLE_EDGE] vectorized
+                            PARTITION_ONLY_SHUFFLE [RS_129]
+                              PartitionCols:_col0
+                              Select Operator [SEL_128] (rows=18262 width=1119)
+                                Output:["_col0"]
+                                Filter Operator [FIL_127] (rows=18262 width=1119)
+                                  predicate:((d_qoy = 1) and (d_year = 2002) and d_date_sk is not null)
+                                  TableScan [TS_3] (rows=73049 width=1119)
+                                    default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
+                          <-Map 1 [SIMPLE_EDGE] vectorized
+                            SHUFFLE [RS_146]
+                              PartitionCols:_col0
+                              Select Operator [SEL_145] (rows=575995635 width=88)
+                                Output:["_col0","_col1","_col2"]
+                                Filter Operator [FIL_144] (rows=575995635 width=88)
+                                  predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_50_date_dim_d_date_sk_min) AND DynamicValue(RS_50_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_50_date_dim_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_53_store_s_store_sk_min) AND DynamicValue(RS_53_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_53_store_s_store_sk_bloom_filter))) and ss_sold_date_sk is not null and ss_store_sk is not null)
+                                  TableScan [TS_0] (rows=575995635 width=88)
+                                    default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_store_sk","ss_net_profit"]
+                                  <-Reducer 13 [BROADCAST_EDGE] vectorized
+                                    BROADCAST [RS_143]
+                                      Group By Operator [GBY_142] (rows=1 width=12)
+                                        Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                      <-Reducer 12 [CUSTOM_SIMPLE_EDGE]
+                                        SHUFFLE [RS_92]
+                                          Group By Operator [GBY_91] (rows=1 width=12)
+                                            Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                            Select Operator [SEL_90] (rows=1874 width=1911)
+                                              Output:["_col0"]
+                                               Please refer to the previous Merge Join Operator [MERGEJOIN_117]
+                                  <-Reducer 7 [BROADCAST_EDGE] vectorized
+                                    BROADCAST [RS_134]
+                                      Group By Operator [GBY_133] (rows=1 width=12)
+                                        Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                      <-Map 6 [CUSTOM_SIMPLE_EDGE] vectorized
+                                        PARTITION_ONLY_SHUFFLE [RS_132]
+                                          Group By Operator [GBY_131] (rows=1 width=12)
+                                            Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                            Select Operator [SEL_130] (rows=18262 width=1119)
+                                              Output:["_col0"]
+                                               Please refer to the previous Select Operator [SEL_128]
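
The rewritten plan above shows the new Top N Key Operator (TNK_84, "keys:_col8,sort order:+,top n:100") from HIVE-17896 sitting between the merge join and the aggregation, so rows whose sort key cannot reach the final top 100 are dropped before the shuffle and the Limit, while the existing DynamicValue bloom-filter checks keep pruning store_sales on the scan side. The sketch below illustrates only the top-n-key idea for a simple ascending numeric key; it is not Hive's vectorized TopNKeyOperator, and the function name and sample data are hypothetical.

    import heapq

    # Rough sketch of the top-n-key idea: remember the n best sort keys seen
    # so far and drop rows that can no longer appear in the final top n,
    # before the more expensive downstream operators run.
    def top_n_key_filter(rows, key, n):
        worst = []  # heap of negated keys; -worst[0] is the largest key kept
        for row in rows:
            k = key(row)
            if len(worst) < n:
                heapq.heappush(worst, -k)
                yield row
            elif k < -worst[0]:
                heapq.heapreplace(worst, -k)
                yield row
            # otherwise the row is filtered, mirroring "top n:100" in the plan

    # Tiny usage example (hypothetical data): only rows that might still be
    # among the 3 smallest keys are forwarded; the final Limit makes the exact cut.
    sample = [{"k": v} for v in (5, 1, 9, 2, 8, 0)]
    print([r["k"] for r in top_n_key_filter(sample, lambda r: r["k"], 3)])
    # prints [5, 1, 9, 2, 0]; 8 is dropped early

The ascending comparison matches the plan's "sort order:+"; a descending key would simply invert it. The operator is a best-effort reducer, so the downstream Limit still produces the exact result.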
 


[20/54] [abbrv] hive git commit: HIVE-19532 : fix tests for master-txnstats branch - more tests (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 913bbf0..d098dba 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -768,6 +768,13 @@ class Iface(fb303.FacebookService.Iface):
     """
     pass
 
+  def rename_partition_req(self, req):
+    """
+    Parameters:
+     - req
+    """
+    pass
+
   def partition_name_has_valid_characters(self, part_vals, throw_exception):
     """
     Parameters:
@@ -4996,6 +5003,41 @@ class Client(fb303.FacebookService.Client, Iface):
       raise result.o2
     return
 
+  def rename_partition_req(self, req):
+    """
+    Parameters:
+     - req
+    """
+    self.send_rename_partition_req(req)
+    return self.recv_rename_partition_req()
+
+  def send_rename_partition_req(self, req):
+    self._oprot.writeMessageBegin('rename_partition_req', TMessageType.CALL, self._seqid)
+    args = rename_partition_req_args()
+    args.req = req
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_rename_partition_req(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = rename_partition_req_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "rename_partition_req failed: unknown result")
+
   def partition_name_has_valid_characters(self, part_vals, throw_exception):
     """
     Parameters:
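
On the client side, the generated send_rename_partition_req/recv_rename_partition_req pair above wraps the new call in the usual Thrift request/response cycle, mirroring the existing rename_partition method. A minimal usage sketch follows; the transport setup is standard Thrift, 9083 is the usual metastore default port, and the request struct name and its fields are assumed here rather than taken from this hunk (they are defined in the generated ttypes.py).

    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore import ThriftHiveMetastore, ttypes

    # Hypothetical client usage of the newly generated method. The
    # RenamePartitionRequest struct and its fields are assumptions; consult
    # the generated ttypes.py on this branch for the actual definition.
    transport = TTransport.TBufferedTransport(TSocket.TSocket("localhost", 9083))
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = ThriftHiveMetastore.Client(protocol)

    transport.open()
    try:
        req = ttypes.RenamePartitionRequest()  # assumed struct name
        # ... populate the db name, table name, old partition values and the
        # new partition object as defined in ttypes.py ...
        resp = client.rename_partition_req(req)  # uses the send_/recv_ pair above
    finally:
        transport.close()
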
@@ -9348,6 +9390,7 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
     self._processMap["alter_partitions_req"] = Processor.process_alter_partitions_req
     self._processMap["alter_partition_with_environment_context"] = Processor.process_alter_partition_with_environment_context
     self._processMap["rename_partition"] = Processor.process_rename_partition
+    self._processMap["rename_partition_req"] = Processor.process_rename_partition_req
     self._processMap["partition_name_has_valid_characters"] = Processor.process_partition_name_has_valid_characters
     self._processMap["get_config_value"] = Processor.process_get_config_value
     self._processMap["partition_name_to_vals"] = Processor.process_partition_name_to_vals
@@ -11767,6 +11810,31 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
+  def process_rename_partition_req(self, seqid, iprot, oprot):
+    args = rename_partition_req_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = rename_partition_req_result()
+    try:
+      result.success = self._handler.rename_partition_req(args.req)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except InvalidOperationException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except MetaException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("rename_partition_req", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
   def process_partition_name_has_valid_characters(self, seqid, iprot, oprot):
     args = partition_name_has_valid_characters_args()
     args.read(iprot)
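
On the server side, the new _processMap entry and process_rename_partition_req above route incoming "rename_partition_req" messages to whatever object implements the Iface stub added earlier, mapping InvalidOperationException and MetaException onto the result's o1/o2 fields and any unexpected error onto a TApplicationException. The toy handler below is purely illustrative: the real metastore handler is implemented in Java (HMSHandler), and the response struct name is an assumption.

    from hive_metastore import ThriftHiveMetastore, ttypes

    # Illustrative only: a minimal handler showing where the processor hands
    # off the request. Validation and the response struct are assumptions.
    class ToyMetastoreHandler(ThriftHiveMetastore.Iface):
        def rename_partition_req(self, req):
            if req is None:
                # surfaced to the caller as result.o1 by the processor code above
                raise ttypes.InvalidOperationException(message="empty request")
            # ... perform the rename against the backing store ...
            return ttypes.RenamePartitionResponse()  # assumed struct name

    processor = ThriftHiveMetastore.Processor(ToyMetastoreHandler())
    # The processor would normally be wired into a TServer with transport and
    # protocol factories; that boilerplate is omitted here.
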
@@ -16400,10 +16468,10 @@ class get_databases_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype847, _size844) = iprot.readListBegin()
-          for _i848 in xrange(_size844):
-            _elem849 = iprot.readString()
-            self.success.append(_elem849)
+          (_etype854, _size851) = iprot.readListBegin()
+          for _i855 in xrange(_size851):
+            _elem856 = iprot.readString()
+            self.success.append(_elem856)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -16426,8 +16494,8 @@ class get_databases_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter850 in self.success:
-        oprot.writeString(iter850)
+      for iter857 in self.success:
+        oprot.writeString(iter857)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -16532,10 +16600,10 @@ class get_all_databases_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype854, _size851) = iprot.readListBegin()
-          for _i855 in xrange(_size851):
-            _elem856 = iprot.readString()
-            self.success.append(_elem856)
+          (_etype861, _size858) = iprot.readListBegin()
+          for _i862 in xrange(_size858):
+            _elem863 = iprot.readString()
+            self.success.append(_elem863)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -16558,8 +16626,8 @@ class get_all_databases_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter857 in self.success:
-        oprot.writeString(iter857)
+      for iter864 in self.success:
+        oprot.writeString(iter864)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -17329,12 +17397,12 @@ class get_type_all_result:
       if fid == 0:
         if ftype == TType.MAP:
           self.success = {}
-          (_ktype859, _vtype860, _size858 ) = iprot.readMapBegin()
-          for _i862 in xrange(_size858):
-            _key863 = iprot.readString()
-            _val864 = Type()
-            _val864.read(iprot)
-            self.success[_key863] = _val864
+          (_ktype866, _vtype867, _size865 ) = iprot.readMapBegin()
+          for _i869 in xrange(_size865):
+            _key870 = iprot.readString()
+            _val871 = Type()
+            _val871.read(iprot)
+            self.success[_key870] = _val871
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -17357,9 +17425,9 @@ class get_type_all_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.MAP, 0)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success))
-      for kiter865,viter866 in self.success.items():
-        oprot.writeString(kiter865)
-        viter866.write(oprot)
+      for kiter872,viter873 in self.success.items():
+        oprot.writeString(kiter872)
+        viter873.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.o2 is not None:
@@ -17502,11 +17570,11 @@ class get_fields_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype870, _size867) = iprot.readListBegin()
-          for _i871 in xrange(_size867):
-            _elem872 = FieldSchema()
-            _elem872.read(iprot)
-            self.success.append(_elem872)
+          (_etype877, _size874) = iprot.readListBegin()
+          for _i878 in xrange(_size874):
+            _elem879 = FieldSchema()
+            _elem879.read(iprot)
+            self.success.append(_elem879)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17541,8 +17609,8 @@ class get_fields_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter873 in self.success:
-        iter873.write(oprot)
+      for iter880 in self.success:
+        iter880.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -17709,11 +17777,11 @@ class get_fields_with_environment_context_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype877, _size874) = iprot.readListBegin()
-          for _i878 in xrange(_size874):
-            _elem879 = FieldSchema()
-            _elem879.read(iprot)
-            self.success.append(_elem879)
+          (_etype884, _size881) = iprot.readListBegin()
+          for _i885 in xrange(_size881):
+            _elem886 = FieldSchema()
+            _elem886.read(iprot)
+            self.success.append(_elem886)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17748,8 +17816,8 @@ class get_fields_with_environment_context_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter880 in self.success:
-        iter880.write(oprot)
+      for iter887 in self.success:
+        iter887.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -17902,11 +17970,11 @@ class get_schema_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype884, _size881) = iprot.readListBegin()
-          for _i885 in xrange(_size881):
-            _elem886 = FieldSchema()
-            _elem886.read(iprot)
-            self.success.append(_elem886)
+          (_etype891, _size888) = iprot.readListBegin()
+          for _i892 in xrange(_size888):
+            _elem893 = FieldSchema()
+            _elem893.read(iprot)
+            self.success.append(_elem893)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17941,8 +18009,8 @@ class get_schema_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter887 in self.success:
-        iter887.write(oprot)
+      for iter894 in self.success:
+        iter894.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -18109,11 +18177,11 @@ class get_schema_with_environment_context_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype891, _size888) = iprot.readListBegin()
-          for _i892 in xrange(_size888):
-            _elem893 = FieldSchema()
-            _elem893.read(iprot)
-            self.success.append(_elem893)
+          (_etype898, _size895) = iprot.readListBegin()
+          for _i899 in xrange(_size895):
+            _elem900 = FieldSchema()
+            _elem900.read(iprot)
+            self.success.append(_elem900)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18148,8 +18216,8 @@ class get_schema_with_environment_context_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter894 in self.success:
-        iter894.write(oprot)
+      for iter901 in self.success:
+        iter901.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -18602,66 +18670,66 @@ class create_table_with_constraints_args:
       elif fid == 2:
         if ftype == TType.LIST:
           self.primaryKeys = []
-          (_etype898, _size895) = iprot.readListBegin()
-          for _i899 in xrange(_size895):
-            _elem900 = SQLPrimaryKey()
-            _elem900.read(iprot)
-            self.primaryKeys.append(_elem900)
+          (_etype905, _size902) = iprot.readListBegin()
+          for _i906 in xrange(_size902):
+            _elem907 = SQLPrimaryKey()
+            _elem907.read(iprot)
+            self.primaryKeys.append(_elem907)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 3:
         if ftype == TType.LIST:
           self.foreignKeys = []
-          (_etype904, _size901) = iprot.readListBegin()
-          for _i905 in xrange(_size901):
-            _elem906 = SQLForeignKey()
-            _elem906.read(iprot)
-            self.foreignKeys.append(_elem906)
+          (_etype911, _size908) = iprot.readListBegin()
+          for _i912 in xrange(_size908):
+            _elem913 = SQLForeignKey()
+            _elem913.read(iprot)
+            self.foreignKeys.append(_elem913)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 4:
         if ftype == TType.LIST:
           self.uniqueConstraints = []
-          (_etype910, _size907) = iprot.readListBegin()
-          for _i911 in xrange(_size907):
-            _elem912 = SQLUniqueConstraint()
-            _elem912.read(iprot)
-            self.uniqueConstraints.append(_elem912)
+          (_etype917, _size914) = iprot.readListBegin()
+          for _i918 in xrange(_size914):
+            _elem919 = SQLUniqueConstraint()
+            _elem919.read(iprot)
+            self.uniqueConstraints.append(_elem919)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 5:
         if ftype == TType.LIST:
           self.notNullConstraints = []
-          (_etype916, _size913) = iprot.readListBegin()
-          for _i917 in xrange(_size913):
-            _elem918 = SQLNotNullConstraint()
-            _elem918.read(iprot)
-            self.notNullConstraints.append(_elem918)
+          (_etype923, _size920) = iprot.readListBegin()
+          for _i924 in xrange(_size920):
+            _elem925 = SQLNotNullConstraint()
+            _elem925.read(iprot)
+            self.notNullConstraints.append(_elem925)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 6:
         if ftype == TType.LIST:
           self.defaultConstraints = []
-          (_etype922, _size919) = iprot.readListBegin()
-          for _i923 in xrange(_size919):
-            _elem924 = SQLDefaultConstraint()
-            _elem924.read(iprot)
-            self.defaultConstraints.append(_elem924)
+          (_etype929, _size926) = iprot.readListBegin()
+          for _i930 in xrange(_size926):
+            _elem931 = SQLDefaultConstraint()
+            _elem931.read(iprot)
+            self.defaultConstraints.append(_elem931)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 7:
         if ftype == TType.LIST:
           self.checkConstraints = []
-          (_etype928, _size925) = iprot.readListBegin()
-          for _i929 in xrange(_size925):
-            _elem930 = SQLCheckConstraint()
-            _elem930.read(iprot)
-            self.checkConstraints.append(_elem930)
+          (_etype935, _size932) = iprot.readListBegin()
+          for _i936 in xrange(_size932):
+            _elem937 = SQLCheckConstraint()
+            _elem937.read(iprot)
+            self.checkConstraints.append(_elem937)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18682,43 +18750,43 @@ class create_table_with_constraints_args:
     if self.primaryKeys is not None:
       oprot.writeFieldBegin('primaryKeys', TType.LIST, 2)
       oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys))
-      for iter931 in self.primaryKeys:
-        iter931.write(oprot)
+      for iter938 in self.primaryKeys:
+        iter938.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.foreignKeys is not None:
       oprot.writeFieldBegin('foreignKeys', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys))
-      for iter932 in self.foreignKeys:
-        iter932.write(oprot)
+      for iter939 in self.foreignKeys:
+        iter939.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.uniqueConstraints is not None:
       oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4)
       oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints))
-      for iter933 in self.uniqueConstraints:
-        iter933.write(oprot)
+      for iter940 in self.uniqueConstraints:
+        iter940.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.notNullConstraints is not None:
       oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5)
       oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints))
-      for iter934 in self.notNullConstraints:
-        iter934.write(oprot)
+      for iter941 in self.notNullConstraints:
+        iter941.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.defaultConstraints is not None:
       oprot.writeFieldBegin('defaultConstraints', TType.LIST, 6)
       oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints))
-      for iter935 in self.defaultConstraints:
-        iter935.write(oprot)
+      for iter942 in self.defaultConstraints:
+        iter942.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.checkConstraints is not None:
       oprot.writeFieldBegin('checkConstraints', TType.LIST, 7)
       oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints))
-      for iter936 in self.checkConstraints:
-        iter936.write(oprot)
+      for iter943 in self.checkConstraints:
+        iter943.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -20278,10 +20346,10 @@ class truncate_table_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.partNames = []
-          (_etype940, _size937) = iprot.readListBegin()
-          for _i941 in xrange(_size937):
-            _elem942 = iprot.readString()
-            self.partNames.append(_elem942)
+          (_etype947, _size944) = iprot.readListBegin()
+          for _i948 in xrange(_size944):
+            _elem949 = iprot.readString()
+            self.partNames.append(_elem949)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20306,8 +20374,8 @@ class truncate_table_args:
     if self.partNames is not None:
       oprot.writeFieldBegin('partNames', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.partNames))
-      for iter943 in self.partNames:
-        oprot.writeString(iter943)
+      for iter950 in self.partNames:
+        oprot.writeString(iter950)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -20652,10 +20720,10 @@ class get_tables_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype947, _size944) = iprot.readListBegin()
-          for _i948 in xrange(_size944):
-            _elem949 = iprot.readString()
-            self.success.append(_elem949)
+          (_etype954, _size951) = iprot.readListBegin()
+          for _i955 in xrange(_size951):
+            _elem956 = iprot.readString()
+            self.success.append(_elem956)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20678,8 +20746,8 @@ class get_tables_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter950 in self.success:
-        oprot.writeString(iter950)
+      for iter957 in self.success:
+        oprot.writeString(iter957)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -20829,10 +20897,10 @@ class get_tables_by_type_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype954, _size951) = iprot.readListBegin()
-          for _i955 in xrange(_size951):
-            _elem956 = iprot.readString()
-            self.success.append(_elem956)
+          (_etype961, _size958) = iprot.readListBegin()
+          for _i962 in xrange(_size958):
+            _elem963 = iprot.readString()
+            self.success.append(_elem963)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20855,8 +20923,8 @@ class get_tables_by_type_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter957 in self.success:
-        oprot.writeString(iter957)
+      for iter964 in self.success:
+        oprot.writeString(iter964)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -20980,10 +21048,10 @@ class get_materialized_views_for_rewriting_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype961, _size958) = iprot.readListBegin()
-          for _i962 in xrange(_size958):
-            _elem963 = iprot.readString()
-            self.success.append(_elem963)
+          (_etype968, _size965) = iprot.readListBegin()
+          for _i969 in xrange(_size965):
+            _elem970 = iprot.readString()
+            self.success.append(_elem970)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21006,8 +21074,8 @@ class get_materialized_views_for_rewriting_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter964 in self.success:
-        oprot.writeString(iter964)
+      for iter971 in self.success:
+        oprot.writeString(iter971)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -21080,10 +21148,10 @@ class get_table_meta_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.tbl_types = []
-          (_etype968, _size965) = iprot.readListBegin()
-          for _i969 in xrange(_size965):
-            _elem970 = iprot.readString()
-            self.tbl_types.append(_elem970)
+          (_etype975, _size972) = iprot.readListBegin()
+          for _i976 in xrange(_size972):
+            _elem977 = iprot.readString()
+            self.tbl_types.append(_elem977)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21108,8 +21176,8 @@ class get_table_meta_args:
     if self.tbl_types is not None:
       oprot.writeFieldBegin('tbl_types', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.tbl_types))
-      for iter971 in self.tbl_types:
-        oprot.writeString(iter971)
+      for iter978 in self.tbl_types:
+        oprot.writeString(iter978)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -21165,11 +21233,11 @@ class get_table_meta_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype975, _size972) = iprot.readListBegin()
-          for _i976 in xrange(_size972):
-            _elem977 = TableMeta()
-            _elem977.read(iprot)
-            self.success.append(_elem977)
+          (_etype982, _size979) = iprot.readListBegin()
+          for _i983 in xrange(_size979):
+            _elem984 = TableMeta()
+            _elem984.read(iprot)
+            self.success.append(_elem984)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21192,8 +21260,8 @@ class get_table_meta_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter978 in self.success:
-        iter978.write(oprot)
+      for iter985 in self.success:
+        iter985.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -21317,10 +21385,10 @@ class get_all_tables_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype982, _size979) = iprot.readListBegin()
-          for _i983 in xrange(_size979):
-            _elem984 = iprot.readString()
-            self.success.append(_elem984)
+          (_etype989, _size986) = iprot.readListBegin()
+          for _i990 in xrange(_size986):
+            _elem991 = iprot.readString()
+            self.success.append(_elem991)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21343,8 +21411,8 @@ class get_all_tables_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter985 in self.success:
-        oprot.writeString(iter985)
+      for iter992 in self.success:
+        oprot.writeString(iter992)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -21580,10 +21648,10 @@ class get_table_objects_by_name_args:
       elif fid == 2:
         if ftype == TType.LIST:
           self.tbl_names = []
-          (_etype989, _size986) = iprot.readListBegin()
-          for _i990 in xrange(_size986):
-            _elem991 = iprot.readString()
-            self.tbl_names.append(_elem991)
+          (_etype996, _size993) = iprot.readListBegin()
+          for _i997 in xrange(_size993):
+            _elem998 = iprot.readString()
+            self.tbl_names.append(_elem998)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21604,8 +21672,8 @@ class get_table_objects_by_name_args:
     if self.tbl_names is not None:
       oprot.writeFieldBegin('tbl_names', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.tbl_names))
-      for iter992 in self.tbl_names:
-        oprot.writeString(iter992)
+      for iter999 in self.tbl_names:
+        oprot.writeString(iter999)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -21657,11 +21725,11 @@ class get_table_objects_by_name_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype996, _size993) = iprot.readListBegin()
-          for _i997 in xrange(_size993):
-            _elem998 = Table()
-            _elem998.read(iprot)
-            self.success.append(_elem998)
+          (_etype1003, _size1000) = iprot.readListBegin()
+          for _i1004 in xrange(_size1000):
+            _elem1005 = Table()
+            _elem1005.read(iprot)
+            self.success.append(_elem1005)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21678,8 +21746,8 @@ class get_table_objects_by_name_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter999 in self.success:
-        iter999.write(oprot)
+      for iter1006 in self.success:
+        iter1006.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -22547,10 +22615,10 @@ class get_table_names_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1003, _size1000) = iprot.readListBegin()
-          for _i1004 in xrange(_size1000):
-            _elem1005 = iprot.readString()
-            self.success.append(_elem1005)
+          (_etype1010, _size1007) = iprot.readListBegin()
+          for _i1011 in xrange(_size1007):
+            _elem1012 = iprot.readString()
+            self.success.append(_elem1012)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22585,8 +22653,8 @@ class get_table_names_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1006 in self.success:
-        oprot.writeString(iter1006)
+      for iter1013 in self.success:
+        oprot.writeString(iter1013)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -23715,11 +23783,11 @@ class add_partitions_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype1010, _size1007) = iprot.readListBegin()
-          for _i1011 in xrange(_size1007):
-            _elem1012 = Partition()
-            _elem1012.read(iprot)
-            self.new_parts.append(_elem1012)
+          (_etype1017, _size1014) = iprot.readListBegin()
+          for _i1018 in xrange(_size1014):
+            _elem1019 = Partition()
+            _elem1019.read(iprot)
+            self.new_parts.append(_elem1019)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23736,8 +23804,8 @@ class add_partitions_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter1013 in self.new_parts:
-        iter1013.write(oprot)
+      for iter1020 in self.new_parts:
+        iter1020.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -23895,11 +23963,11 @@ class add_partitions_pspec_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype1017, _size1014) = iprot.readListBegin()
-          for _i1018 in xrange(_size1014):
-            _elem1019 = PartitionSpec()
-            _elem1019.read(iprot)
-            self.new_parts.append(_elem1019)
+          (_etype1024, _size1021) = iprot.readListBegin()
+          for _i1025 in xrange(_size1021):
+            _elem1026 = PartitionSpec()
+            _elem1026.read(iprot)
+            self.new_parts.append(_elem1026)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23916,8 +23984,8 @@ class add_partitions_pspec_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter1020 in self.new_parts:
-        iter1020.write(oprot)
+      for iter1027 in self.new_parts:
+        iter1027.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -24091,10 +24159,10 @@ class append_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1024, _size1021) = iprot.readListBegin()
-          for _i1025 in xrange(_size1021):
-            _elem1026 = iprot.readString()
-            self.part_vals.append(_elem1026)
+          (_etype1031, _size1028) = iprot.readListBegin()
+          for _i1032 in xrange(_size1028):
+            _elem1033 = iprot.readString()
+            self.part_vals.append(_elem1033)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24119,8 +24187,8 @@ class append_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1027 in self.part_vals:
-        oprot.writeString(iter1027)
+      for iter1034 in self.part_vals:
+        oprot.writeString(iter1034)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -24473,10 +24541,10 @@ class append_partition_with_environment_context_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1031, _size1028) = iprot.readListBegin()
-          for _i1032 in xrange(_size1028):
-            _elem1033 = iprot.readString()
-            self.part_vals.append(_elem1033)
+          (_etype1038, _size1035) = iprot.readListBegin()
+          for _i1039 in xrange(_size1035):
+            _elem1040 = iprot.readString()
+            self.part_vals.append(_elem1040)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24507,8 +24575,8 @@ class append_partition_with_environment_context_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1034 in self.part_vals:
-        oprot.writeString(iter1034)
+      for iter1041 in self.part_vals:
+        oprot.writeString(iter1041)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.environment_context is not None:
@@ -25103,10 +25171,10 @@ class drop_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1038, _size1035) = iprot.readListBegin()
-          for _i1039 in xrange(_size1035):
-            _elem1040 = iprot.readString()
-            self.part_vals.append(_elem1040)
+          (_etype1045, _size1042) = iprot.readListBegin()
+          for _i1046 in xrange(_size1042):
+            _elem1047 = iprot.readString()
+            self.part_vals.append(_elem1047)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -25136,8 +25204,8 @@ class drop_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1041 in self.part_vals:
-        oprot.writeString(iter1041)
+      for iter1048 in self.part_vals:
+        oprot.writeString(iter1048)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.deleteData is not None:
@@ -25310,10 +25378,10 @@ class drop_partition_with_environment_context_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1045, _size1042) = iprot.readListBegin()
-          for _i1046 in xrange(_size1042):
-            _elem1047 = iprot.readString()
-            self.part_vals.append(_elem1047)
+          (_etype1052, _size1049) = iprot.readListBegin()
+          for _i1053 in xrange(_size1049):
+            _elem1054 = iprot.readString()
+            self.part_vals.append(_elem1054)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -25349,8 +25417,8 @@ class drop_partition_with_environment_context_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1048 in self.part_vals:
-        oprot.writeString(iter1048)
+      for iter1055 in self.part_vals:
+        oprot.writeString(iter1055)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.deleteData is not None:
@@ -26087,10 +26155,10 @@ class get_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1052, _size1049) = iprot.readListBegin()
-          for _i1053 in xrange(_size1049):
-            _elem1054 = iprot.readString()
-            self.part_vals.append(_elem1054)
+          (_etype1059, _size1056) = iprot.readListBegin()
+          for _i1060 in xrange(_size1056):
+            _elem1061 = iprot.readString()
+            self.part_vals.append(_elem1061)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26115,8 +26183,8 @@ class get_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1055 in self.part_vals:
-        oprot.writeString(iter1055)
+      for iter1062 in self.part_vals:
+        oprot.writeString(iter1062)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -26275,11 +26343,11 @@ class exchange_partition_args:
       if fid == 1:
         if ftype == TType.MAP:
           self.partitionSpecs = {}
-          (_ktype1057, _vtype1058, _size1056 ) = iprot.readMapBegin()
-          for _i1060 in xrange(_size1056):
-            _key1061 = iprot.readString()
-            _val1062 = iprot.readString()
-            self.partitionSpecs[_key1061] = _val1062
+          (_ktype1064, _vtype1065, _size1063 ) = iprot.readMapBegin()
+          for _i1067 in xrange(_size1063):
+            _key1068 = iprot.readString()
+            _val1069 = iprot.readString()
+            self.partitionSpecs[_key1068] = _val1069
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -26316,9 +26384,9 @@ class exchange_partition_args:
     if self.partitionSpecs is not None:
       oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
-      for kiter1063,viter1064 in self.partitionSpecs.items():
-        oprot.writeString(kiter1063)
-        oprot.writeString(viter1064)
+      for kiter1070,viter1071 in self.partitionSpecs.items():
+        oprot.writeString(kiter1070)
+        oprot.writeString(viter1071)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.source_db is not None:
@@ -26523,11 +26591,11 @@ class exchange_partitions_args:
       if fid == 1:
         if ftype == TType.MAP:
           self.partitionSpecs = {}
-          (_ktype1066, _vtype1067, _size1065 ) = iprot.readMapBegin()
-          for _i1069 in xrange(_size1065):
-            _key1070 = iprot.readString()
-            _val1071 = iprot.readString()
-            self.partitionSpecs[_key1070] = _val1071
+          (_ktype1073, _vtype1074, _size1072 ) = iprot.readMapBegin()
+          for _i1076 in xrange(_size1072):
+            _key1077 = iprot.readString()
+            _val1078 = iprot.readString()
+            self.partitionSpecs[_key1077] = _val1078
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -26564,9 +26632,9 @@ class exchange_partitions_args:
     if self.partitionSpecs is not None:
       oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
-      for kiter1072,viter1073 in self.partitionSpecs.items():
-        oprot.writeString(kiter1072)
-        oprot.writeString(viter1073)
+      for kiter1079,viter1080 in self.partitionSpecs.items():
+        oprot.writeString(kiter1079)
+        oprot.writeString(viter1080)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.source_db is not None:
@@ -26649,11 +26717,11 @@ class exchange_partitions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1077, _size1074) = iprot.readListBegin()
-          for _i1078 in xrange(_size1074):
-            _elem1079 = Partition()
-            _elem1079.read(iprot)
-            self.success.append(_elem1079)
+          (_etype1084, _size1081) = iprot.readListBegin()
+          for _i1085 in xrange(_size1081):
+            _elem1086 = Partition()
+            _elem1086.read(iprot)
+            self.success.append(_elem1086)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26694,8 +26762,8 @@ class exchange_partitions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1080 in self.success:
-        iter1080.write(oprot)
+      for iter1087 in self.success:
+        iter1087.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -26789,10 +26857,10 @@ class get_partition_with_auth_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1084, _size1081) = iprot.readListBegin()
-          for _i1085 in xrange(_size1081):
-            _elem1086 = iprot.readString()
-            self.part_vals.append(_elem1086)
+          (_etype1091, _size1088) = iprot.readListBegin()
+          for _i1092 in xrange(_size1088):
+            _elem1093 = iprot.readString()
+            self.part_vals.append(_elem1093)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26804,10 +26872,10 @@ class get_partition_with_auth_args:
       elif fid == 5:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype1090, _size1087) = iprot.readListBegin()
-          for _i1091 in xrange(_size1087):
-            _elem1092 = iprot.readString()
-            self.group_names.append(_elem1092)
+          (_etype1097, _size1094) = iprot.readListBegin()
+          for _i1098 in xrange(_size1094):
+            _elem1099 = iprot.readString()
+            self.group_names.append(_elem1099)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26832,8 +26900,8 @@ class get_partition_with_auth_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1093 in self.part_vals:
-        oprot.writeString(iter1093)
+      for iter1100 in self.part_vals:
+        oprot.writeString(iter1100)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.user_name is not None:
@@ -26843,8 +26911,8 @@ class get_partition_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 5)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1094 in self.group_names:
-        oprot.writeString(iter1094)
+      for iter1101 in self.group_names:
+        oprot.writeString(iter1101)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -27273,11 +27341,11 @@ class get_partitions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1098, _size1095) = iprot.readListBegin()
-          for _i1099 in xrange(_size1095):
-            _elem1100 = Partition()
-            _elem1100.read(iprot)
-            self.success.append(_elem1100)
+          (_etype1105, _size1102) = iprot.readListBegin()
+          for _i1106 in xrange(_size1102):
+            _elem1107 = Partition()
+            _elem1107.read(iprot)
+            self.success.append(_elem1107)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27306,8 +27374,8 @@ class get_partitions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1101 in self.success:
-        iter1101.write(oprot)
+      for iter1108 in self.success:
+        iter1108.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -27401,10 +27469,10 @@ class get_partitions_with_auth_args:
       elif fid == 5:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype1105, _size1102) = iprot.readListBegin()
-          for _i1106 in xrange(_size1102):
-            _elem1107 = iprot.readString()
-            self.group_names.append(_elem1107)
+          (_etype1112, _size1109) = iprot.readListBegin()
+          for _i1113 in xrange(_size1109):
+            _elem1114 = iprot.readString()
+            self.group_names.append(_elem1114)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27437,8 +27505,8 @@ class get_partitions_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 5)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1108 in self.group_names:
-        oprot.writeString(iter1108)
+      for iter1115 in self.group_names:
+        oprot.writeString(iter1115)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -27499,11 +27567,11 @@ class get_partitions_with_auth_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1112, _size1109) = iprot.readListBegin()
-          for _i1113 in xrange(_size1109):
-            _elem1114 = Partition()
-            _elem1114.read(iprot)
-            self.success.append(_elem1114)
+          (_etype1119, _size1116) = iprot.readListBegin()
+          for _i1120 in xrange(_size1116):
+            _elem1121 = Partition()
+            _elem1121.read(iprot)
+            self.success.append(_elem1121)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27532,8 +27600,8 @@ class get_partitions_with_auth_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1115 in self.success:
-        iter1115.write(oprot)
+      for iter1122 in self.success:
+        iter1122.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -27691,11 +27759,11 @@ class get_partitions_pspec_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1119, _size1116) = iprot.readListBegin()
-          for _i1120 in xrange(_size1116):
-            _elem1121 = PartitionSpec()
-            _elem1121.read(iprot)
-            self.success.append(_elem1121)
+          (_etype1126, _size1123) = iprot.readListBegin()
+          for _i1127 in xrange(_size1123):
+            _elem1128 = PartitionSpec()
+            _elem1128.read(iprot)
+            self.success.append(_elem1128)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27724,8 +27792,8 @@ class get_partitions_pspec_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1122 in self.success:
-        iter1122.write(oprot)
+      for iter1129 in self.success:
+        iter1129.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -27883,10 +27951,10 @@ class get_partition_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1126, _size1123) = iprot.readListBegin()
-          for _i1127 in xrange(_size1123):
-            _elem1128 = iprot.readString()
-            self.success.append(_elem1128)
+          (_etype1133, _size1130) = iprot.readListBegin()
+          for _i1134 in xrange(_size1130):
+            _elem1135 = iprot.readString()
+            self.success.append(_elem1135)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27915,8 +27983,8 @@ class get_partition_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1129 in self.success:
-        oprot.writeString(iter1129)
+      for iter1136 in self.success:
+        oprot.writeString(iter1136)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -28156,10 +28224,10 @@ class get_partitions_ps_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1133, _size1130) = iprot.readListBegin()
-          for _i1134 in xrange(_size1130):
-            _elem1135 = iprot.readString()
-            self.part_vals.append(_elem1135)
+          (_etype1140, _size1137) = iprot.readListBegin()
+          for _i1141 in xrange(_size1137):
+            _elem1142 = iprot.readString()
+            self.part_vals.append(_elem1142)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28189,8 +28257,8 @@ class get_partitions_ps_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1136 in self.part_vals:
-        oprot.writeString(iter1136)
+      for iter1143 in self.part_vals:
+        oprot.writeString(iter1143)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -28254,11 +28322,11 @@ class get_partitions_ps_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1140, _size1137) = iprot.readListBegin()
-          for _i1141 in xrange(_size1137):
-            _elem1142 = Partition()
-            _elem1142.read(iprot)
-            self.success.append(_elem1142)
+          (_etype1147, _size1144) = iprot.readListBegin()
+          for _i1148 in xrange(_size1144):
+            _elem1149 = Partition()
+            _elem1149.read(iprot)
+            self.success.append(_elem1149)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28287,8 +28355,8 @@ class get_partitions_ps_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1143 in self.success:
-        iter1143.write(oprot)
+      for iter1150 in self.success:
+        iter1150.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -28375,10 +28443,10 @@ class get_partitions_ps_with_auth_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1147, _size1144) = iprot.readListBegin()
-          for _i1148 in xrange(_size1144):
-            _elem1149 = iprot.readString()
-            self.part_vals.append(_elem1149)
+          (_etype1154, _size1151) = iprot.readListBegin()
+          for _i1155 in xrange(_size1151):
+            _elem1156 = iprot.readString()
+            self.part_vals.append(_elem1156)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28395,10 +28463,10 @@ class get_partitions_ps_with_auth_args:
       elif fid == 6:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype1153, _size1150) = iprot.readListBegin()
-          for _i1154 in xrange(_size1150):
-            _elem1155 = iprot.readString()
-            self.group_names.append(_elem1155)
+          (_etype1160, _size1157) = iprot.readListBegin()
+          for _i1161 in xrange(_size1157):
+            _elem1162 = iprot.readString()
+            self.group_names.append(_elem1162)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28423,8 +28491,8 @@ class get_partitions_ps_with_auth_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1156 in self.part_vals:
-        oprot.writeString(iter1156)
+      for iter1163 in self.part_vals:
+        oprot.writeString(iter1163)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -28438,8 +28506,8 @@ class get_partitions_ps_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 6)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1157 in self.group_names:
-        oprot.writeString(iter1157)
+      for iter1164 in self.group_names:
+        oprot.writeString(iter1164)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -28501,11 +28569,11 @@ class get_partitions_ps_with_auth_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1161, _size1158) = iprot.readListBegin()
-          for _i1162 in xrange(_size1158):
-            _elem1163 = Partition()
-            _elem1163.read(iprot)
-            self.success.append(_elem1163)
+          (_etype1168, _size1165) = iprot.readListBegin()
+          for _i1169 in xrange(_size1165):
+            _elem1170 = Partition()
+            _elem1170.read(iprot)
+            self.success.append(_elem1170)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28534,8 +28602,8 @@ class get_partitions_ps_with_auth_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1164 in self.success:
-        iter1164.write(oprot)
+      for iter1171 in self.success:
+        iter1171.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -28616,10 +28684,10 @@ class get_partition_names_ps_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1168, _size1165) = iprot.readListBegin()
-          for _i1169 in xrange(_size1165):
-            _elem1170 = iprot.readString()
-            self.part_vals.append(_elem1170)
+          (_etype1175, _size1172) = iprot.readListBegin()
+          for _i1176 in xrange(_size1172):
+            _elem1177 = iprot.readString()
+            self.part_vals.append(_elem1177)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28649,8 +28717,8 @@ class get_partition_names_ps_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1171 in self.part_vals:
-        oprot.writeString(iter1171)
+      for iter1178 in self.part_vals:
+        oprot.writeString(iter1178)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -28714,10 +28782,10 @@ class get_partition_names_ps_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1175, _size1172) = iprot.readListBegin()
-          for _i1176 in xrange(_size1172):
-            _elem1177 = iprot.readString()
-            self.success.append(_elem1177)
+          (_etype1182, _size1179) = iprot.readListBegin()
+          for _i1183 in xrange(_size1179):
+            _elem1184 = iprot.readString()
+            self.success.append(_elem1184)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28746,8 +28814,8 @@ class get_partition_names_ps_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1178 in self.success:
-        oprot.writeString(iter1178)
+      for iter1185 in self.success:
+        oprot.writeString(iter1185)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -28918,11 +28986,11 @@ class get_partitions_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1182, _size1179) = iprot.readListBegin()
-          for _i1183 in xrange(_size1179):
-            _elem1184 = Partition()
-            _elem1184.read(iprot)
-            self.success.append(_elem1184)
+          (_etype1189, _size1186) = iprot.readListBegin()
+          for _i1190 in xrange(_size1186):
+            _elem1191 = Partition()
+            _elem1191.read(iprot)
+            self.success.append(_elem1191)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28951,8 +29019,8 @@ class get_partitions_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1185 in self.success:
-        iter1185.write(oprot)
+      for iter1192 in self.success:
+        iter1192.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -29123,11 +29191,11 @@ class get_part_specs_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1189, _size1186) = iprot.readListBegin()
-          for _i1190 in xrange(_size1186):
-            _elem1191 = PartitionSpec()
-            _elem1191.read(iprot)
-            self.success.append(_elem1191)
+          (_etype1196, _size1193) = iprot.readListBegin()
+          for _i1197 in xrange(_size1193):
+            _elem1198 = PartitionSpec()
+            _elem1198.read(iprot)
+            self.success.append(_elem1198)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29156,8 +29224,8 @@ class get_part_specs_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1192 in self.success:
-        iter1192.write(oprot)
+      for iter1199 in self.success:
+        iter1199.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -29577,10 +29645,10 @@ class get_partitions_by_names_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.names = []
-          (_etype1196, _size1193) = iprot.readListBegin()
-          for _i1197 in xrange(_size1193):
-            _elem1198 = iprot.readString()
-            self.names.append(_elem1198)
+          (_etype1203, _size1200) = iprot.readListBegin()
+          for _i1204 in xrange(_size1200):
+            _elem1205 = iprot.readString()
+            self.names.append(_elem1205)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29605,8 +29673,8 @@ class get_partitions_by_names_args:
     if self.names is not None:
       oprot.writeFieldBegin('names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.names))
-      for iter1199 in self.names:
-        oprot.writeString(iter1199)
+      for iter1206 in self.names:
+        oprot.writeString(iter1206)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -29665,11 +29733,11 @@ class get_partitions_by_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1203, _size1200) = iprot.readListBegin()
-          for _i1204 in xrange(_size1200):
-            _elem1205 = Partition()
-            _elem1205.read(iprot)
-            self.success.append(_elem1205)
+          (_etype1210, _size1207) = iprot.readListBegin()
+          for _i1211 in xrange(_size1207):
+            _elem1212 = Partition()
+            _elem1212.read(iprot)
+            self.success.append(_elem1212)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29698,8 +29766,8 @@ class get_partitions_by_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1206 in self.success:
-        iter1206.write(oprot)
+      for iter1213 in self.success:
+        iter1213.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -29949,11 +30017,11 @@ class alter_partitions_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype1210, _size1207) = iprot.readListBegin()
-          for _i1211 in xrange(_size1207):
-            _elem1212 = Partition()
-            _elem1212.read(iprot)
-            self.new_parts.append(_elem1212)
+          (_etype1217, _size1214) = iprot.readListBegin()
+          for _i1218 in xrange(_size1214):
+            _elem1219 = Partition()
+            _elem1219.read(iprot)
+            self.new_parts.append(_elem1219)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29978,8 +30046,8 @@ class alter_partitions_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter1213 in self.new_parts:
-        iter1213.write(oprot)
+      for iter1220 in self.new_parts:
+        iter1220.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -30132,11 +30200,11 @@ class alter_partitions_with_environment_context_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype1217, _size1214) = iprot.readListBegin()
-          for _i1218 in xrange(_size1214):
-            _elem1219 = Partition()
-            _elem1219.read(iprot)
-            self.new_parts.append(_elem1219)
+          (_etype1224, _size1221) = iprot.readListBegin()
+          for _i1225 in xrange(_size1221):
+            _elem1226 = Partition()
+            _elem1226.read(iprot)
+            self.new_parts.append(_elem1226)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30167,8 +30235,8 @@ class alter_partitions_with_environment_context_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter1220 in self.new_parts:
-        iter1220.write(oprot)
+      for iter1227 in self.new_parts:
+        iter1227.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.environment_context is not None:
@@ -30671,10 +30739,10 @@ class rename_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1224, _size1221) = iprot.readListBegin()
-          for _i1225 in xrange(_size1221):
-            _elem1226 = iprot.readString()
-            self.part_vals.append(_elem1226)
+          (_etype1231, _size1228) = iprot.readListBegin()
+          for _i1232 in xrange(_size1228):
+            _elem1233 = iprot.readString()
+            self.part_vals.append(_elem1233)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30705,8 +30773,8 @@ class rename_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1227 in self.part_vals:
-        oprot.writeString(iter1227)
+      for iter1234 in self.part_vals:
+        oprot.writeString(iter1234)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.new_part is not None:
@@ -30819,6 +30887,165 @@ class rename_partition_result:
   def __ne__(self, other):
     return not (self == other)
 
+class rename_partition_req_args:
+  """
+  Attributes:
+   - req
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRUCT, 'req', (RenamePartitionRequest, RenamePartitionRequest.thrift_spec), None, ), # 1
+  )
+
+  def __init__(self, req=None,):
+    self.req = req
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRUCT:
+          self.req = RenamePartitionRequest()
+          self.req.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('rename_partition_req_args')
+    if self.req is not None:
+      oprot.writeFieldBegin('req', TType.STRUCT, 1)
+      self.req.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.req)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class rename_partition_req_result:
+  """
+  Attributes:
+   - success
+   - o1
+   - o2
+  """
+
+  thrift_spec = (
+    (0, TType.STRUCT, 'success', (RenamePartitionResponse, RenamePartitionResponse.thrift_spec), None, ), # 0
+    (1, TType.STRUCT, 'o1', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 1
+    (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2
+  )
+
+  def __init__(self, success=None, o1=None, o2=None,):
+    self.success = success
+    self.o1 = o1
+    self.o2 = o2
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.STRUCT:
+          self.success = RenamePartitionResponse()
+          self.success.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 1:
+        if ftype == TType.STRUCT:
+          self.o1 = InvalidOperationException()
+          self.o1.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRUCT:
+          self.o2 = MetaException()
+          self.o2.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('rename_partition_req_result')
+    if self.success is not None:
+      oprot.writeFieldBegin('success', TType.STRUCT, 0)
+      self.success.write(oprot)
+      oprot.writeFieldEnd()
+    if self.o1 is not None:
+      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+      self.o1.write(oprot)
+      oprot.writeFieldEnd()
+    if self.o2 is not None:
+      oprot.writeFieldBegin('o2', TType.STRUCT, 2)
+      self.o2.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.success)
+    value = (value * 31) ^ hash(self.o1)
+    value = (value * 31) ^ hash(self.o2)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class partition_name_has_valid_characters_args:
   """
   Attributes:
@@ -30848,10 +31075,10 @@ class partition_name_has_valid_characters_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1231, _size1228) = iprot.readListBegin()
-          for _i1232 in xrange(_size1228):
-            _elem1233 = iprot.readString()
-            self.part_vals.append(_elem1233)
+          (_etype1238, _size1235) = iprot.readListBegin()
+          for _i1239 in xrange(_size1235):
+            _elem1240 = iprot.readString()
+            self.part_vals.append(_elem1240)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30873,8 +31100,8 @@ class partition_name_has_valid_characters_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1234 in self.part_vals:
-        oprot.writeString(iter1234)
+      for iter1241 in self.part_vals:
+        oprot.writeString(iter1241)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.throw_exception is not None:
@@ -31232,10 +31459,10 @@ class partition_name_to_vals_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1238, _size1235) = iprot.readListBegin()
-          for _i1239 in xrange(_size1235):
-            _elem1240 = iprot.readString()
-            self.success.append(_elem1240)
+          (_etype1245, _size1242) = iprot.readListBegin()
+          for _i1246 in xrange(_size1242):
+            _elem1247 = iprot.readString()
+            self.success.append(_elem1247)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -31258,8 +31485,8 @@ class partition_name_to_vals_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1241 in self.success:
-        oprot.writeString(iter1241)
+      for iter1248 in self.success:
+        oprot.writeString(iter1248)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -31383,11 +31610,11 @@ class partition_name_to_spec_result:
       if fid == 0:
         if ftype == TType.MAP:
           self.success = {}
-          (_ktype1243, _vtype1244, _size1242 ) = iprot.readMapBegin()
-          for _i1246 in xrange(_size1242):
-            _key1247 = iprot.readString()
-            _val1248 = iprot.readString()
-            self.success[_key1247] = _val1248
+          (_ktype1250, _vtype1251, _size1249 ) = iprot.readMapBegin()
+          for _i1253 in xrange(_size1249):
+            _key1254 = iprot.readString()
+            _val1255 = iprot.readString()
+            self.success[_key1254] = _val1255
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -31410,9 +31637,9 @@ class partition_name_to_spec_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.MAP, 0)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
-      for kiter1249,viter1250 in self.success.items():
-        oprot.writeString(kiter1249)
-        oprot.writeString(viter1250)
+      for kiter1256,viter1257 in self.success.items():
+        oprot.writeString(kiter1256)
+        oprot.writeString(viter1257)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -31488,11 +31715,11 @@ class markPartitionForEvent_args:
       elif fid == 3:
         if ftype == TType.MAP:
           self.part_vals = {}
-          (_ktype1252, _vtype1253, _size1251 ) = iprot.readMapBegin()
-          for _i1255 in xrange(_size1251):
-            _key1256 = iprot.readString()
-            _val1257 = iprot.readString()
-            self.part_vals[_key1256] = _val1257
+          (_ktype1259, _vtype1260, _size1258 ) = iprot.readMapBegin()
+          for _i1262 in xrange(_size1258):
+            _key1263 = iprot.readString()
+            _val1264 = iprot.readString()
+            self.part_vals[_key1263] = _val1264
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -31522,9 +31749,9 @@ class markPartitionForEvent_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter1258,viter1259 in self.part_vals.items():
-        oprot.writeString(kiter1258)
-        oprot.writeString(viter1259)
+      for kiter1265,viter1266 in self.part_vals.items():
+        oprot.writeString(kiter1265)
+        oprot.writeString(viter1266)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -31738,11 +31965,11 @@ class isPartitionMarkedForEvent_args:
       elif fid == 3:
         if ftype == TType.MAP:
           self.part_vals = {}
-          (_ktype1261, _vtype1262, _size1260 ) = iprot.readMapBegin()
-          for _i1264 in xrange(_size1260):
-            _key1265 = iprot.readString()
-            _val1266 = iprot.readString()
-            self.part_vals[_key1265] = _val1266
+          (_ktype1268, _vtype1269, _size1267 ) = iprot.readMapBegin()
+          for _i1271 in xrange(_size1267):
+            _key1272 = iprot.readString()
+            _val1273 = iprot.readString()
+            self.part_vals[_key1272] = _val1273
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -31772,9 +31999,9 @@ class isPartitionMarkedForEvent_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter1267,viter1268 in self.part_vals.items():
-        oprot.writeString(kiter1267)
-        oprot.writeString(viter1268)
+      for kiter1274,viter1275 in self.part_vals.items():
+        oprot.writeString(kiter1274)
+        oprot.writeString(viter1275)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -35800,10 +36027,10 @@ class get_functions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1272, _size1269) = iprot.readListBegin()
-          for _i1273 in xrange(_size1269):
-            _elem1274 = iprot.readString()
-            self.success.append(_elem1274)
+          (_etype1279, _size1276) = iprot.readListBegin()
+          for _i1280 in xrange(_size1276):
+            _elem1281 = iprot.readString()
+            self.success.append(_elem1281)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -35826,8 +36053,8 @@ class get_functions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1275 in self.success:
-        oprot.writeString(iter1275)
+      for iter1282 in self.success:
+        oprot.writeString(iter1282)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -36515,10 +36742,10 @@ class get_role_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1279, _size1276) = iprot.readListBegin()
-          for _i1280 in xrange(_size1276):
-            _elem1281 = iprot.readString()
-            self.success.append(_elem1281)
+          (_etype1286, _size1283) = iprot.readListBegin()
+          for _i1287 in xrange(_size1283):
+            _elem1288 = iprot.readString()
+            self.success.append(_elem1288)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -36541,8 +36768,8 @@ class get_role_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1282 in self.success:
-        oprot.writeString(iter1282)
+      for iter1289 in self.success:
+        oprot.writeString(iter1289)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -37056,11 +37283,11 @@ class list_roles_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1286, _size1283) = iprot.readListBegin()
-          for _i1287 in xrange(_size1283):
-            _elem1288 = Role()
-            _elem1288.read(iprot)
-            self.success.append(_elem1288)
+          (_etype1293, _size1290) = iprot.readListBegin()
+          for _i1294 in xrange(_size1290):
+            _elem1295 = Role()
+            _elem1295.read(iprot)
+            self.success.append(_elem1295)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -37083,8 +37310,8 @@ class list_roles_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1289 in self.success:
-        iter1289.write(oprot)
+      for iter1296 in self.success:
+        iter1296.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -37593,10 +37820,10 @@ class get_privilege_set_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype1293, _size1290) = iprot.readListBegin()
-          for _i1294 in xrange(_size1290):
-            _elem1295 = iprot.readString()
-            self.group_names.append(_elem1295)
+          (_etype1300, _size1297) = iprot.readListBegin()
+          for _i1301 in xrange(_size1297):
+            _elem1302 = iprot.readString()
+            self.group_names.append(_elem1302)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -37621,8 +37848,8 @@ class get_privilege_set_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1296 in self.group_names:
-        oprot.writeString(iter1296)
+      for iter1303 in self.group_names:
+        oprot.writeString(iter1303)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -37849,11 +38076,11 @@ class list_privileges_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1300, _size1297) = iprot.readListBegin()
-          for _i1301 in xrange(_size1297):
-            _elem1302 = HiveObjectPrivilege()
-            _elem1302.read(iprot)
-            self.success.append(_elem1302)
+          (_etype1307, _size1304) = iprot.readListBegin()
+          for _i1308 in xrange(_size1304):
+            _elem1309 = HiveObjectPrivilege()
+            _elem1309.read(iprot)
+            self.success.append(_elem1309)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -37876,8 +38103,8 @@ class list_privileges_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1303 in self.success:
-        iter1303.write(oprot)
+      for iter1310 in self.success:
+        iter1310.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -38547,10 +38774,10 @@ class set_ugi_args:
       elif fid == 2:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype1307, _size1304) = iprot.readListBegin()
-          for _i1308 in xrange(_size1304):
-            _elem1309 = iprot.readString()
-            self.group_names.append(_elem1309)
+          (_etype1314, _size1311) = iprot.readListBegin()
+          for _i1315 in xrange(_size1311):
+            _elem1316 = iprot.readString()
+            self.group_names.append(_elem1316)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -38571,8 +38798,8 @@ class set_ugi_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1310 in self.group_names:
-        oprot.writeString(iter1310)
+      for iter1317 in self.group_names:
+        oprot.writeString(iter1317)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -38627,10 +38854,10 @@ class set_ugi_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1314, _size1311) = iprot.readListBegin()
-          for _i1315 in xrange(_size1311):
-            _elem1316 = iprot.readString()
-            self.success.append(_elem1316)
+          (_etype1321, _size1318) = iprot.readListBegin()
+          for _i1322 in xrange(_size1318):
+            _elem1323 = iprot.readString()
+            self.success.append(_elem1323)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -38653,8 +38880,8 @@ class set_ugi_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1317 in self.success:
-        oprot.writeString(iter1317)
+      for iter1324 in self.success:
+        oprot.writeString(iter1324)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -39586,10 +39813,10 @@ class get_all_token_identifiers_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1321, _size1318) = iprot.readListBegin()
-          for _i1322 in xrange(_size1318):
-            _elem1323 = iprot.readString()
-            self.success.append(_elem1323)
+          (_etype1328, _size1325) = iprot.readListBegin()
+          for _i1329 in xrange(_size1325):
+            _elem1330 = iprot.readString()
+            self.success.append(_elem1330)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -39606,8 +39833,8 @@ class get_all_token_identifiers_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1324 in self.success:
-        oprot.writeString(iter1324)
+      for iter1331 in self.success:
+        oprot.writeString(iter1331)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -40134,10 +40361,10 @@ class get_master_keys_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1328, _size1325) = iprot.readListBegin()
-          for _i1329 in xrange(_size1325):
-            _elem1330 = iprot.readString()
-            self.success.append(_elem1330)
+          (_etype1335, _size1332) = iprot.readListBegin()
+          for _i1336 in xrange(_size1332):
+            _elem1337 = iprot.readString()
+            self.success.append(_elem1337)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -40154,8 +40381,8 @@ class get_master_keys_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1331 in self.success:
-        oprot.writeString(iter1331)
+      for iter1338 in self.success:
+        oprot.writeString(iter1338)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -48562,11 +48789,11 @@ class get_schema_all_versions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1335, _size1332) = iprot.readListBegin()
-          for _i1336 in xrange(_size1332):
-            _elem1337 = SchemaVersion()
-            _elem1337.read(iprot)
-            self.success.append(_elem1337)
+          (_etype1342, _size1339) = iprot.readListBegin()
+          for _i1343 in xrange(_size1339):
+            _elem1344 = SchemaVersion()
+            _elem1344.read(iprot)
+            self.success.append(_elem1344)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -48595,8 +48822,8 @@ class get_schema_all_versions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1338 in self.success:
-        iter1338.write(oprot)
+      for iter1345 in self.success:
+        iter1345.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -50071,11 +50298,11 @@ class get_runtime_stats_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1342, _size1339) = iprot.readListBegin()
-          for _i1343 in xrange(_size1339):
-            _elem1344 = RuntimeStat()
-            _elem1344.read(iprot)
-            self.success.append(_elem1344)
+          (_etype1349, _size1346) = iprot.readListBegin()
+          for _i1350 in xrange(_size1346):
+            _elem1351 = RuntimeStat()
+            _elem1351.read(iprot)
+            self.success.append(_elem1351)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -50098,8 +50325,8 @@ class get_runtime_stats_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1345 in self.success:
-        iter1345.write(oprot)
+      for iter1352 in self.success:
+        iter1352.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:


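The hunk above also adds the Python Thrift plumbing for the new rename_partition_req call: rename_partition_req_args wraps a single RenamePartitionRequest, and rename_partition_req_result carries either a RenamePartitionResponse or an InvalidOperationException / MetaException. A minimal sketch of driving these generated structs directly is shown below; it is not part of the patch, it assumes the usual gen-py package layout (hive_metastore.ThriftHiveMetastore plus hive_metastore.ttypes), and it leaves the RenamePartitionRequest fields unset because their names are not visible in this diff.

    # Sketch only (not part of the patch): round-trip the generated
    # rename_partition_req_args struct through a binary protocol.
    from thrift.transport import TTransport
    from thrift.protocol import TBinaryProtocol

    from hive_metastore import ThriftHiveMetastore            # assumed gen-py layout
    from hive_metastore.ttypes import RenamePartitionRequest  # assumed gen-py layout

    # RenamePartitionRequest's fields are not shown in this hunk, so the
    # request is left empty here; a real caller would populate it first.
    args = ThriftHiveMetastore.rename_partition_req_args(req=RenamePartitionRequest())

    buf = TTransport.TMemoryBuffer()
    args.write(TBinaryProtocol.TBinaryProtocol(buf))          # write() generated above

    decoded = ThriftHiveMetastore.rename_partition_req_args()
    decoded.read(TBinaryProtocol.TBinaryProtocol(
        TTransport.TMemoryBuffer(buf.getvalue())))            # read() generated above
    assert decoded == args                                    # generated __eq__ compares __dict__

Since the args/result pair follows Thrift's usual method-naming convention, the generated ThriftHiveMetastore.Client is expected to expose a matching rename_partition_req(req) method returning a RenamePartitionResponse, though that client code is outside this hunk.
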
[39/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/resources/package.jdo
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/resources/package.jdo
index 0000000,5fb548c..70150da
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/resources/package.jdo
+++ b/standalone-metastore/metastore-server/src/main/resources/package.jdo
@@@ -1,0 -1,1420 +1,1426 @@@
+ <?xml version="1.0"?>
+ <!--
+   Licensed to the Apache Software Foundation (ASF) under one
+   or more contributor license agreements.  See the NOTICE file
+   distributed with this work for additional information
+   regarding copyright ownership.  The ASF licenses this file
+   to you under the Apache License, Version 2.0 (the
+   "License"); you may not use this file except in compliance
+   with the License.  You may obtain a copy of the License at
+  
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ --> 
+ <!DOCTYPE jdo PUBLIC "-//Sun Microsystems, Inc.//DTD Java Data Objects Metadata 2.0//EN"
+   "http://java.sun.com/dtd/jdo_2_0.dtd">
+ <!--
+   Size Limitations:
+ 
+   Indexed VARCHAR: 767 bytes (MySQL running on InnoDB Engine http://bugs.mysql.com/bug.php?id=13315)
+   Non-indexed VARCHAR: 4000 bytes (max length on Oracle 9i/10g/11g)
+ 
+ -->
+ <jdo>
+   <package name="org.apache.hadoop.hive.metastore.model">
+     <class name="MDatabase" identity-type="datastore" table="DBS" detachable="true">  
+       <datastore-identity>
+         <column name="DB_ID"/>
+       </datastore-identity>
+       <index name="UniqueDatabase" unique="true">
+         <column name="NAME"/>
+         <column name="CTLG_NAME"/>
+       </index>
+       <field name="name">  
+         <column name="NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="catalogName">
+         <column name="CTLG_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="description">
+         <column name="DESC" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="locationUri">
+         <column name="DB_LOCATION_URI" length="4000" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="parameters" table="DATABASE_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="DB_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="180" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="4000" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+       <field name="ownerName">    
+         <column name="OWNER_NAME" length="128" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+        <field name="ownerType">
+         <column name="OWNER_TYPE" length="10" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <class name="MCatalog" identity-type="datastore" table="CTLGS" detachable="true">
+       <datastore-identity>
+         <column name="CTLG_ID"/>
+       </datastore-identity>
+       <field name="name">
+         <column name="NAME" length="256" jdbc-type="VARCHAR"/>
+         <index name="UniqueCatalog" unique="true"/>
+       </field>
+       <field name="description">
+         <column name="DESC" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="locationUri">
+         <column name="LOCATION_URI" length="4000" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MFieldSchema" embedded-only="true" table="TYPE_FIELDS" detachable="true">
+       <field name="name">
+         <column name="FNAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="type" >
+         <column name="FTYPE" length="32672" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="comment" >
+         <column name="FCOMMENT" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <class name="MType" table="TYPES" detachable="true">  
+       <field name="name" >  
+         <column name="TYPE_NAME" length="128" jdbc-type="VARCHAR"/>  
+         <index name="UniqueType" unique="true"/>
+       </field>
+       <field name="type1">  
+         <column name="TYPE1" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="type2">  
+         <column name="TYPE2" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="fields" table="TYPE_FIELDS" >
+         <collection element-type="MFieldSchema"/>
+         <join>
+           <primary-key name="TYPE_FIELDS_PK">
+             <column name="TYPE_NAME"/>
+             <column name="FIELD_NAME"/>
+           </primary-key>
+           <column name="TYPE_NAME"/>
+         </join>
+         <element>
+           <embedded>
+             <field name="name">
+               <column name="FIELD_NAME" length="128" jdbc-type="VARCHAR"/>
+             </field>
+             <field name="type">
+               <column name="FIELD_TYPE" length="767" jdbc-type="VARCHAR"  allows-null="false"/>
+             </field>
+             <field name="comment" >
+               <column name="COMMENT" length="256" jdbc-type="VARCHAR" allows-null="true"/>
+             </field>
+           </embedded>
+         </element>
+       </field>
+     </class>
+ 
+     <class name="MTable" table="TBLS" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="TBL_ID"/>
+       </datastore-identity>
+       <index name="UniqueTable" unique="true">
+         <column name="TBL_NAME"/>
+         <column name="DB_ID"/>
+       </index>
+       <field name="tableName">
+         <column name="TBL_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="database">
+         <column name="DB_ID"/>
+       </field>
+       <field name="partitionKeys" table="PARTITION_KEYS" >
+         <collection element-type="MFieldSchema"/>
+         <join>
+           <primary-key name="PARTITION_KEY_PK">
+             <column name="TBL_ID"/>
+             <column name="PKEY_NAME"/>
+           </primary-key>
+           <column name="TBL_ID"/>
+         </join>
+         <element>
+           <embedded>
+             <field name="name">
+               <column name="PKEY_NAME" length="128" jdbc-type="VARCHAR"/>
+             </field>
+             <field name="type">
+               <column name="PKEY_TYPE" length="767" jdbc-type="VARCHAR" allows-null="false"/>
+             </field>
+             <field name="comment" >
+               <column name="PKEY_COMMENT" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+             </field>
+           </embedded>
+         </element>
+       </field>
+       <field name="sd" dependent="true">
+         <column name="SD_ID"/>
+       </field>
+       <field name="owner">
+         <column name="OWNER" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="ownerType">
+         <column name="OWNER_TYPE" length="10" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="lastAccessTime">
+         <column name="LAST_ACCESS_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="retention">
+         <column name="RETENTION" jdbc-type="integer"/>
+       </field>
+       <field name="parameters" table="TABLE_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="TBL_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="32672" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+       <field name="viewOriginalText" default-fetch-group="false">
+         <column name="VIEW_ORIGINAL_TEXT" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="viewExpandedText" default-fetch-group="false">
+         <column name="VIEW_EXPANDED_TEXT" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="rewriteEnabled">
+         <column name="IS_REWRITE_ENABLED"/>
+       </field>
+       <field name="tableType">
+         <column name="TBL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
++      <field name="writeId">
++        <column name="WRITE_ID"/>
++      </field>
+     </class>
+ 
+     <class name="MCreationMetadata" identity-type="datastore" table="MV_CREATION_METADATA" detachable="true">
+       <datastore-identity>
+         <column name="MV_CREATION_METADATA_ID"/>
+       </datastore-identity>
+       <field name="catalogName">
+         <column name="CAT_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="dbName">
+         <column name="DB_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="tblName">
+         <column name="TBL_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="tables" table="MV_TABLES_USED">
+         <collection element-type="MTable"/>
+         <join>
+           <column name="MV_CREATION_METADATA_ID"/>
+         </join>
+         <element column="TBL_ID"/>
+       </field>
+       <field name="txnList">
+         <column name="TXN_LIST" jdbc-type="CLOB" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <class name="MConstraint" identity-type="application" table="KEY_CONSTRAINTS" detachable="true" objectid-class="MConstraint$PK">
+       <field name="constraintName" primary-key="true">
+         <column name="CONSTRAINT_NAME"/>
+       </field>
+       <field name="position" primary-key="true">
+         <column name="POSITION"/>
+       </field>
+       <field name="childColumn">
+         <column name="CHILD_CD_ID"/>
+       </field>
+       <field name="childIntegerIndex">
+         <column name="CHILD_INTEGER_IDX"/>
+       </field>
+       <field name="childTable">
+         <column name="CHILD_TBL_ID"/>
+       </field>
+       <field name="parentColumn">
+         <column name="PARENT_CD_ID"/>
+       </field>
+       <field name="parentIntegerIndex">
+         <column name="PARENT_INTEGER_IDX"/>
+       </field>
+       <field name="parentTable">
+         <column name="PARENT_TBL_ID"/>
+       </field>
+       <field name="constraintType">
+         <column name="CONSTRAINT_TYPE"/>
+       </field>
+       <field name="deleteRule">
+         <column name="DELETE_RULE"/>
+       </field>
+       <field name="updateRule">
+         <column name="UPDATE_RULE"/>
+       </field>
+       <field name="enableValidateRely">
+         <column name="ENABLE_VALIDATE_RELY"/>
+       </field>
+     </class>
+ 
+     <class name="MSerDeInfo" identity-type="datastore" table="SERDES" detachable="true">
+       <datastore-identity>
+         <column name="SERDE_ID"/>
+       </datastore-identity>
+       <field name="name">
+         <column name="NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="serializationLib">
+         <column name="SLIB" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="parameters" table="SERDE_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="SERDE_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="32672" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+       <field name="description">
+         <column name="DESCRIPTION" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="serializerClass">
+         <column name="SERIALIZER_CLASS" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="deserializerClass">
+         <column name="DESERIALIZER_CLASS" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="serdeType">
+         <column name="SERDE_TYPE" jdbc-type="integer" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <class name="MOrder" embedded-only="true" table="SORT_ORDER" detachable="true">
+       <field name="col">
+         <column name="COL_NAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="order">
+         <column name="ORDER" jdbc-type="INTEGER"  allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MColumnDescriptor" identity-type="datastore" table="CDS" detachable="true">
+       <datastore-identity>
+         <column name="CD_ID"/>
+       </datastore-identity>
+       <field name="cols" table="COLUMNS_V2" >
+         <collection element-type="MFieldSchema"/>
+         <join>
+           <primary-key name="COLUMNS_PK">
+             <column name="COLUMN_NAME"/>
+           </primary-key>
+           <column name="CD_ID"/>
+         </join>
+         <element>
+           <embedded>
+             <field name="name">
+               <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR"/>
+             </field>
+             <field name="type">
+               <column name="TYPE_NAME" length="32672" jdbc-type="VARCHAR" allows-null="false"/>
+             </field>
+             <field name="comment">
+               <column name="COMMENT" length="256" jdbc-type="VARCHAR" allows-null="true"/>
+             </field>
+           </embedded>
+         </element>
+       </field>
+     </class>
+ 
+    <class name="MStringList" identity-type="datastore" table="Skewed_STRING_LIST" detachable="true">
+      <datastore-identity>
+        <column name="STRING_LIST_ID"/>
+      </datastore-identity>
+      <field name="internalList" table="Skewed_STRING_LIST_VALUES">
+        <collection element-type="java.lang.String"/>
+        <join>
+          <column name="STRING_LIST_ID"/>
+        </join>
+        <element column="STRING_LIST_VALUE"/>
+      </field>
+    </class>
+ 
+     <class name="MStorageDescriptor" identity-type="datastore" table="SDS" detachable="true">
+       <datastore-identity>
+         <column name="SD_ID"/>
+       </datastore-identity>
+       <field name="cd">
+           <column name="CD_ID"/>
+       </field>
+       <field name="location">
+         <column name="LOCATION" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="inputFormat">
+         <column name="INPUT_FORMAT" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="outputFormat">
+         <column name="OUTPUT_FORMAT" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="isCompressed">
+         <column name="IS_COMPRESSED"/>
+       </field>
+       <field name="isStoredAsSubDirectories">
+         <column name="IS_STOREDASSUBDIRECTORIES"/>
+       </field>
+       <field name="numBuckets">
+         <column name="NUM_BUCKETS" jdbc-type="integer"/>
+       </field>
+       <field name="serDeInfo" dependent="true">
+         <column name="SERDE_ID"/>
+       </field>
+       <field name="bucketCols" table="BUCKETING_COLS">
+         <collection element-type="java.lang.String"/>
+         <join>
+           <column name="SD_ID"/>
+         </join>
+         <element column="BUCKET_COL_NAME"/>
+       </field>
+       <field name="sortCols" table="SORT_COLS">
+         <collection element-type="MOrder"/>
+         <join>
+           <column name="SD_ID"/>
+         </join>
+         <element>
+           <embedded>
+             <field name="col">
+               <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR"/>
+             </field>
+             <field name="order">
+               <column name="ORDER" jdbc-type="INTEGER"  allows-null="false"/>
+             </field>
+           </embedded>
+         </element>
+       </field>
+       <field name="parameters" table="SD_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="SD_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="32672" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+       <field name="skewedColNames" table="SKEWED_COL_NAMES">
+         <collection element-type="java.lang.String"/>
+         <join>
+           <column name="SD_ID"/>
+         </join>
+         <element column="SKEWED_COL_NAME"/>
+       </field>
+       <field name="skewedColValues" table="SKEWED_VALUES">
+         <collection element-type="MStringList"/>
+         <join/>
+       </field>
+       <field name="skewedColValueLocationMaps" table="SKEWED_COL_VALUE_LOC_MAP">
+         <map key-type="MStringList" value-type="java.lang.String"/>
+         <join>
+           <column name="SD_ID"/>
+         </join>
+         <value>
+           <column name="location" length="4000" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+     </class>
+ 
+     <class name="MPartition" table="PARTITIONS" identity-type="datastore" detachable="true">
+       <index name="UniquePartition" unique="true">
+         <column name="PART_NAME"/>
+         <column name="TBL_ID"/>
+       </index>
+       <datastore-identity>
+         <column name="PART_ID"/>
+       </datastore-identity>
+       <field name="partitionName">
+         <column name="PART_NAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="table">
+         <column name="TBL_ID"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="lastAccessTime">
+         <column name="LAST_ACCESS_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="values" table="PARTITION_KEY_VALS">
+         <collection element-type="java.lang.String"/>
+         <join>
+           <column name="PART_ID"/>
+         </join>
+         <element column="PART_KEY_VAL"/>
+       </field>
+       <field name="sd" dependent="true">
+         <column name="SD_ID"/>
+       </field>
+       <field name="parameters" table="PARTITION_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="PART_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="4000" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
++      <field name="writeId">
++        <column name="WRITE_ID"/>
++      </field>
+     </class>
+     
+     <class name="MIndex" table="IDXS" identity-type="datastore" detachable="true">
+       <index name="UniqueINDEX" unique="true">
+         <column name="INDEX_NAME"/>
+         <column name="ORIG_TBL_ID"/>
+       </index>
+       
+       <datastore-identity>
+         <column name="INDEX_ID"/>
+       </datastore-identity>
+       <field name="indexName">
+         <column name="INDEX_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="origTable">
+         <column name="ORIG_TBL_ID"/>
+       </field>
+       <field name="indexTable">
+         <column name="INDEX_TBL_ID"/>
+       </field>
+       <field name="indexHandlerClass">
+         <column name="INDEX_HANDLER_CLASS" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="deferredRebuild">
+         <column name="DEFERRED_REBUILD"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="lastAccessTime">
+         <column name="LAST_ACCESS_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="sd" dependent="true">
+         <column name="SD_ID"/>
+       </field>
+       <field name="parameters" table="INDEX_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="INDEX_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="4000" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+     </class>
+ 
+     <class name="MRole" table="ROLES" identity-type="datastore" detachable="true">
+ 
+       <index name="RoleEntityINDEX" unique="true">
+         <column name="ROLE_NAME"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="ROLE_ID"/>
+       </datastore-identity>
+ 
+       <field name="roleName">
+         <column name="ROLE_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="ownerName">
+         <column name="OWNER_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       
+     </class>
+ 
+     <class name="MRoleMap" table="ROLE_MAP" identity-type="datastore" detachable="true">
+       <index name="UserRoleMapINDEX" unique="true">
+         <column name="PRINCIPAL_NAME"/>
+         <column name="ROLE_ID"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="ROLE_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="role">
+         <column name="ROLE_ID" />
+       </field>
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="addTime">
+         <column name="ADD_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+     </class>
+ 
+     <class name="MGlobalPrivilege" table="GLOBAL_PRIVS" identity-type="datastore" detachable="true">
+       
+       <index name="GlobalPrivilegeIndex" unique="true">
+         <column name="AUTHORIZER"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="USER_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+     
+       <datastore-identity>
+         <column name="USER_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="privilege">
+         <column name="USER_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MDBPrivilege" table="DB_PRIVS" identity-type="datastore" detachable="true">
+       
+       <index name="DBPrivilegeIndex" unique="true">
+         <column name="AUTHORIZER"/>
+         <column name="DB_ID"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="DB_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="DB_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="database">
+         <column name="DB_ID" />
+       </field>
+       <field name="privilege">
+         <column name="DB_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MTablePrivilege" table="TBL_PRIVS" identity-type="datastore" detachable="true">
+     
+       <index name="TablePrivilegeIndex" unique="false">
+         <column name="AUTHORIZER"/>
+         <column name="TBL_ID"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="TBL_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="TBL_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="table">
+         <column name="TBL_ID"/>
+       </field>
+       <field name="privilege">
+         <column name="TBL_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MPartitionPrivilege" table="PART_PRIVS" identity-type="datastore" detachable="true">
+     
+       <index name="PartPrivilegeIndex" unique="false">
+         <column name="AUTHORIZER"/>
+         <column name="PART_ID"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="PART_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="PART_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="partition">
+         <column name="PART_ID" />
+       </field>
+       <field name="privilege">
+         <column name="PART_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MTableColumnPrivilege" table="TBL_COL_PRIVS" identity-type="datastore" detachable="true">
+     
+      <index name="TableColumnPrivilegeIndex" unique="false">
+         <column name="AUTHORIZER"/>
+         <column name="TBL_ID"/>
+         <column name="COLUMN_NAME"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="TBL_COL_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="TBL_COLUMN_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="table">
+         <column name="TBL_ID" />
+       </field>
+       <field name="columnName">
+         <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="privilege">
+         <column name="TBL_COL_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MPartitionColumnPrivilege" table="PART_COL_PRIVS" identity-type="datastore" detachable="true">
+     
+      <index name="PartitionColumnPrivilegeIndex" unique="false">
+         <column name="AUTHORIZER"/>
+         <column name="PART_ID"/>
+         <column name="COLUMN_NAME"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="PART_COL_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="PART_COLUMN_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="partition">
+         <column name="PART_ID" />
+       </field>
+       <field name="columnName">
+         <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="privilege">
+         <column name="PART_COL_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MPartitionEvent"  table="PARTITION_EVENTS" identity-type="datastore" detachable="true">  
+        
+       <index name="PartitionEventIndex" unique="false">
+         <column name="PARTITION_NAME"/>
+       </index>
+       
+       <datastore-identity>
+         <column name="PART_NAME_ID"/>
+       </datastore-identity>
+       
+       <field name="catalogName">  
+         <column name="CAT_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="dbName">  
+         <column name="DB_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="tblName">
+         <column name="TBL_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="partName">
+         <column name="PARTITION_NAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="eventType">
+         <column name="EVENT_TYPE"  jdbc-type="integer"/>
+       </field>
+       <field name="eventTime">
+         <column name="EVENT_TIME"  jdbc-type="BIGINT"/>
+       </field>
+ 
+     </class>
+     
+     <class name="MMasterKey" table="MASTER_KEYS" identity-type="application" detachable="true">
+ 
+       <field name="keyId" primary-key="true" value-strategy="native">
+         <column name="KEY_ID" jdbc-type="integer" />
+       </field>
+         
+       <field name="masterKey">
+         <column name="MASTER_KEY" length="767" jdbc-type="VARCHAR" />
+       </field>  
+       
+     </class>
+ 
+     <class name="MDelegationToken" table="DELEGATION_TOKENS" identity-type="application" detachable="true">
+ 
+       <field name="tokenIdentifier" primary-key="true">
+         <column name="TOKEN_IDENT" length="767" jdbc-type="VARCHAR" />
+       </field>  
+ 
+       <field name="tokenStr">
+         <column name="TOKEN" length="767" jdbc-type="VARCHAR" />
+       </field>
+             
+     </class>    
+ 
+     <class name="MTableColumnStatistics" table="TAB_COL_STATS" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="CS_ID"/>
+       </datastore-identity>
+ 
+       <field name ="catName">
+         <column name="CAT_NAME" length="256" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name ="dbName">
+         <column name="DB_NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="tableName">
+         <column name="TABLE_NAME" length="256" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="table">
+         <column name="TBL_ID"/>
+       </field>
+       <field name="colName">
+         <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="colType">
+         <column name="COLUMN_TYPE" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="longLowValue">
+         <column name="LONG_LOW_VALUE"  jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="longHighValue">
+         <column name="LONG_HIGH_VALUE" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="doubleLowValue">
+         <column name="DOUBLE_LOW_VALUE"  jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="doubleHighValue">
+         <column name="DOUBLE_HIGH_VALUE" jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="decimalLowValue">
+         <column name="BIG_DECIMAL_LOW_VALUE"  jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="decimalHighValue">
+         <column name="BIG_DECIMAL_HIGH_VALUE" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="numNulls">
+         <column name="NUM_NULLS" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+       <field name="numDVs">
+         <column name="NUM_DISTINCTS" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="bitVector">
+         <column name="BIT_VECTOR" jdbc-type="BLOB" allows-null="true"/>
+       </field>
+       <field name="avgColLen">
+         <column name="AVG_COL_LEN" jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="maxColLen">
+         <column name="MAX_COL_LEN" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="numTrues">
+         <column name="NUM_TRUES" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="numFalses">
+         <column name="NUM_FALSES" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="lastAnalyzed">
+         <column name="LAST_ANALYZED" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MPartitionColumnStatistics" table="PART_COL_STATS" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="CS_ID"/>
+       </datastore-identity>
+ 
+       <field name ="catName">
+         <column name="CAT_NAME" length="256" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name ="dbName">
+         <column name="DB_NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="tableName">
+         <column name="TABLE_NAME" length="256" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="partitionName">
+         <column name="PARTITION_NAME" length="767" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="partition">
+         <column name="PART_ID"/>
+       </field>
+       <field name="colName">
+         <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="colType">
+         <column name="COLUMN_TYPE" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="longLowValue">
+         <column name="LONG_LOW_VALUE"  jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="longHighValue">
+         <column name="LONG_HIGH_VALUE" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="doubleLowValue">
+         <column name="DOUBLE_LOW_VALUE"  jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="doubleHighValue">
+         <column name="DOUBLE_HIGH_VALUE" jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="decimalLowValue">
+         <column name="BIG_DECIMAL_LOW_VALUE"  jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="decimalHighValue">
+         <column name="BIG_DECIMAL_HIGH_VALUE" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="numNulls">
+         <column name="NUM_NULLS" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+       <field name="numDVs">
+         <column name="NUM_DISTINCTS" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="bitVector">
+         <column name="BIT_VECTOR" jdbc-type="BLOB" allows-null="true"/>
+       </field>
+       <field name="avgColLen">
+         <column name="AVG_COL_LEN" jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="maxColLen">
+         <column name="MAX_COL_LEN" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="numTrues">
+         <column name="NUM_TRUES" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="numFalses">
+         <column name="NUM_FALSES" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="lastAnalyzed">
+         <column name="LAST_ANALYZED" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+     </class>
+     <class name="MVersionTable" table="VERSION" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="VER_ID"/>
+       </datastore-identity>
+       <field name ="schemaVersion">
+         <column name="SCHEMA_VERSION" length="127" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name ="versionComment">
+         <column name="VERSION_COMMENT" length="255" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MMetastoreDBProperties" table="METASTORE_DB_PROPERTIES" identity-type="application" detachable="true">
+       <field name ="propertyKey" primary-key="true">
+         <column name="PROPERTY_KEY" length="255" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name ="propertyValue">
+         <column name="PROPERTY_VALUE" length="1000" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name ="description">
+         <column name="DESCRIPTION" length="1000" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MResourceUri" embedded-only="true" table="RESOURCE_URI" detachable="true">
+       <field name="resourceType">
+         <column name="RESOURCE_TYPE" jdbc-type="INTEGER"/>
+       </field>
+       <field name="uri">
+         <column name="RESOURCE_URI" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MFunction" table="FUNCS" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="FUNC_ID"/>
+       </datastore-identity>
+       <index name="UniqueFunction" unique="true">
+         <column name="FUNC_NAME"/>
+         <column name="DB_ID"/>
+       </index>
+       <field name="functionName">
+         <column name="FUNC_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="database">
+         <column name="DB_ID"/>
+       </field>
+       <field name="functionType">
+         <column name="FUNC_TYPE" jdbc-type="integer"/>
+       </field>
+       <field name="className">
+         <column name="CLASS_NAME" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="ownerName">    
+         <column name="OWNER_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="ownerType">
+         <column name="OWNER_TYPE" length="10" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="resourceUris" table="FUNC_RU">
+         <collection element-type="MResourceUri"/>
+         <join>
+           <column name="FUNC_ID"/>
+         </join>
+         <element>
+           <embedded>
+             <field name="resourceType">
+               <column name="RESOURCE_TYPE" jdbc-type="INTEGER"/>
+             </field>
+             <field name="uri">
+               <column name="RESOURCE_URI" length="4000" jdbc-type="VARCHAR"/>
+             </field>
+           </embedded>
+         </element>
+       </field>
+     </class>
+ 
+     <class name="MNotificationLog" table="NOTIFICATION_LOG" identity-type="datastore" detachable="true">
+       <datastore-identity strategy="increment"/>
+       <datastore-identity key-cache-size="1"/>
+       <datastore-identity>
+         <column name="NL_ID"/>
+       </datastore-identity>
+       <field name="eventId">
+         <column name="EVENT_ID" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+       <field name="eventTime">
+         <column name="EVENT_TIME" jdbc-type="INTEGER" allows-null="false"/>
+       </field>
+       <field name="eventType">
+         <column name="EVENT_TYPE" length="32" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="catalogName">
+         <column name="CAT_NAME" length="256" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="dbName">
+         <column name="DB_NAME" length="128" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="tableName">
+         <column name="TBL_NAME" length="256" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="message">
+         <column name="MESSAGE" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="messageFormat">
+         <column name="MESSAGE_FORMAT" length="16" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <!-- I tried to use a sequence here but derby didn't handle it well. -->
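+     <!-- The next event id is instead tracked in the NEXT_EVENT_ID column of this
+          single-row table; the events themselves are stored via MNotificationLog above. -->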
+     <class name="MNotificationNextId" table="NOTIFICATION_SEQUENCE" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="NNI_ID"/>
+       </datastore-identity>
+       <field name="nextEventId">
+         <column name="NEXT_EVENT_ID" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MTxnWriteNotificationLog" table="TXN_WRITE_NOTIFICATION_LOG" identity-type="datastore" detachable="true">
+       <datastore-identity strategy="increment"/>
+       <datastore-identity key-cache-size="1"/>
+       <datastore-identity>
+         <column name="WNL_ID"/>
+       </datastore-identity>
+       <field name="txnId">
+         <column name="WNL_TXNID" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+       <field name="writeId">
+         <column name="WNL_WRITEID" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+       <field name="database">
+         <column name="WNL_DATABASE" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="table">
+         <column name="WNL_TABLE" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="partition">
+         <column name="WNL_PARTITION" length="1024" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="tableObject">
+         <column name="WNL_TABLE_OBJ" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="partObject">
+         <column name="WNL_PARTITION_OBJ" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="files">
+         <column name="WNL_FILES" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="eventTime">
+         <column name="WNL_EVENT_TIME" jdbc-type="INTEGER" allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MWMResourcePlan" identity-type="datastore" table="WM_RESOURCEPLAN" detachable="true">
+       <datastore-identity>
+         <column name="RP_ID"/>
+       </datastore-identity>
+       <field name="name">
+         <column name="NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="queryParallelism">
+         <column name="QUERY_PARALLELISM" jdbc-type="integer" allows-null="true"/>
+       </field>
+       <field name="status">
+         <column name="STATUS" jdbc-type="string" allows-null="false"/>
+       </field>
+       <field name="defaultPool">
+         <column name="DEFAULT_POOL_ID" jdbc-type="integer" allows-null="true"/>
+       </field>
+       <index name="UniqueResourcePlan" unique="true">
+         <column name="NAME"/>
+       </index>
+ 
+       <field name="pools" mapped-by="resourcePlan">
+         <collection element-type="MWMPool"/>
+       </field>
+       <field name="triggers" mapped-by="resourcePlan">
+         <collection element-type="MWMTrigger"/>
+       </field>
+       <field name="mappings" mapped-by="resourcePlan">
+         <collection element-type="MWMMapping"/>
+       </field>
+     </class>
+ 
+     <class name="MWMPool" identity-type="datastore" table="WM_POOL" detachable="true">
+       <datastore-identity>
+         <column name="POOL_ID"/>
+       </datastore-identity>
+       <field name="resourcePlan">
+         <column name="RP_ID" jdbc-type="integer" allows-null="false"/>
+       </field>
+       <field name="path">
+         <column name="PATH" length="1024" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="allocFraction">
+         <column name="ALLOC_FRACTION" jdbc-type="double" allows-null="true"/>
+       </field>
+       <field name="queryParallelism">
+         <column name="QUERY_PARALLELISM" jdbc-type="integer" allows-null="true"/>
+       </field>
+       <field name="schedulingPolicy">
+         <column name="SCHEDULING_POLICY" jdbc-type="string" allows-null="true"/>
+       </field>
+       <field name="triggers" table="WM_POOL_TO_TRIGGER">
+         <collection element-type="MWMTrigger" />
+         <join>
+           <column name="POOL_ID" />
+         </join>
+         <element>
+           <column name="TRIGGER_ID"/>
+         </element>
+       </field>
+       <index name="UniqueWMPool" unique="true">
+         <column name="RP_ID"/>
+         <column name="PATH"/>
+       </index>
+     </class>
+ 
+     <class name="MWMTrigger" identity-type="datastore" table="WM_TRIGGER" detachable="true">
+       <datastore-identity>
+         <column name="TRIGGER_ID"/>
+       </datastore-identity>
+       <field name="resourcePlan">
+         <column name="RP_ID" jdbc-type="integer" allows-null="false"/>
+       </field>
+       <field name="name">
+         <column name="NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="triggerExpression">
+         <column name="TRIGGER_EXPRESSION" jdbc-type="string" allows-null="true"/>
+       </field>
+       <field name="actionExpression">
+         <column name="ACTION_EXPRESSION" jdbc-type="string" allows-null="true"/>
+       </field>
+       <field name="isInUnmanaged">
+         <column name="IS_IN_UNMANAGED" allows-null="false"/>
+       </field>
+       <field name="pools" mapped-by="triggers">
+         <collection element-type="MWMPool" />
+       </field>
+       <index name="UniqueWMTrigger" unique="true">
+         <column name="RP_ID"/>
+         <column name="NAME"/>
+       </index>
+     </class>
+ 
+     <class name="MWMMapping" identity-type="datastore" table="WM_MAPPING" detachable="true">
+       <datastore-identity>
+         <column name="MAPPING_ID"/>
+       </datastore-identity>
+       <field name="resourcePlan">
+         <column name="RP_ID" jdbc-type="integer" allows-null="false"/>
+       </field>
+       <field name="entityType">
+         <column name="ENTITY_TYPE" jdbc-type="string" length="128" />
+       </field>
+       <field name="entityName">
+         <column name="ENTITY_NAME" jdbc-type="string" length="128" />
+       </field>
+       <field name="pool">
+         <column name="POOL_ID" jdbc-type="integer" allows-null="true"/>
+       </field>
+       <field name="ordering">
+         <column name="ORDERING" jdbc-type="integer" allows-null="true"/>
+       </field>
+       <index name="UniqueWMMapping" unique="true">
+         <column name="RP_ID"/>
+         <column name="ENTITY_TYPE"/>
+         <column name="ENTITY_NAME"/>
+       </index>
+     </class>
+ 
+     <class name="MISchema" identity-type="datastore" table="I_SCHEMA" detachable="true">
+       <datastore-identity>
+         <column name="SCHEMA_ID"/>
+       </datastore-identity>
+       <field name="schemaType">
+         <column name="SCHEMA_TYPE" jdbc-type="integer"/>
+       </field>
+       <field name="name">
+         <column name="NAME" jdbc-type="varchar" length="256"/>
+       </field>
+       <field name="db">
+         <column name="DB_ID"/>
+       </field>
+       <field name="compatibility">
+         <column name="COMPATIBILITY" jdbc-type="integer"/>
+       </field>
+       <field name="validationLevel">
+         <column name="VALIDATION_LEVEL" jdbc-type="integer"/>
+       </field>
+       <field name="canEvolve">
+         <column name="CAN_EVOLVE"/>
+       </field>
+       <field name="schemaGroup">
+         <column name="SCHEMA_GROUP" jdbc-type="varchar" length="256" allows-null="true"/>
+       </field>
+       <field name="description">
+         <column name="DESCRIPTION" jdbc-type="varchar" length="4000" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <class name="MSchemaVersion" identity-type="datastore" table="SCHEMA_VERSION" detachable="true">
+       <datastore-identity>
+         <column name="SCHEMA_VERSION_ID"/>
+       </datastore-identity>
+       <field name="iSchema">
+         <column name="SCHEMA_ID"/>
+       </field>
+       <field name="version">
+         <column name="VERSION" jdbc-type="integer"/>
+       </field>
+       <field name="createdAt">
+         <column name="CREATED_AT" jdbc-type="bigint"/>
+       </field>
+       <field name="cols">
+           <column name="CD_ID"/>
+       </field>
+       <field name="state">
+         <column name="STATE" jdbc-type="integer"/>
+       </field>
+       <field name="description">
+         <column name="DESCRIPTION" jdbc-type="varchar" length="4000" allows-null="true"/>
+       </field>
+       <field name="schemaText" default-fetch-group="false">
+         <column name="SCHEMA_TEXT" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="fingerprint">
+         <column name="FINGERPRINT" jdbc-type="varchar" length="256" allows-null="true"/>
+       </field>
+       <field name="name">
+         <column name="SCHEMA_VERSION_NAME" jdbc-type="varchar" length="256" allows-null="true"/>
+       </field>
+       <field name="serDe">
+         <column name="SERDE_ID"/>
+       </field>
+     </class>
+     <class name="MRuntimeStat" identity-type="datastore" table="RUNTIME_STATS" detachable="true">
+       <datastore-identity>
+         <column name="RS_ID"/>
+       </datastore-identity>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="weight">
+         <column name="WEIGHT" jdbc-type="integer"/>
+       </field>
+       <field name="payload">
+         <column name="PAYLOAD" jdbc-type="BLOB" allows-null="true"/>
+       </field>
+    </class>
+   </package>
+ </jdo>
+ 


[28/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
index 0000000,62ed380..e4854f9
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
@@@ -1,0 -1,1075 +1,1075 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.cache;
+ 
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.concurrent.Callable;
+ import java.util.concurrent.ExecutorService;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.ThreadFactory;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog;
+ import org.apache.hadoop.hive.metastore.HiveMetaStore;
+ import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
+ import org.apache.hadoop.hive.metastore.ObjectStore;
+ import org.apache.hadoop.hive.metastore.TableType;
+ import org.apache.hadoop.hive.metastore.Warehouse;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector;
+ import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ 
+ import jline.internal.Log;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ 
+ @Category(MetastoreCheckinTest.class)
+ public class TestCachedStore {
+ 
+   private ObjectStore objectStore;
+   private CachedStore cachedStore;
+   private SharedCache sharedCache;
+   private Configuration conf;
+ 
+   @Before
+   public void setUp() throws Exception {
+     conf = MetastoreConf.newMetastoreConf();
+     MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
+     // Disable memory estimation for this test class
+     MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb");
+     MetaStoreTestUtils.setConfForStandloneMode(conf);
+     objectStore = new ObjectStore();
+     objectStore.setConf(conf);
+     cachedStore = new CachedStore();
+     cachedStore.setConfForTest(conf);
+     // Stop the CachedStore cache update service. We'll start it explicitly to control the test
+     CachedStore.stopCacheUpdateService(1);
+     sharedCache = new SharedCache();
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+ 
+     // Create the 'hive' catalog
+     HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf));
+   }
+ 
+   /**********************************************************************************************
+    * Methods that test CachedStore
+    *********************************************************************************************/
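+   // The tests below share a common structure: objects are first created through
+   // ObjectStore, CachedStore is prewarmed from it, further creates/alters/drops go
+   // through either store, updateCache() is run twice (so dirty cache entries are
+   // detected reliably), and the same objects are then read back through CachedStore
+   // and compared with what ObjectStore returns.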
+ 
+   @Test
+   public void testDatabaseOps() throws Exception {
+     // Add a db via ObjectStore
+     String dbName = "testDatabaseOps";
+     String dbOwner = "user1";
+     Database db = createTestDb(dbName, dbOwner);
+     objectStore.createDatabase(db);
+     db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     // Prewarm CachedStore
+     CachedStore.setCachePrewarmedState(false);
+     CachedStore.prewarm(objectStore);
+ 
+     // Read database via CachedStore
+     Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     Assert.assertEquals(db, dbRead);
+ 
+     // Add another db via CachedStore
+     final String dbName1 = "testDatabaseOps1";
+     Database db1 = createTestDb(dbName1, dbOwner);
+     cachedStore.createDatabase(db1);
+     db1 = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1);
+ 
+     // Read db via ObjectStore
+     dbRead = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1);
+     Assert.assertEquals(db1, dbRead);
+ 
+     // Alter the db via CachedStore (can only alter owner or parameters)
+     dbOwner = "user2";
+     db = new Database(db);
+     db.setOwnerName(dbOwner);
+     cachedStore.alterDatabase(DEFAULT_CATALOG_NAME, dbName, db);
+     db = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+ 
+     // Read db via ObjectStore
+     dbRead = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     Assert.assertEquals(db, dbRead);
+ 
+     // Add another db via ObjectStore
+     final String dbName2 = "testDatabaseOps2";
+     Database db2 = createTestDb(dbName2, dbOwner);
+     objectStore.createDatabase(db2);
+     db2 = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName2);
+ 
+     // Alter db "testDatabaseOps" via ObjectStore
+     dbOwner = "user1";
+     db = new Database(db);
+     db.setOwnerName(dbOwner);
+     objectStore.alterDatabase(DEFAULT_CATALOG_NAME, dbName, db);
+     db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+ 
+     // Drop db "testDatabaseOps1" via ObjectStore
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName1);
+ 
+     // We update twice to accurately detect if cache is dirty or not
+     updateCache(cachedStore);
+     updateCache(cachedStore);
+ 
+     // Read the newly added db via CachedStore
+     dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName2);
+     Assert.assertEquals(db2, dbRead);
+ 
+     // Read the altered db via CachedStore (altered user from "user2" to "user1")
+     dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     Assert.assertEquals(db, dbRead);
+ 
+     // Try to read the dropped db after cache update
+     try {
+       dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1);
+       Assert.fail("The database: " + dbName1
+           + " should have been removed from the cache after running the update service");
+     } catch (NoSuchObjectException e) {
+       // Expected
+     }
+ 
+     // Clean up
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName);
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName2);
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+   }
+ 
+   @Test
+   public void testTableOps() throws Exception {
+     // Add a db via ObjectStore
+     String dbName = "testTableOps";
+     String dbOwner = "user1";
+     Database db = createTestDb(dbName, dbOwner);
+     objectStore.createDatabase(db);
+     db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+ 
+     // Add a table via ObjectStore
+     String tblName = "tbl";
+     String tblOwner = "user1";
+     FieldSchema col1 = new FieldSchema("col1", "int", "integer column");
+     FieldSchema col2 = new FieldSchema("col2", "string", "string column");
+     List<FieldSchema> cols = new ArrayList<FieldSchema>();
+     cols.add(col1);
+     cols.add(col2);
+     List<FieldSchema> ptnCols = new ArrayList<FieldSchema>();
+     Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols);
+     objectStore.createTable(tbl);
+     tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+ 
+     // Prewarm CachedStore
+     CachedStore.setCachePrewarmedState(false);
+     CachedStore.prewarm(objectStore);
+ 
+     // Read database, table via CachedStore
+     Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     Assert.assertEquals(db, dbRead);
+     Table tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     Assert.assertEquals(tbl, tblRead);
+ 
+     // Add a new table via CachedStore
+     String tblName1 = "tbl1";
+     Table tbl1 = new Table(tbl);
+     tbl1.setTableName(tblName1);
+     cachedStore.createTable(tbl1);
+     tbl1 = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName1);
+ 
+     // Read via object store
+     tblRead = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName1);
+     Assert.assertEquals(tbl1, tblRead);
+ 
+     // Add a new table via ObjectStore
+     String tblName2 = "tbl2";
+     Table tbl2 = new Table(tbl);
+     tbl2.setTableName(tblName2);
+     objectStore.createTable(tbl2);
+     tbl2 = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName2);
+ 
+     // Alter table "tbl" via ObjectStore
+     tblOwner = "role1";
+     tbl.setOwner(tblOwner);
+     tbl.setOwnerType(PrincipalType.ROLE);
 -    objectStore.alterTable(DEFAULT_CATALOG_NAME, dbName, tblName, tbl);
++    objectStore.alterTable(DEFAULT_CATALOG_NAME, dbName, tblName, tbl, -1, null);
+     tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+ 
+     Assert.assertEquals("Owner of the table did not change.", tblOwner, tbl.getOwner());
+     Assert.assertEquals("Owner type of the table did not change", PrincipalType.ROLE, tbl.getOwnerType());
+ 
+     // Drop table "tbl1" via ObjectStore
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName1);
+ 
+     // We update twice to accurately detect if cache is dirty or not
+     updateCache(cachedStore);
+     updateCache(cachedStore);
+ 
+     // Read "tbl2" via CachedStore
+     tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName2);
+     Assert.assertEquals(tbl2, tblRead);
+ 
+     // Read the altered "tbl" via CachedStore
+     tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     Assert.assertEquals(tbl, tblRead);
+ 
+     // Try to read the dropped "tbl1" via CachedStore (should return null)
+     tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName1);
+     Assert.assertNull(tblRead);
+ 
+     // Should return "tbl" and "tbl2"
+     List<String> tblNames = cachedStore.getTables(DEFAULT_CATALOG_NAME, dbName, "*");
+     Assert.assertTrue(tblNames.contains(tblName));
+     Assert.assertTrue(!tblNames.contains(tblName1));
+     Assert.assertTrue(tblNames.contains(tblName2));
+ 
+     // Clean up
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName2);
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName);
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+   }
+ 
+   @Test
+   public void testPartitionOps() throws Exception {
+     // Add a db via ObjectStore
+     String dbName = "testPartitionOps";
+     String dbOwner = "user1";
+     Database db = createTestDb(dbName, dbOwner);
+     objectStore.createDatabase(db);
+     db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+ 
+     // Add a table via ObjectStore
+     String tblName = "tbl";
+     String tblOwner = "user1";
+     FieldSchema col1 = new FieldSchema("col1", "int", "integer column");
+     FieldSchema col2 = new FieldSchema("col2", "string", "string column");
+     List<FieldSchema> cols = new ArrayList<FieldSchema>();
+     cols.add(col1);
+     cols.add(col2);
+     FieldSchema ptnCol1 = new FieldSchema("part1", "string", "string partition column");
+     List<FieldSchema> ptnCols = new ArrayList<FieldSchema>();
+     ptnCols.add(ptnCol1);
+     Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols);
+     objectStore.createTable(tbl);
+     tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+ 
+     final String ptnColVal1 = "aaa";
+     Map<String, String> partParams = new HashMap<String, String>();
+     Partition ptn1 =
+         new Partition(Arrays.asList(ptnColVal1), dbName, tblName, 0, 0, tbl.getSd(), partParams);
+     ptn1.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPartition(ptn1);
+     ptn1 = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1));
+     ptn1.setCatName(DEFAULT_CATALOG_NAME);
+     final String ptnColVal2 = "bbb";
+     Partition ptn2 =
+         new Partition(Arrays.asList(ptnColVal2), dbName, tblName, 0, 0, tbl.getSd(), partParams);
+     ptn2.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPartition(ptn2);
+     ptn2 = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2));
+ 
+     // Prewarm CachedStore
+     CachedStore.setCachePrewarmedState(false);
+     CachedStore.prewarm(objectStore);
+ 
+     // Read database, table, partition via CachedStore
+     Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     Assert.assertEquals(db, dbRead);
+     Table tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     Assert.assertEquals(tbl, tblRead);
+     Partition ptn1Read = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1));
+     Assert.assertEquals(ptn1, ptn1Read);
+     Partition ptn2Read = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2));
+     Assert.assertEquals(ptn2, ptn2Read);
+ 
+     // Add a new partition via ObjectStore
+     final String ptnColVal3 = "ccc";
+     Partition ptn3 =
+         new Partition(Arrays.asList(ptnColVal3), dbName, tblName, 0, 0, tbl.getSd(), partParams);
+     ptn3.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPartition(ptn3);
+     ptn3 = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3));
+ 
+     // Alter an existing partition ("aaa") via ObjectStore
+     final String ptnColVal1Alt = "aaaAlt";
+     Partition ptn1Atl =
+         new Partition(Arrays.asList(ptnColVal1Alt), dbName, tblName, 0, 0, tbl.getSd(), partParams);
+     ptn1Atl.setCatName(DEFAULT_CATALOG_NAME);
 -    objectStore.alterPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), ptn1Atl);
++    objectStore.alterPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), ptn1Atl, -1, null);
+     ptn1Atl = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt));
+ 
+     // Drop an existing partition ("bbb") via ObjectStore
+     objectStore.dropPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2));
+ 
+     // We update twice to accurately detect if cache is dirty or not
+     updateCache(cachedStore);
+     updateCache(cachedStore);
+ 
+     // Read the newly added partition via CachedStore
+     Partition ptnRead = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3));
+     Assert.assertEquals(ptn3, ptnRead);
+ 
+     // Read the altered partition via CachedStore
+     ptnRead = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt));
+     Assert.assertEquals(ptn1Atl, ptnRead);
+ 
+     // Try to read the dropped partition via CachedStore
+     try {
+       ptnRead = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2));
+       Assert.fail("The partition: " + ptnColVal2
+           + " should have been removed from the cache after running the update service");
+     } catch (NoSuchObjectException e) {
+       // Expected
+     }
+     // Clean up
+     objectStore.dropPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt));
+     objectStore.dropPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3));
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName);
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+   }
+ 
+   //@Test
+   public void testTableColStatsOps() throws Exception {
+     // Add a db via ObjectStore
+     String dbName = "testTableColStatsOps";
+     String dbOwner = "user1";
+     Database db = createTestDb(dbName, dbOwner);
+     objectStore.createDatabase(db);
+     db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+ 
+     // Add a table via ObjectStore
+     final String tblName = "tbl";
+     final String tblOwner = "user1";
+     final FieldSchema col1 = new FieldSchema("col1", "int", "integer column");
+     // Stats values for col1
+     long col1LowVal = 5;
+     long col1HighVal = 500;
+     long col1Nulls = 10;
+     long col1DV = 20;
+     final FieldSchema col2 = new FieldSchema("col2", "string", "string column");
+     // Stats values for col2
+     long col2MaxColLen = 100;
+     double col2AvgColLen = 45.5;
+     long col2Nulls = 5;
+     long col2DV = 40;
+     final FieldSchema col3 = new FieldSchema("col3", "boolean", "boolean column");
+     // Stats values for col3
+     long col3NumTrues = 100;
+     long col3NumFalses = 30;
+     long col3Nulls = 10;
+     final List<FieldSchema> cols = new ArrayList<>();
+     cols.add(col1);
+     cols.add(col2);
+     cols.add(col3);
+     FieldSchema ptnCol1 = new FieldSchema("part1", "string", "string partition column");
+     List<FieldSchema> ptnCols = new ArrayList<FieldSchema>();
+     ptnCols.add(ptnCol1);
+     Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols);
+     objectStore.createTable(tbl);
+     tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+ 
+     // Add ColumnStatistics for tbl to metastore DB via ObjectStore
+     ColumnStatistics stats = new ColumnStatistics();
+     ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName);
+     List<ColumnStatisticsObj> colStatObjs = new ArrayList<>();
+ 
+     // Col1
+     ColumnStatisticsData data1 = new ColumnStatisticsData();
+     ColumnStatisticsObj col1Stats = new ColumnStatisticsObj(col1.getName(), col1.getType(), data1);
+     LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector();
+     longStats.setLowValue(col1LowVal);
+     longStats.setHighValue(col1HighVal);
+     longStats.setNumNulls(col1Nulls);
+     longStats.setNumDVs(col1DV);
+     data1.setLongStats(longStats);
+     colStatObjs.add(col1Stats);
+ 
+     // Col2
+     ColumnStatisticsData data2 = new ColumnStatisticsData();
+     ColumnStatisticsObj col2Stats = new ColumnStatisticsObj(col2.getName(), col2.getType(), data2);
+     StringColumnStatsDataInspector stringStats = new StringColumnStatsDataInspector();
+     stringStats.setMaxColLen(col2MaxColLen);
+     stringStats.setAvgColLen(col2AvgColLen);
+     stringStats.setNumNulls(col2Nulls);
+     stringStats.setNumDVs(col2DV);
+     data2.setStringStats(stringStats);
+     colStatObjs.add(col2Stats);
+ 
+     // Col3
+     ColumnStatisticsData data3 = new ColumnStatisticsData();
+     ColumnStatisticsObj col3Stats = new ColumnStatisticsObj(col3.getName(), col3.getType(), data3);
+     BooleanColumnStatsData boolStats = new BooleanColumnStatsData();
+     boolStats.setNumTrues(col3NumTrues);
+     boolStats.setNumFalses(col3NumFalses);
+     boolStats.setNumNulls(col3Nulls);
+     data3.setBooleanStats(boolStats);
+     colStatObjs.add(col3Stats);
+ 
+     stats.setStatsDesc(statsDesc);
+     stats.setStatsObj(colStatObjs);
+ 
+     // Save to DB
 -    objectStore.updateTableColumnStatistics(stats);
++    objectStore.updateTableColumnStatistics(stats, -1, null, -1);
+ 
+     // Prewarm CachedStore
+     CachedStore.setCachePrewarmedState(false);
+     CachedStore.prewarm(objectStore);
+ 
+     // Read table stats via CachedStore
+     ColumnStatistics newStats =
+         cachedStore.getTableColumnStatistics(DEFAULT_CATALOG_NAME, dbName, tblName,
+             Arrays.asList(col1.getName(), col2.getName(), col3.getName()));
+     Assert.assertEquals(stats, newStats);
+ 
+     // Clean up
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName);
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+   }
+ 
+   /**********************************************************************************************
+    * Methods that test SharedCache
+    *********************************************************************************************/
+ 
+   @Test
+   public void testSharedStoreDb() {
+     Database db1 = createTestDb("db1", "user1");
+     Database db2 = createTestDb("db2", "user1");
+     Database db3 = createTestDb("db3", "user1");
+     Database newDb1 = createTestDb("newdb1", "user1");
+     sharedCache.addDatabaseToCache(db1);
+     sharedCache.addDatabaseToCache(db2);
+     sharedCache.addDatabaseToCache(db3);
+     Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 3);
+     sharedCache.alterDatabaseInCache(DEFAULT_CATALOG_NAME, "db1", newDb1);
+     Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 3);
+     sharedCache.removeDatabaseFromCache(DEFAULT_CATALOG_NAME, "db2");
+     Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 2);
+     List<String> dbs = sharedCache.listCachedDatabases(DEFAULT_CATALOG_NAME);
+     Assert.assertEquals(dbs.size(), 2);
+     Assert.assertTrue(dbs.contains("newdb1"));
+     Assert.assertTrue(dbs.contains("db3"));
+   }
+ 
+   @Test
+   public void testSharedStoreTable() {
+     Table tbl1 = new Table();
+     StorageDescriptor sd1 = new StorageDescriptor();
+     List<FieldSchema> cols1 = new ArrayList<>();
+     cols1.add(new FieldSchema("col1", "int", ""));
+     Map<String, String> params1 = new HashMap<>();
+     params1.put("key", "value");
+     sd1.setCols(cols1);
+     sd1.setParameters(params1);
+     sd1.setLocation("loc1");
+     tbl1.setSd(sd1);
+     tbl1.setPartitionKeys(new ArrayList<>());
+ 
+     Table tbl2 = new Table();
+     StorageDescriptor sd2 = new StorageDescriptor();
+     List<FieldSchema> cols2 = new ArrayList<>();
+     cols2.add(new FieldSchema("col1", "int", ""));
+     Map<String, String> params2 = new HashMap<>();
+     params2.put("key", "value");
+     sd2.setCols(cols2);
+     sd2.setParameters(params2);
+     sd2.setLocation("loc2");
+     tbl2.setSd(sd2);
+     tbl2.setPartitionKeys(new ArrayList<>());
+ 
+     Table tbl3 = new Table();
+     StorageDescriptor sd3 = new StorageDescriptor();
+     List<FieldSchema> cols3 = new ArrayList<>();
+     cols3.add(new FieldSchema("col3", "int", ""));
+     Map<String, String> params3 = new HashMap<>();
+     params3.put("key2", "value2");
+     sd3.setCols(cols3);
+     sd3.setParameters(params3);
+     sd3.setLocation("loc3");
+     tbl3.setSd(sd3);
+     tbl3.setPartitionKeys(new ArrayList<>());
+ 
+     Table newTbl1 = new Table();
+     newTbl1.setDbName("db2");
+     newTbl1.setTableName("tbl1");
+     StorageDescriptor newSd1 = new StorageDescriptor();
+     List<FieldSchema> newCols1 = new ArrayList<>();
+     newCols1.add(new FieldSchema("newcol1", "int", ""));
+     Map<String, String> newParams1 = new HashMap<>();
+     newParams1.put("key", "value");
+     newSd1.setCols(newCols1);
+     newSd1.setParameters(params1);
+     newSd1.setLocation("loc1");
+     newTbl1.setSd(newSd1);
+     newTbl1.setPartitionKeys(new ArrayList<>());
+ 
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl1", tbl1);
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl2", tbl2);
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl3", tbl3);
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", tbl1);
+ 
+     Assert.assertEquals(sharedCache.getCachedTableCount(), 4);
+     Assert.assertEquals(sharedCache.getSdCache().size(), 2);
+ 
+     Table t = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1");
+     Assert.assertEquals(t.getSd().getLocation(), "loc1");
+ 
+     sharedCache.removeTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1");
+     Assert.assertEquals(sharedCache.getCachedTableCount(), 3);
+     Assert.assertEquals(sharedCache.getSdCache().size(), 2);
+ 
+     sharedCache.alterTableInCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", newTbl1);
+     Assert.assertEquals(sharedCache.getCachedTableCount(), 3);
+     Assert.assertEquals(sharedCache.getSdCache().size(), 3);
+ 
+     sharedCache.removeTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl2");
+     Assert.assertEquals(sharedCache.getCachedTableCount(), 2);
+     Assert.assertEquals(sharedCache.getSdCache().size(), 2);
+   }
+ 
+ 
+   @Test
+   public void testSharedStorePartition() {
+     String dbName = "db1";
+     String tbl1Name = "tbl1";
+     String tbl2Name = "tbl2";
+     String owner = "user1";
+     Database db = createTestDb(dbName, owner);
+     sharedCache.addDatabaseToCache(db);
+     FieldSchema col1 = new FieldSchema("col1", "int", "integer column");
+     FieldSchema col2 = new FieldSchema("col2", "string", "string column");
+     List<FieldSchema> cols = new ArrayList<FieldSchema>();
+     cols.add(col1);
+     cols.add(col2);
+     List<FieldSchema> ptnCols = new ArrayList<FieldSchema>();
+     Table tbl1 = createTestTbl(dbName, tbl1Name, owner, cols, ptnCols);
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1);
+     Table tbl2 = createTestTbl(dbName, tbl2Name, owner, cols, ptnCols);
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2);
+ 
+     Partition part1 = new Partition();
+     StorageDescriptor sd1 = new StorageDescriptor();
+     List<FieldSchema> cols1 = new ArrayList<>();
+     cols1.add(new FieldSchema("col1", "int", ""));
+     Map<String, String> params1 = new HashMap<>();
+     params1.put("key", "value");
+     sd1.setCols(cols1);
+     sd1.setParameters(params1);
+     sd1.setLocation("loc1");
+     part1.setSd(sd1);
+     part1.setValues(Arrays.asList("201701"));
+ 
+     Partition part2 = new Partition();
+     StorageDescriptor sd2 = new StorageDescriptor();
+     List<FieldSchema> cols2 = new ArrayList<>();
+     cols2.add(new FieldSchema("col1", "int", ""));
+     Map<String, String> params2 = new HashMap<>();
+     params2.put("key", "value");
+     sd2.setCols(cols2);
+     sd2.setParameters(params2);
+     sd2.setLocation("loc2");
+     part2.setSd(sd2);
+     part2.setValues(Arrays.asList("201702"));
+ 
+     Partition part3 = new Partition();
+     StorageDescriptor sd3 = new StorageDescriptor();
+     List<FieldSchema> cols3 = new ArrayList<>();
+     cols3.add(new FieldSchema("col3", "int", ""));
+     Map<String, String> params3 = new HashMap<>();
+     params3.put("key2", "value2");
+     sd3.setCols(cols3);
+     sd3.setParameters(params3);
+     sd3.setLocation("loc3");
+     part3.setSd(sd3);
+     part3.setValues(Arrays.asList("201703"));
+ 
+     Partition newPart1 = new Partition();
+     newPart1.setDbName(dbName);
+     newPart1.setTableName(tbl1Name);
+     StorageDescriptor newSd1 = new StorageDescriptor();
+     List<FieldSchema> newCols1 = new ArrayList<>();
+     newCols1.add(new FieldSchema("newcol1", "int", ""));
+     Map<String, String> newParams1 = new HashMap<>();
+     newParams1.put("key", "value");
+     newSd1.setCols(newCols1);
+     newSd1.setParameters(params1);
+     newSd1.setLocation("loc1new");
+     newPart1.setSd(newSd1);
+     newPart1.setValues(Arrays.asList("201701"));
+ 
+     sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part1);
+     sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part2);
+     sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part3);
+     sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, part1);
+ 
+     Partition t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701"));
+     Assert.assertEquals(t.getSd().getLocation(), "loc1");
+ 
+     sharedCache.removePartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, Arrays.asList("201701"));
+     t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, Arrays.asList("201701"));
+     Assert.assertNull(t);
+ 
+     sharedCache.alterPartitionInCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701"), newPart1);
+     t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701"));
+     Assert.assertEquals(t.getSd().getLocation(), "loc1new");
+   }
+ 
+   @Test
+   public void testAggrStatsRepeatedRead() throws Exception {
+     String dbName = "testTableColStatsOps";
+     String tblName = "tbl";
+     String colName = "f1";
+ 
+     Database db = new DatabaseBuilder()
+         .setName(dbName)
+         .setLocation("some_location")
+         .build(conf);
+     cachedStore.createDatabase(db);
+ 
+     List<FieldSchema> cols = new ArrayList<>();
+     cols.add(new FieldSchema(colName, "int", null));
+     List<FieldSchema> partCols = new ArrayList<>();
+     partCols.add(new FieldSchema("col", "int", null));
+     StorageDescriptor sd =
+         new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap<>()),
+             null, null, null);
+ 
+     Table tbl =
+         new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(),
+             null, null, TableType.MANAGED_TABLE.toString());
+     tbl.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.createTable(tbl);
+ 
+     List<String> partVals1 = new ArrayList<>();
+     partVals1.add("1");
+     List<String> partVals2 = new ArrayList<>();
+     partVals2.add("2");
+ 
+     Partition ptn1 =
+         new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn1.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn1);
+     Partition ptn2 =
+         new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn2.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn2);
+ 
+     ColumnStatistics stats = new ColumnStatistics();
+     ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName);
+     statsDesc.setPartName("col");
+     List<ColumnStatisticsObj> colStatObjs = new ArrayList<>();
+ 
+     ColumnStatisticsData data = new ColumnStatisticsData();
+     ColumnStatisticsObj colStats = new ColumnStatisticsObj(colName, "int", data);
+     LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector();
+     longStats.setLowValue(0);
+     longStats.setHighValue(100);
+     longStats.setNumNulls(50);
+     longStats.setNumDVs(30);
+     data.setLongStats(longStats);
+     colStatObjs.add(colStats);
+ 
+     stats.setStatsDesc(statsDesc);
+     stats.setStatsObj(colStatObjs);
+ 
 -    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1);
 -    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2);
++    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, -1, null, -1);
++    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2, -1, null, -1);
+ 
+     List<String> colNames = new ArrayList<>();
+     colNames.add(colName);
+     List<String> aggrPartVals = new ArrayList<>();
+     aggrPartVals.add("1");
+     aggrPartVals.add("2");
+     AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+     aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+   }
+ 
+   @Test
+   public void testPartitionAggrStats() throws Exception {
+     String dbName = "testTableColStatsOps1";
+     String tblName = "tbl1";
+     String colName = "f1";
+ 
+     Database db = new Database(dbName, null, "some_location", null);
+     db.setCatalogName(DEFAULT_CATALOG_NAME);
+     cachedStore.createDatabase(db);
+ 
+     List<FieldSchema> cols = new ArrayList<>();
+     cols.add(new FieldSchema(colName, "int", null));
+     List<FieldSchema> partCols = new ArrayList<>();
+     partCols.add(new FieldSchema("col", "int", null));
+     StorageDescriptor sd =
+         new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap<>()),
+             null, null, null);
+ 
+     Table tbl =
+         new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(),
+             null, null, TableType.MANAGED_TABLE.toString());
+     tbl.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.createTable(tbl);
+ 
+     List<String> partVals1 = new ArrayList<>();
+     partVals1.add("1");
+     List<String> partVals2 = new ArrayList<>();
+     partVals2.add("2");
+ 
+     Partition ptn1 =
+         new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn1.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn1);
+     Partition ptn2 =
+         new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn2.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn2);
+ 
+     ColumnStatistics stats = new ColumnStatistics();
+     ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName);
+     statsDesc.setPartName("col");
+     List<ColumnStatisticsObj> colStatObjs = new ArrayList<>();
+ 
+     ColumnStatisticsData data = new ColumnStatisticsData();
+     ColumnStatisticsObj colStats = new ColumnStatisticsObj(colName, "int", data);
+     LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector();
+     longStats.setLowValue(0);
+     longStats.setHighValue(100);
+     longStats.setNumNulls(50);
+     longStats.setNumDVs(30);
+     data.setLongStats(longStats);
+     colStatObjs.add(colStats);
+ 
+     stats.setStatsDesc(statsDesc);
+     stats.setStatsObj(colStatObjs);
+ 
 -    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1);
++    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, -1, null, -1);
+ 
+     longStats.setNumDVs(40);
 -    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2);
++    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2, -1, null, -1);
+ 
+     List<String> colNames = new ArrayList<>();
+     colNames.add(colName);
+     List<String> aggrPartVals = new ArrayList<>();
+     aggrPartVals.add("1");
+     aggrPartVals.add("2");
+     AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40);
+     aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40);
+   }
+ 
+   @Test
+   public void testPartitionAggrStatsBitVector() throws Exception {
+     String dbName = "testTableColStatsOps2";
+     String tblName = "tbl2";
+     String colName = "f1";
+ 
+     Database db = new Database(dbName, null, "some_location", null);
+     db.setCatalogName(DEFAULT_CATALOG_NAME);
+     cachedStore.createDatabase(db);
+ 
+     List<FieldSchema> cols = new ArrayList<>();
+     cols.add(new FieldSchema(colName, "int", null));
+     List<FieldSchema> partCols = new ArrayList<>();
+     partCols.add(new FieldSchema("col", "int", null));
+     StorageDescriptor sd =
+         new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap<>()),
+             null, null, null);
+ 
+     Table tbl =
+         new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(),
+             null, null, TableType.MANAGED_TABLE.toString());
+     tbl.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.createTable(tbl);
+ 
+     List<String> partVals1 = new ArrayList<>();
+     partVals1.add("1");
+     List<String> partVals2 = new ArrayList<>();
+     partVals2.add("2");
+ 
+     Partition ptn1 =
+         new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn1.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn1);
+     Partition ptn2 =
+         new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn2.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn2);
+ 
+     ColumnStatistics stats = new ColumnStatistics();
+     ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName);
+     statsDesc.setPartName("col");
+     List<ColumnStatisticsObj> colStatObjs = new ArrayList<>();
+ 
+     ColumnStatisticsData data = new ColumnStatisticsData();
+     ColumnStatisticsObj colStats = new ColumnStatisticsObj(colName, "int", data);
+     LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector();
+     longStats.setLowValue(0);
+     longStats.setHighValue(100);
+     longStats.setNumNulls(50);
+     longStats.setNumDVs(30);
+ 
+     HyperLogLog hll = HyperLogLog.builder().build();
+     hll.addLong(1);
+     hll.addLong(2);
+     hll.addLong(3);
+     longStats.setBitVectors(hll.serialize());
+ 
+     data.setLongStats(longStats);
+     colStatObjs.add(colStats);
+ 
+     stats.setStatsDesc(statsDesc);
+     stats.setStatsObj(colStatObjs);
+ 
 -    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1);
++    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, -1, null, -1);
+ 
+     longStats.setNumDVs(40);
+     hll = HyperLogLog.builder().build();
+     hll.addLong(2);
+     hll.addLong(3);
+     hll.addLong(4);
+     hll.addLong(5);
+     longStats.setBitVectors(hll.serialize());
+ 
 -    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2);
++    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2, -1, null, -1);
+ 
+     List<String> colNames = new ArrayList<>();
+     colNames.add(colName);
+     List<String> aggrPartVals = new ArrayList<>();
+     aggrPartVals.add("1");
+     aggrPartVals.add("2");
+     AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5);
+     aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5);
+   }
+ 
+   @Test
+   public void testMultiThreadedSharedCacheOps() throws Exception {
+     List<String> dbNames = new ArrayList<String>(Arrays.asList("db1", "db2", "db3", "db4", "db5"));
+     List<Callable<Object>> tasks = new ArrayList<Callable<Object>>();
+     ExecutorService executor = Executors.newFixedThreadPool(50, new ThreadFactory() {
+       @Override
+       public Thread newThread(Runnable r) {
+         Thread t = Executors.defaultThreadFactory().newThread(r);
+         t.setDaemon(true);
+         return t;
+       }
+     });
+ 
+     // Create 5 dbs
+     for (String dbName : dbNames) {
+       Callable<Object> c = new Callable<Object>() {
+         public Object call() {
+           Database db = createTestDb(dbName, "user1");
+           sharedCache.addDatabaseToCache(db);
+           return null;
+         }
+       };
+       tasks.add(c);
+     }
+     executor.invokeAll(tasks);
+     for (String dbName : dbNames) {
+       Database db = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName);
+       Assert.assertNotNull(db);
+       Assert.assertEquals(dbName, db.getName());
+     }
+ 
+     // Create 5 tables under "db1"
+     List<String> tblNames =
+         new ArrayList<String>(Arrays.asList("tbl1", "tbl2", "tbl3", "tbl4", "tbl5"));
+     tasks.clear();
+     for (String tblName : tblNames) {
+       FieldSchema col1 = new FieldSchema("col1", "int", "integer column");
+       FieldSchema col2 = new FieldSchema("col2", "string", "string column");
+       List<FieldSchema> cols = new ArrayList<FieldSchema>();
+       cols.add(col1);
+       cols.add(col2);
+       FieldSchema ptnCol1 = new FieldSchema("part1", "string", "string partition column");
+       List<FieldSchema> ptnCols = new ArrayList<FieldSchema>();
+       ptnCols.add(ptnCol1);
+       Callable<Object> c = new Callable<Object>() {
+         public Object call() {
+           Table tbl = createTestTbl(dbNames.get(0), tblName, "user1", cols, ptnCols);
+           sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, tbl);
+           return null;
+         }
+       };
+       tasks.add(c);
+     }
+     executor.invokeAll(tasks);
+     for (String tblName : tblNames) {
+       Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName);
+       Assert.assertNotNull(tbl);
+       Assert.assertEquals(tblName, tbl.getTableName());
+     }
+ 
+     // Add 5 partitions to all tables
+     List<String> ptnVals = new ArrayList<String>(Arrays.asList("aaa", "bbb", "ccc", "ddd", "eee"));
+     tasks.clear();
+     for (String tblName : tblNames) {
+       Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName);
+       for (String ptnVal : ptnVals) {
+         Map<String, String> partParams = new HashMap<String, String>();
+         Callable<Object> c = new Callable<Object>() {
+           public Object call() {
+             Partition ptn = new Partition(Arrays.asList(ptnVal), dbNames.get(0), tblName, 0, 0,
+                 tbl.getSd(), partParams);
+             sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, ptn);
+             return null;
+           }
+         };
+         tasks.add(c);
+       }
+     }
+     executor.invokeAll(tasks);
+     for (String tblName : tblNames) {
+       for (String ptnVal : ptnVals) {
+         Partition ptn = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, Arrays.asList(ptnVal));
+         Assert.assertNotNull(ptn);
+         Assert.assertEquals(tblName, ptn.getTableName());
+         Assert.assertEquals(Arrays.asList(ptnVal), ptn.getValues());
+       }
+     }
+ 
+     // Drop all partitions from "tbl1", "tbl2", "tbl3" and add 2 new partitions to "tbl4" and "tbl5"
+     List<String> newPtnVals = new ArrayList<String>(Arrays.asList("fff", "ggg"));
+     List<String> dropPtnTblNames = new ArrayList<String>(Arrays.asList("tbl1", "tbl2", "tbl3"));
+     List<String> addPtnTblNames = new ArrayList<String>(Arrays.asList("tbl4", "tbl5"));
+     tasks.clear();
+     for (String tblName : dropPtnTblNames) {
+       for (String ptnVal : ptnVals) {
+         Callable<Object> c = new Callable<Object>() {
+           public Object call() {
+             sharedCache.removePartitionFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, Arrays.asList(ptnVal));
+             return null;
+           }
+         };
+         tasks.add(c);
+       }
+     }
+     for (String tblName : addPtnTblNames) {
+       Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName);
+       for (String ptnVal : newPtnVals) {
+         Map<String, String> partParams = new HashMap<String, String>();
+         Callable<Object> c = new Callable<Object>() {
+           public Object call() {
+             Partition ptn = new Partition(Arrays.asList(ptnVal), dbNames.get(0), tblName, 0, 0,
+                 tbl.getSd(), partParams);
+             sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, ptn);
+             return null;
+           }
+         };
+         tasks.add(c);
+       }
+     }
+     executor.invokeAll(tasks);
+     for (String tblName : addPtnTblNames) {
+       for (String ptnVal : newPtnVals) {
+         Partition ptn = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, Arrays.asList(ptnVal));
+         Assert.assertNotNull(ptn);
+         Assert.assertEquals(tblName, ptn.getTableName());
+         Assert.assertEquals(Arrays.asList(ptnVal), ptn.getValues());
+       }
+     }
+     for (String tblName : dropPtnTblNames) {
+       List<Partition> ptns = sharedCache.listCachedPartitions(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, 100);
+       Assert.assertEquals(0, ptns.size());
+     }
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+   }
+ 
+   private Database createTestDb(String dbName, String dbOwner) {
+     String dbDescription = dbName;
+     String dbLocation = "file:/tmp";
+     Map<String, String> dbParams = new HashMap<>();
+     Database db = new Database(dbName, dbDescription, dbLocation, dbParams);
+     db.setOwnerName(dbOwner);
+     db.setOwnerType(PrincipalType.USER);
+     db.setCatalogName(DEFAULT_CATALOG_NAME);
+     return db;
+   }
+ 
+   private Table createTestTbl(String dbName, String tblName, String tblOwner,
+       List<FieldSchema> cols, List<FieldSchema> ptnCols) {
+     String serdeLocation = "file:/tmp";
+     Map<String, String> serdeParams = new HashMap<>();
+     Map<String, String> tblParams = new HashMap<>();
+     SerDeInfo serdeInfo = new SerDeInfo("serde", "seriallib", new HashMap<>());
+     StorageDescriptor sd = new StorageDescriptor(cols, serdeLocation, "input", "output", false, 0,
+         serdeInfo, null, null, serdeParams);
+     sd.setStoredAsSubDirectories(false);
+     Table tbl = new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, ptnCols, tblParams, null, null,
+         TableType.MANAGED_TABLE.toString());
+     tbl.setCatName(DEFAULT_CATALOG_NAME);
+     return tbl;
+   }
+ 
+   // This method will return only after the cache has updated once
+   private void updateCache(CachedStore cachedStore) throws InterruptedException {
+     int maxTries = 100000;
+     long updateCountBefore = cachedStore.getCacheUpdateCount();
+     // Start the CachedStore update service
+     CachedStore.startCacheUpdateService(cachedStore.getConf(), true, false);
+     while ((cachedStore.getCacheUpdateCount() != (updateCountBefore + 1)) && (maxTries-- > 0)) {
+       Thread.sleep(1000);
+     }
+     CachedStore.stopCacheUpdateService(100);
+   }
+ }
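
The ++ lines above thread extra arguments through the ObjectStore alter and
statistics-update calls. A minimal sketch of that calling convention, assuming
the trailing values carry transactional context (a txn id, a valid write-id
list and a write id, matching the txnId/validWriteIdList fields introduced by
this patch series); the helper name is hypothetical and -1/null are the
non-transactional defaults the tests use:

    import org.apache.hadoop.hive.metastore.ObjectStore;
    import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
    import org.apache.hadoop.hive.metastore.api.Table;

    import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;

    public class NonTxnAlterSketch {
      // Hypothetical helper, not part of the patch: mirrors the argument pattern of
      // the updated test calls above. The -1/null values are assumed to mean
      // "no open transaction"; transactional callers would pass live ids instead.
      static void alterOutsideTxn(ObjectStore store, String dbName, Table tbl,
          ColumnStatistics tblStats) throws Exception {
        store.alterTable(DEFAULT_CATALOG_NAME, dbName, tbl.getTableName(), tbl, -1, null);
        store.updateTableColumnStatistics(tblStats, -1, null, -1);
      }
    }

Passing the defaults keeps these tests on the same code path as the pre-txnstats
calls shown on the - lines they replace.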


[21/54] [abbrv] hive git commit: HIVE-19532 : fix tests for master-txnstats branch - more tests (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
index 292cf51..fe631b7 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
@@ -31999,6 +31999,300 @@ class AlterPartitionsResponse {
 
 }
 
+class RenamePartitionRequest {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $catName = null;
+  /**
+   * @var string
+   */
+  public $dbName = null;
+  /**
+   * @var string
+   */
+  public $tableName = null;
+  /**
+   * @var string[]
+   */
+  public $partVals = null;
+  /**
+   * @var \metastore\Partition
+   */
+  public $newPart = null;
+  /**
+   * @var int
+   */
+  public $txnId = -1;
+  /**
+   * @var string
+   */
+  public $validWriteIdList = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'catName',
+          'type' => TType::STRING,
+          ),
+        2 => array(
+          'var' => 'dbName',
+          'type' => TType::STRING,
+          ),
+        3 => array(
+          'var' => 'tableName',
+          'type' => TType::STRING,
+          ),
+        4 => array(
+          'var' => 'partVals',
+          'type' => TType::LST,
+          'etype' => TType::STRING,
+          'elem' => array(
+            'type' => TType::STRING,
+            ),
+          ),
+        5 => array(
+          'var' => 'newPart',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\Partition',
+          ),
+        6 => array(
+          'var' => 'txnId',
+          'type' => TType::I64,
+          ),
+        7 => array(
+          'var' => 'validWriteIdList',
+          'type' => TType::STRING,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['catName'])) {
+        $this->catName = $vals['catName'];
+      }
+      if (isset($vals['dbName'])) {
+        $this->dbName = $vals['dbName'];
+      }
+      if (isset($vals['tableName'])) {
+        $this->tableName = $vals['tableName'];
+      }
+      if (isset($vals['partVals'])) {
+        $this->partVals = $vals['partVals'];
+      }
+      if (isset($vals['newPart'])) {
+        $this->newPart = $vals['newPart'];
+      }
+      if (isset($vals['txnId'])) {
+        $this->txnId = $vals['txnId'];
+      }
+      if (isset($vals['validWriteIdList'])) {
+        $this->validWriteIdList = $vals['validWriteIdList'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'RenamePartitionRequest';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->catName);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->dbName);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 3:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->tableName);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 4:
+          if ($ftype == TType::LST) {
+            $this->partVals = array();
+            $_size847 = 0;
+            $_etype850 = 0;
+            $xfer += $input->readListBegin($_etype850, $_size847);
+            for ($_i851 = 0; $_i851 < $_size847; ++$_i851)
+            {
+              $elem852 = null;
+              $xfer += $input->readString($elem852);
+              $this->partVals []= $elem852;
+            }
+            $xfer += $input->readListEnd();
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 5:
+          if ($ftype == TType::STRUCT) {
+            $this->newPart = new \metastore\Partition();
+            $xfer += $this->newPart->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 6:
+          if ($ftype == TType::I64) {
+            $xfer += $input->readI64($this->txnId);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 7:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->validWriteIdList);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('RenamePartitionRequest');
+    if ($this->catName !== null) {
+      $xfer += $output->writeFieldBegin('catName', TType::STRING, 1);
+      $xfer += $output->writeString($this->catName);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->dbName !== null) {
+      $xfer += $output->writeFieldBegin('dbName', TType::STRING, 2);
+      $xfer += $output->writeString($this->dbName);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->tableName !== null) {
+      $xfer += $output->writeFieldBegin('tableName', TType::STRING, 3);
+      $xfer += $output->writeString($this->tableName);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->partVals !== null) {
+      if (!is_array($this->partVals)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('partVals', TType::LST, 4);
+      {
+        $output->writeListBegin(TType::STRING, count($this->partVals));
+        {
+          foreach ($this->partVals as $iter853)
+          {
+            $xfer += $output->writeString($iter853);
+          }
+        }
+        $output->writeListEnd();
+      }
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->newPart !== null) {
+      if (!is_object($this->newPart)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('newPart', TType::STRUCT, 5);
+      $xfer += $this->newPart->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->txnId !== null) {
+      $xfer += $output->writeFieldBegin('txnId', TType::I64, 6);
+      $xfer += $output->writeI64($this->txnId);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->validWriteIdList !== null) {
+      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 7);
+      $xfer += $output->writeString($this->validWriteIdList);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class RenamePartitionResponse {
+  static $_TSPEC;
+
+
+  public function __construct() {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        );
+    }
+  }
+
+  public function getName() {
+    return 'RenamePartitionResponse';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('RenamePartitionResponse');
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
 class AlterTableRequest {
   static $_TSPEC;
 

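The generated PHP above corresponds to the new RenamePartitionRequest and
RenamePartitionResponse Thrift structs. A minimal client-side sketch, assuming
the usual Thrift-generated Java setters for the same seven fields; the catalog,
database, table and partition values below are placeholders:

    import java.util.Arrays;

    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.RenamePartitionRequest;

    public class RenamePartitionRequestSketch {
      // Builds the request whose PHP read/write methods are generated above.
      static RenamePartitionRequest build(Partition newPart) {
        RenamePartitionRequest req = new RenamePartitionRequest();
        req.setCatName("hive");                    // placeholder catalog name
        req.setDbName("db1");                      // placeholder database
        req.setTableName("tbl1");                  // placeholder table
        req.setPartVals(Arrays.asList("201701"));  // values of the partition being renamed
        req.setNewPart(newPart);                   // partition carrying the new spec
        req.setTxnId(-1);                          // optional; the generated default is -1
        req.setValidWriteIdList(null);             // optional; unset outside a transaction
        return req;
      }
    }

The matching rename_partition_req command added to the Python remote tool in
the next diff takes this single request object as its only argument.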
http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index a21e28a..a595732 100755
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@ -113,6 +113,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
   print('  AlterPartitionsResponse alter_partitions_req(AlterPartitionsRequest req)')
   print('  void alter_partition_with_environment_context(string db_name, string tbl_name, Partition new_part, EnvironmentContext environment_context)')
   print('  void rename_partition(string db_name, string tbl_name,  part_vals, Partition new_part)')
+  print('  RenamePartitionResponse rename_partition_req(RenamePartitionRequest req)')
   print('  bool partition_name_has_valid_characters( part_vals, bool throw_exception)')
   print('  string get_config_value(string name, string defaultValue)')
   print('   partition_name_to_vals(string part_name)')
@@ -840,6 +841,12 @@ elif cmd == 'rename_partition':
     sys.exit(1)
   pp.pprint(client.rename_partition(args[0],args[1],eval(args[2]),eval(args[3]),))
 
+elif cmd == 'rename_partition_req':
+  if len(args) != 1:
+    print('rename_partition_req requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.rename_partition_req(eval(args[0]),))
+
 elif cmd == 'partition_name_has_valid_characters':
   if len(args) != 2:
     print('partition_name_has_valid_characters requires 2 args')


[38/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
index 0000000,5ba71c4..878530a
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
@@@ -1,0 -1,720 +1,720 @@@
+ -- Timestamp: 2011-09-22 15:32:02.024
+ -- Source database is: /home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+ -- Connection URL is: jdbc:derby:/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+ -- Specified schema is: APP
+ -- appendLogs: false
+ 
+ -- ----------------------------------------------
+ -- DDL Statements for functions
+ -- ----------------------------------------------
+ 
+ CREATE FUNCTION "APP"."NUCLEUS_ASCII" (C CHAR(1)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.ascii' ;
+ 
+ CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN VARCHAR(8000)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.matches' ;
+ 
+ -- ----------------------------------------------
+ -- DDL Statements for tables
+ -- ----------------------------------------------
+ CREATE TABLE "APP"."DBS" (
+   "DB_ID" BIGINT NOT NULL,
+   "DESC" VARCHAR(4000),
+   "DB_LOCATION_URI" VARCHAR(4000) NOT NULL,
+   "NAME" VARCHAR(128),
+   "OWNER_NAME" VARCHAR(128),
+   "OWNER_TYPE" VARCHAR(10),
+   "CTLG_NAME" VARCHAR(256) NOT NULL
+ );
+ 
+ CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT, "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."DATABASE_PARAMS" ("DB_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+ 
+ CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" VARCHAR(128), "TBL_ID" BIGINT, "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+ 
+ CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(767) NOT NULL, "TYPE_NAME" CLOB, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL);
+ 
+ CREATE TABLE "APP"."PARTITION_KEY_VALS" ("PART_ID" BIGINT NOT NULL, "PART_KEY_VAL" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."DB_PRIVS" ("DB_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "DB_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DEFERRED_REBUILD" CHAR(1) NOT NULL, "INDEX_HANDLER_CLASS" VARCHAR(4000), "INDEX_NAME" VARCHAR(128), "INDEX_TBL_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "ORIG_TBL_ID" BIGINT, "SD_ID" BIGINT);
+ 
+ CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+ 
 -CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
++CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT, "WRITE_ID" BIGINT DEFAULT 0);
+ 
+ CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER);
+ 
+ CREATE TABLE "APP"."PART_PRIVS" ("PART_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."ROLE_MAP" ("ROLE_GRANT_ID" BIGINT NOT NULL, "ADD_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "ROLE_ID" BIGINT);
+ 
+ CREATE TABLE "APP"."TYPES" ("TYPES_ID" BIGINT NOT NULL, "TYPE_NAME" VARCHAR(128), "TYPE1" VARCHAR(767), "TYPE2" VARCHAR(767));
+ 
+ CREATE TABLE "APP"."GLOBAL_PRIVS" ("USER_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "USER_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."PARTITION_PARAMS" ("PART_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+ 
+ CREATE TABLE "APP"."PARTITION_EVENTS" (
+     "PART_NAME_ID" BIGINT NOT NULL,
+     "CAT_NAME" VARCHAR(256),
+     "DB_NAME" VARCHAR(128),
+     "EVENT_TIME" BIGINT NOT NULL,
+     "EVENT_TYPE" INTEGER NOT NULL,
+     "PARTITION_NAME" VARCHAR(767),
+     "TBL_NAME" VARCHAR(256)
+ );
+ 
+ CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
+ 
 -CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N');
++CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N',  "WRITE_ID" BIGINT DEFAULT 0);
+ 
+ CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."PART_COL_PRIVS" ("PART_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_COL_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, "INPUT_FORMAT" VARCHAR(4000), "IS_COMPRESSED" CHAR(1) NOT NULL, "LOCATION" VARCHAR(4000), "NUM_BUCKETS" INTEGER NOT NULL, "OUTPUT_FORMAT" VARCHAR(4000), "SERDE_ID" BIGINT, "CD_ID" BIGINT, "IS_STOREDASSUBDIRECTORIES" CHAR(1) NOT NULL);
+ 
+ CREATE TABLE "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME" VARCHAR(256) NOT NULL, "NEXT_VAL" BIGINT NOT NULL);
+ 
+ CREATE TABLE "APP"."TAB_COL_STATS"(
+     "CAT_NAME" VARCHAR(256) NOT NULL,
+     "DB_NAME" VARCHAR(128) NOT NULL,
+     "TABLE_NAME" VARCHAR(256) NOT NULL,
+     "COLUMN_NAME" VARCHAR(767) NOT NULL,
+     "COLUMN_TYPE" VARCHAR(128) NOT NULL,
+     "LONG_LOW_VALUE" BIGINT,
+     "LONG_HIGH_VALUE" BIGINT,
+     "DOUBLE_LOW_VALUE" DOUBLE,
+     "DOUBLE_HIGH_VALUE" DOUBLE,
+     "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000),
+     "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),
+     "NUM_DISTINCTS" BIGINT,
+     "NUM_NULLS" BIGINT NOT NULL,
+     "AVG_COL_LEN" DOUBLE,
+     "MAX_COL_LEN" BIGINT,
+     "NUM_TRUES" BIGINT,
+     "NUM_FALSES" BIGINT,
+     "LAST_ANALYZED" BIGINT,
+     "CS_ID" BIGINT NOT NULL,
+     "TBL_ID" BIGINT NOT NULL,
+     "BIT_VECTOR" BLOB
+ );
+ 
+ CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+ 
+ CREATE TABLE "APP"."BUCKETING_COLS" ("SD_ID" BIGINT NOT NULL, "BUCKET_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."TYPE_FIELDS" ("TYPE_NAME" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "FIELD_NAME" VARCHAR(128) NOT NULL, "FIELD_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."NUCLEUS_TABLES" ("CLASS_NAME" VARCHAR(128) NOT NULL, "TABLE_NAME" VARCHAR(128) NOT NULL, "TYPE" VARCHAR(4) NOT NULL, "OWNER" VARCHAR(2) NOT NULL, "VERSION" VARCHAR(20) NOT NULL, "INTERFACE_NAME" VARCHAR(256) DEFAULT NULL);
+ 
+ CREATE TABLE "APP"."SD_PARAMS" ("SD_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+ 
+ CREATE TABLE "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID" BIGINT NOT NULL);
+ 
+ CREATE TABLE "APP"."SKEWED_STRING_LIST_VALUES" ("STRING_LIST_ID" BIGINT NOT NULL, "STRING_LIST_VALUE" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."SKEWED_COL_NAMES" ("SD_ID" BIGINT NOT NULL, "SKEWED_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ("SD_ID" BIGINT NOT NULL, "STRING_LIST_ID_KID" BIGINT NOT NULL, "LOCATION" VARCHAR(4000));
+ 
+ CREATE TABLE "APP"."SKEWED_VALUES" ("SD_ID_OID" BIGINT NOT NULL, "STRING_LIST_ID_EID" BIGINT NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."MASTER_KEYS" ("KEY_ID" INTEGER NOT NULL generated always as identity (start with 1), "MASTER_KEY" VARCHAR(767));
+ 
+ CREATE TABLE "APP"."DELEGATION_TOKENS" ( "TOKEN_IDENT" VARCHAR(767) NOT NULL, "TOKEN" VARCHAR(767));
+ 
+ CREATE TABLE "APP"."PART_COL_STATS"(
+     "CAT_NAME" VARCHAR(256) NOT NULL,
+     "DB_NAME" VARCHAR(128) NOT NULL,
+     "TABLE_NAME" VARCHAR(256) NOT NULL,
+     "PARTITION_NAME" VARCHAR(767) NOT NULL,
+     "COLUMN_NAME" VARCHAR(767) NOT NULL,
+     "COLUMN_TYPE" VARCHAR(128) NOT NULL,
+     "LONG_LOW_VALUE" BIGINT,
+     "LONG_HIGH_VALUE" BIGINT,
+     "DOUBLE_LOW_VALUE" DOUBLE,
+     "DOUBLE_HIGH_VALUE" DOUBLE,
+     "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000),
+     "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),
+     "NUM_DISTINCTS" BIGINT,
+     "BIT_VECTOR" BLOB,
+     "NUM_NULLS" BIGINT NOT NULL,
+     "AVG_COL_LEN" DOUBLE,
+     "MAX_COL_LEN" BIGINT,
+     "NUM_TRUES" BIGINT,
+     "NUM_FALSES" BIGINT,
+     "LAST_ANALYZED" BIGINT,
+     "CS_ID" BIGINT NOT NULL,
+     "PART_ID" BIGINT NOT NULL
+ );
+ 
+ CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255));
+ 
+ CREATE TABLE "APP"."FUNCS" ("FUNC_ID" BIGINT NOT NULL, "CLASS_NAME" VARCHAR(4000), "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "FUNC_NAME" VARCHAR(128), "FUNC_TYPE" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10));
+ 
+ CREATE TABLE "APP"."FUNC_RU" ("FUNC_ID" BIGINT NOT NULL, "RESOURCE_TYPE" INTEGER NOT NULL, "RESOURCE_URI" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."NOTIFICATION_LOG" (
+     "NL_ID" BIGINT NOT NULL,
+     "CAT_NAME" VARCHAR(256),
+     "DB_NAME" VARCHAR(128),
+     "EVENT_ID" BIGINT NOT NULL,
+     "EVENT_TIME" INTEGER NOT NULL,
+     "EVENT_TYPE" VARCHAR(32) NOT NULL,
+     "MESSAGE" CLOB,
+     "TBL_NAME" VARCHAR(256),
+     "MESSAGE_FORMAT" VARCHAR(16)
+ );
+ 
+ CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL);
+ 
+ CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX" INTEGER, "CHILD_TBL_ID" BIGINT, "PARENT_CD_ID" BIGINT , "PARENT_INTEGER_IDX" INTEGER, "PARENT_TBL_ID" BIGINT NOT NULL,  "POSITION" BIGINT NOT NULL, "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, "CONSTRAINT_TYPE" SMALLINT NOT NULL, "UPDATE_RULE" SMALLINT, "DELETE_RULE" SMALLINT, "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL, "DEFAULT_VALUE" VARCHAR(400));
+ 
+ CREATE TABLE "APP"."METASTORE_DB_PROPERTIES" ("PROPERTY_KEY" VARCHAR(255) NOT NULL, "PROPERTY_VALUE" VARCHAR(1000) NOT NULL, "DESCRIPTION" VARCHAR(1000));
+ 
+ CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL, DEFAULT_POOL_ID BIGINT);
+ 
+ CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024));
+ 
+ CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024), IS_IN_UNMANAGED INTEGER NOT NULL DEFAULT 0);
+ 
+ CREATE TABLE "APP"."WM_POOL_TO_TRIGGER"  (POOL_ID BIGINT NOT NULL, TRIGGER_ID BIGINT NOT NULL);
+ 
+ CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, ENTITY_TYPE VARCHAR(128) NOT NULL, ENTITY_NAME VARCHAR(128) NOT NULL, POOL_ID BIGINT, ORDERING INTEGER);
+ 
+ CREATE TABLE "APP"."MV_CREATION_METADATA" (
+   "MV_CREATION_METADATA_ID" BIGINT NOT NULL,
+   "CAT_NAME" VARCHAR(256) NOT NULL,
+   "DB_NAME" VARCHAR(128) NOT NULL,
+   "TBL_NAME" VARCHAR(256) NOT NULL,
+   "TXN_LIST" CLOB,
+   "MATERIALIZATION_TIME" BIGINT NOT NULL
+ );
+ 
+ CREATE TABLE "APP"."MV_TABLES_USED" (
+   "MV_CREATION_METADATA_ID" BIGINT NOT NULL,
+   "TBL_ID" BIGINT NOT NULL
+ );
+ 
+ CREATE TABLE "APP"."CTLGS" (
+     "CTLG_ID" BIGINT NOT NULL,
+     "NAME" VARCHAR(256) UNIQUE,
+     "DESC" VARCHAR(4000),
+     "LOCATION_URI" VARCHAR(4000) NOT NULL);
+ 
+ -- ----------------------------------------------
+ -- DML Statements
+ -- ----------------------------------------------
+ 
+ INSERT INTO "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT * FROM (VALUES (1,1)) tmp_table WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "APP"."NOTIFICATION_SEQUENCE");
+ 
+ INSERT INTO "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") SELECT * FROM (VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1)) tmp_table WHERE NOT EXISTS ( SELECT "NEXT_VAL" FROM "APP"."SEQUENCE_TABLE" WHERE "SEQUENCE_NAME" = 'org.apache.hadoop.hive.metastore.model.MNotificationLog');
+ 
+ -- ----------------------------------------------
+ -- DDL Statements for indexes
+ -- ----------------------------------------------
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUEINDEX" ON "APP"."IDXS" ("INDEX_NAME", "ORIG_TBL_ID");
+ 
+ CREATE INDEX "APP"."TABLECOLUMNPRIVILEGEINDEX" ON "APP"."TBL_COL_PRIVS" ("AUTHORIZER", "TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("AUTHORIZER", "DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+ 
+ CREATE INDEX "APP"."TAB_COL_STATS_IDX" ON "APP"."TAB_COL_STATS" ("CAT_NAME", "DB_NAME", "TABLE_NAME", "COLUMN_NAME");
+ 
+ CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("AUTHORIZER", "PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."ROLEENTITYINDEX" ON "APP"."ROLES" ("ROLE_NAME");
+ 
+ CREATE INDEX "APP"."TABLEPRIVILEGEINDEX" ON "APP"."TBL_PRIVS" ("AUTHORIZER", "TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUETABLE" ON "APP"."TBLS" ("TBL_NAME", "DB_ID");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME", "CTLG_NAME");
+ 
+ CREATE UNIQUE INDEX "APP"."USERROLEMAPINDEX" ON "APP"."ROLE_MAP" ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."GLOBALPRIVILEGEINDEX" ON "APP"."GLOBAL_PRIVS" ("AUTHORIZER", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_TYPE" ON "APP"."TYPES" ("TYPE_NAME");
+ 
+ CREATE INDEX "APP"."PARTITIONCOLUMNPRIVILEGEINDEX" ON "APP"."PART_COL_PRIVS" ("AUTHORIZER", "PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("PART_NAME", "TBL_ID");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUEFUNCTION" ON "APP"."FUNCS" ("FUNC_NAME", "DB_ID");
+ 
+ CREATE INDEX "APP"."FUNCS_N49" ON "APP"."FUNCS" ("DB_ID");
+ 
+ CREATE INDEX "APP"."FUNC_RU_N49" ON "APP"."FUNC_RU" ("FUNC_ID");
+ 
+ CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"("PARENT_TBL_ID");
+ 
+ CREATE INDEX "APP"."CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "APP"."KEY_CONSTRAINTS"("CONSTRAINT_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_WM_RESOURCEPLAN" ON "APP"."WM_RESOURCEPLAN" ("NAME");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_WM_POOL" ON "APP"."WM_POOL" ("RP_ID", "PATH");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_WM_TRIGGER" ON "APP"."WM_TRIGGER" ("RP_ID", "NAME");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_WM_MAPPING" ON "APP"."WM_MAPPING" ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME");
+ 
+ CREATE UNIQUE INDEX "APP"."MV_UNIQUE_TABLE" ON "APP"."MV_CREATION_METADATA" ("TBL_NAME", "DB_NAME");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_CATALOG" ON "APP"."CTLGS" ("NAME");
+ 
+ 
+ -- ----------------------------------------------
+ -- DDL Statements for keys
+ -- ----------------------------------------------
+ 
+ -- primary/unique
+ ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_PK" PRIMARY KEY ("INDEX_ID");
+ 
+ ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_PK" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+ 
+ ALTER TABLE "APP"."CDS" ADD CONSTRAINT "SQL110922153006460" PRIMARY KEY ("CD_ID");
+ 
+ ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_PK" PRIMARY KEY ("DB_GRANT_ID");
+ 
+ ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_PK" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEY_PK" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+ 
+ ALTER TABLE "APP"."SEQUENCE_TABLE" ADD CONSTRAINT "SEQUENCE_TABLE_PK" PRIMARY KEY ("SEQUENCE_NAME");
+ 
+ ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_PK" PRIMARY KEY ("PART_GRANT_ID");
+ 
+ ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_PK" PRIMARY KEY ("SD_ID");
+ 
+ ALTER TABLE "APP"."SERDES" ADD CONSTRAINT "SERDES_PK" PRIMARY KEY ("SERDE_ID");
+ 
+ ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_PK" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+ 
+ ALTER TABLE "APP"."PARTITION_EVENTS" ADD CONSTRAINT "PARTITION_EVENTS_PK" PRIMARY KEY ("PART_NAME_ID");
+ 
+ ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_PK" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+ 
+ ALTER TABLE "APP"."ROLES" ADD CONSTRAINT "ROLES_PK" PRIMARY KEY ("ROLE_ID");
+ 
+ ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_PK" PRIMARY KEY ("TBL_GRANT_ID");
+ 
+ ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_PK" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."NUCLEUS_TABLES" ADD CONSTRAINT "NUCLEUS_TABLES_PK" PRIMARY KEY ("CLASS_NAME");
+ 
+ ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_PK" PRIMARY KEY ("TBL_ID");
+ 
+ ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_PK" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_PK" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_PK" PRIMARY KEY ("DB_ID");
+ 
+ ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_PK" PRIMARY KEY ("ROLE_GRANT_ID");
+ 
+ ALTER TABLE "APP"."GLOBAL_PRIVS" ADD CONSTRAINT "GLOBAL_PRIVS_PK" PRIMARY KEY ("USER_GRANT_ID");
+ 
+ ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_PK" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."TYPES" ADD CONSTRAINT "TYPES_PK" PRIMARY KEY ("TYPES_ID");
+ 
+ ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "SQL110922153006740" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+ 
+ ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_PK" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+ 
+ ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_PK" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_PK" PRIMARY KEY ("PART_ID");
+ 
+ ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_PK" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."SKEWED_STRING_LIST" ADD CONSTRAINT "SKEWED_STRING_LIST_PK" PRIMARY KEY ("STRING_LIST_ID");
+ 
+ ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_PK" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_PK" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+ 
+ ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_PK" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_PK" PRIMARY KEY ("CS_ID");
+ 
+ ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_PK" PRIMARY KEY ("CS_ID");
+ 
+ ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_PK" PRIMARY KEY ("FUNC_ID");
+ 
+ ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_PK" PRIMARY KEY ("FUNC_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."NOTIFICATION_LOG" ADD CONSTRAINT "NOTIFICATION_LOG_PK" PRIMARY KEY ("NL_ID");
+ 
+ ALTER TABLE "APP"."NOTIFICATION_SEQUENCE" ADD CONSTRAINT "NOTIFICATION_SEQUENCE_PK" PRIMARY KEY ("NNI_ID");
+ 
+ ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY ("CONSTRAINT_NAME", "POSITION");
+ 
+ ALTER TABLE "APP"."METASTORE_DB_PROPERTIES" ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY");
+ 
+ ALTER TABLE "APP"."MV_CREATION_METADATA" ADD CONSTRAINT "MV_CREATION_METADATA_PK" PRIMARY KEY ("MV_CREATION_METADATA_ID");
+ 
+ ALTER TABLE "APP"."CTLGS" ADD CONSTRAINT "CTLG_PK" PRIMARY KEY ("CTLG_ID");
+ 
+ 
+ -- foreign
+ ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK3" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_FK1" FOREIGN KEY ("INDEX_ID") REFERENCES "APP"."IDXS" ("INDEX_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEYS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK2" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_FK1" FOREIGN KEY ("TYPE_NAME") REFERENCES "APP"."TYPES" ("TYPES_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_FK1" FOREIGN KEY ("ROLE_ID") REFERENCES "APP"."ROLES" ("ROLE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "COLUMNS_V2_FK1" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_FK1" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK1" FOREIGN KEY ("SD_ID_OID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK2" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_FK" FOREIGN KEY ("TBL_ID") REFERENCES TBLS("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_FK" FOREIGN KEY ("PART_ID") REFERENCES PARTITIONS("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."VERSION" ADD CONSTRAINT "VERSION_PK" PRIMARY KEY ("VER_ID");
+ 
+ ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "APP"."FUNCS" ("FUNC_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_PK" PRIMARY KEY ("RP_ID");
+ 
+ ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_PK" PRIMARY KEY ("POOL_ID");
+ 
+ ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_PK" PRIMARY KEY ("TRIGGER_ID");
+ 
+ ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "APP"."WM_TRIGGER" ("TRIGGER_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_PK" PRIMARY KEY ("MAPPING_ID");
+ 
+ ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK1" FOREIGN KEY ("MV_CREATION_METADATA_ID") REFERENCES "APP"."MV_CREATION_METADATA" ("MV_CREATION_METADATA_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_CTLG_FK" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ -- ----------------------------------------------
+ -- DDL Statements for checks
+ -- ----------------------------------------------
+ 
+ ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "SQL110318025504980" CHECK (DEFERRED_REBUILD IN ('Y','N'));
+ 
+ ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SQL110318025505550" CHECK (IS_COMPRESSED IN ('Y','N'));
+ 
+ -- ----------------------------
+ -- Transaction and Lock Tables
+ -- ----------------------------
+ CREATE TABLE TXNS (
+   TXN_ID bigint PRIMARY KEY,
+   TXN_STATE char(1) NOT NULL,
+   TXN_STARTED bigint NOT NULL,
+   TXN_LAST_HEARTBEAT bigint NOT NULL,
+   TXN_USER varchar(128) NOT NULL,
+   TXN_HOST varchar(128) NOT NULL,
+   TXN_AGENT_INFO varchar(128),
+   TXN_META_INFO varchar(128),
+   TXN_HEARTBEAT_COUNT integer,
+   TXN_TYPE integer
+ );
+ 
+ CREATE TABLE TXN_COMPONENTS (
+   TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID),
+   TC_DATABASE varchar(128) NOT NULL,
+   TC_TABLE varchar(128),
+   TC_PARTITION varchar(767),
+   TC_OPERATION_TYPE char(1) NOT NULL,
+   TC_WRITEID bigint
+ );
+ 
+ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+ 
+ CREATE TABLE COMPLETED_TXN_COMPONENTS (
+   CTC_TXNID bigint NOT NULL,
+   CTC_DATABASE varchar(128) NOT NULL,
+   CTC_TABLE varchar(256),
+   CTC_PARTITION varchar(767),
+   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
+   CTC_WRITEID bigint,
+   CTC_UPDATE_DELETE char(1) NOT NULL
+ );
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+ 
+ CREATE TABLE NEXT_TXN_ID (
+   NTXN_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_TXN_ID VALUES(1);
+ 
+ CREATE TABLE HIVE_LOCKS (
+   HL_LOCK_EXT_ID bigint NOT NULL,
+   HL_LOCK_INT_ID bigint NOT NULL,
+   HL_TXNID bigint NOT NULL,
+   HL_DB varchar(128) NOT NULL,
+   HL_TABLE varchar(128),
+   HL_PARTITION varchar(767),
+   HL_LOCK_STATE char(1) NOT NULL,
+   HL_LOCK_TYPE char(1) NOT NULL,
+   HL_LAST_HEARTBEAT bigint NOT NULL,
+   HL_ACQUIRED_AT bigint,
+   HL_USER varchar(128) NOT NULL,
+   HL_HOST varchar(128) NOT NULL,
+   HL_HEARTBEAT_COUNT integer,
+   HL_AGENT_INFO varchar(128),
+   HL_BLOCKEDBY_EXT_ID bigint,
+   HL_BLOCKEDBY_INT_ID bigint,
+   PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+ );
+ 
+ CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+ 
+ CREATE TABLE NEXT_LOCK_ID (
+   NL_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_LOCK_ID VALUES(1);
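(Editorial note: NEXT_TXN_ID and NEXT_LOCK_ID are single-row counter tables seeded with 1. A common way such a counter is consumed is to read the current value and advance it within one transaction; the statements below are a sketch of that pattern only, not necessarily the exact SQL the metastore issues.)

  -- Sketch: allocate one id from the single-row counter, then advance it.
  SELECT NL_NEXT FROM NEXT_LOCK_ID;
  UPDATE NEXT_LOCK_ID SET NL_NEXT = NL_NEXT + 1;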
+ 
+ CREATE TABLE COMPACTION_QUEUE (
+   CQ_ID bigint PRIMARY KEY,
+   CQ_DATABASE varchar(128) NOT NULL,
+   CQ_TABLE varchar(128) NOT NULL,
+   CQ_PARTITION varchar(767),
+   CQ_STATE char(1) NOT NULL,
+   CQ_TYPE char(1) NOT NULL,
+   CQ_TBLPROPERTIES varchar(2048),
+   CQ_WORKER_ID varchar(128),
+   CQ_START bigint,
+   CQ_RUN_AS varchar(128),
+   CQ_HIGHEST_WRITE_ID bigint,
+   CQ_META_INFO varchar(2048) for bit data,
+   CQ_HADOOP_JOB_ID varchar(32)
+ );
+ 
+ CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+   NCQ_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+ 
+ CREATE TABLE COMPLETED_COMPACTIONS (
+   CC_ID bigint PRIMARY KEY,
+   CC_DATABASE varchar(128) NOT NULL,
+   CC_TABLE varchar(128) NOT NULL,
+   CC_PARTITION varchar(767),
+   CC_STATE char(1) NOT NULL,
+   CC_TYPE char(1) NOT NULL,
+   CC_TBLPROPERTIES varchar(2048),
+   CC_WORKER_ID varchar(128),
+   CC_START bigint,
+   CC_END bigint,
+   CC_RUN_AS varchar(128),
+   CC_HIGHEST_WRITE_ID bigint,
+   CC_META_INFO varchar(2048) for bit data,
+   CC_HADOOP_JOB_ID varchar(32)
+ );
+ 
+ CREATE TABLE AUX_TABLE (
+   MT_KEY1 varchar(128) NOT NULL,
+   MT_KEY2 bigint NOT NULL,
+   MT_COMMENT varchar(255),
+   PRIMARY KEY(MT_KEY1, MT_KEY2)
+ );
+ 
+ -- The first 4 columns make up a logical PK, but since WS_PARTITION is nullable we can't declare such a PK.
+ -- This is a good candidate for an index-organized table.
+ CREATE TABLE WRITE_SET (
+   WS_DATABASE varchar(128) NOT NULL,
+   WS_TABLE varchar(128) NOT NULL,
+   WS_PARTITION varchar(767),
+   WS_TXNID bigint NOT NULL,
+   WS_COMMIT_ID bigint NOT NULL,
+   WS_OPERATION_TYPE char(1) NOT NULL
+ );
+ 
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID bigint NOT NULL,
+   T2W_DATABASE varchar(128) NOT NULL,
+   T2W_TABLE varchar(256) NOT NULL,
+   T2W_WRITEID bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
+ 
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE varchar(128) NOT NULL,
+   NWI_TABLE varchar(256) NOT NULL,
+   NWI_NEXT bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
+ 
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID bigint NOT NULL,
+   MHL_MIN_OPEN_TXNID bigint NOT NULL,
+   PRIMARY KEY(MHL_TXNID)
+ );
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+ 
+ CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+   MRL_TXN_ID BIGINT NOT NULL,
+   MRL_DB_NAME VARCHAR(128) NOT NULL,
+   MRL_TBL_NAME VARCHAR(256) NOT NULL,
+   MRL_LAST_HEARTBEAT BIGINT NOT NULL,
+   PRIMARY KEY(MRL_TXN_ID)
+ );
+ 
+ CREATE TABLE "APP"."I_SCHEMA" (
+   "SCHEMA_ID" bigint primary key,
+   "SCHEMA_TYPE" integer not null,
+   "NAME" varchar(256) unique,
+   "DB_ID" bigint references "APP"."DBS" ("DB_ID"),
+   "COMPATIBILITY" integer not null,
+   "VALIDATION_LEVEL" integer not null,
+   "CAN_EVOLVE" char(1) not null,
+   "SCHEMA_GROUP" varchar(256),
+   "DESCRIPTION" varchar(4000)
+ );
+ 
+ CREATE TABLE "APP"."SCHEMA_VERSION" (
+   "SCHEMA_VERSION_ID" bigint primary key,
+   "SCHEMA_ID" bigint references "APP"."I_SCHEMA" ("SCHEMA_ID"),
+   "VERSION" integer not null,
+   "CREATED_AT" bigint not null,
+   "CD_ID" bigint references "APP"."CDS" ("CD_ID"),
+   "STATE" integer not null,
+   "DESCRIPTION" varchar(4000),
+   "SCHEMA_TEXT" clob,
+   "FINGERPRINT" varchar(256),
+   "SCHEMA_VERSION_NAME" varchar(256),
+   "SERDE_ID" bigint references "APP"."SERDES" ("SERDE_ID")
+ );
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_SCHEMA_VERSION" ON "APP"."SCHEMA_VERSION" ("SCHEMA_ID", "VERSION");
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY varchar(256) NOT NULL,
+   RTM_SRC_TXN_ID bigint NOT NULL,
+   RTM_TARGET_TXN_ID bigint NOT NULL,
+   PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
+ );
+ 
+ CREATE TABLE "APP"."RUNTIME_STATS" (
+   "RS_ID" bigint primary key,
+   "CREATE_TIME" integer not null,
+   "WEIGHT" integer not null,
+   "PAYLOAD" BLOB
+ );
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (
+   WNL_ID bigint NOT NULL,
+   WNL_TXNID bigint NOT NULL,
+   WNL_WRITEID bigint NOT NULL,
+   WNL_DATABASE varchar(128) NOT NULL,
+   WNL_TABLE varchar(128) NOT NULL,
+   WNL_PARTITION varchar(1024) NOT NULL,
+   WNL_TABLE_OBJ clob NOT NULL,
+   WNL_PARTITION_OBJ clob,
+   WNL_FILES clob,
+   WNL_EVENT_TIME integer NOT NULL,
+   PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION)
+ );
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
+ 
+ -- -----------------------------------------------------------------
+ -- Record schema version. Should be the last step in the init script
+ -- -----------------------------------------------------------------
+ INSERT INTO "APP"."VERSION" (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0');

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
index 0000000,a511376..d4fb299
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
@@@ -1,0 -1,6 +1,8 @@@
+ -- Upgrade MetaStore schema from 3.1.0 to 4.0.0
 -
++-- HIVE-19416
++ALTER TABLE "APP"."TBLS" ADD WRITE_ID bigint DEFAULT 0;
++ALTER TABLE "APP"."PARTITIONS" ADD WRITE_ID bigint DEFAULT 0;
+ 
+ -- This needs to be the last thing done.  Insert any changes above this line.
+ UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ 

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
index 0000000,bbc8ea2..810d48a
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
@@@ -1,0 -1,1284 +1,1284 @@@
+ -- Licensed to the Apache Software Foundation (ASF) under one or more
+ -- contributor license agreements.  See the NOTICE file distributed with
+ -- this work for additional information regarding copyright ownership.
+ -- The ASF licenses this file to You under the Apache License, Version 2.0
+ -- (the "License"); you may not use this file except in compliance with
+ -- the License.  You may obtain a copy of the License at
+ --
+ --     http://www.apache.org/licenses/LICENSE-2.0
+ --
+ -- Unless required by applicable law or agreed to in writing, software
+ -- distributed under the License is distributed on an "AS IS" BASIS,
+ -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ -- See the License for the specific language governing permissions and
+ -- limitations under the License.
+ 
+ ------------------------------------------------------------------
+ -- DataNucleus SchemaTool (ran at 08/04/2014 15:10:15)
+ ------------------------------------------------------------------
+ -- Complete schema required for the following classes:-
+ --     org.apache.hadoop.hive.metastore.model.MColumnDescriptor
+ --     org.apache.hadoop.hive.metastore.model.MDBPrivilege
+ --     org.apache.hadoop.hive.metastore.model.MDatabase
+ --     org.apache.hadoop.hive.metastore.model.MDelegationToken
+ --     org.apache.hadoop.hive.metastore.model.MFieldSchema
+ --     org.apache.hadoop.hive.metastore.model.MFunction
+ --     org.apache.hadoop.hive.metastore.model.MGlobalPrivilege
+ --     org.apache.hadoop.hive.metastore.model.MIndex
+ --     org.apache.hadoop.hive.metastore.model.MMasterKey
+ --     org.apache.hadoop.hive.metastore.model.MOrder
+ --     org.apache.hadoop.hive.metastore.model.MPartition
+ --     org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege
+ --     org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics
+ --     org.apache.hadoop.hive.metastore.model.MPartitionEvent
+ --     org.apache.hadoop.hive.metastore.model.MPartitionPrivilege
+ --     org.apache.hadoop.hive.metastore.model.MResourceUri
+ --     org.apache.hadoop.hive.metastore.model.MRole
+ --     org.apache.hadoop.hive.metastore.model.MRoleMap
+ --     org.apache.hadoop.hive.metastore.model.MSerDeInfo
+ --     org.apache.hadoop.hive.metastore.model.MStorageDescriptor
+ --     org.apache.hadoop.hive.metastore.model.MStringList
+ --     org.apache.hadoop.hive.metastore.model.MTable
+ --     org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege
+ --     org.apache.hadoop.hive.metastore.model.MTableColumnStatistics
+ --     org.apache.hadoop.hive.metastore.model.MTablePrivilege
+ --     org.apache.hadoop.hive.metastore.model.MType
+ --     org.apache.hadoop.hive.metastore.model.MVersionTable
+ --
+ -- Table MASTER_KEYS for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+ CREATE TABLE MASTER_KEYS
+ (
+     KEY_ID int NOT NULL,
+     MASTER_KEY nvarchar(767) NULL
+ );
+ 
+ ALTER TABLE MASTER_KEYS ADD CONSTRAINT MASTER_KEYS_PK PRIMARY KEY (KEY_ID);
+ 
+ -- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+ CREATE TABLE IDXS
+ (
+     INDEX_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     DEFERRED_REBUILD bit NOT NULL,
+     INDEX_HANDLER_CLASS nvarchar(4000) NULL,
+     INDEX_NAME nvarchar(128) NULL,
+     INDEX_TBL_ID bigint NULL,
+     LAST_ACCESS_TIME int NOT NULL,
+     ORIG_TBL_ID bigint NULL,
+     SD_ID bigint NULL
+ );
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+ 
+ -- Table PART_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
+ CREATE TABLE PART_COL_STATS
+ (
+     CS_ID bigint NOT NULL,
+     AVG_COL_LEN float NULL,
+     "COLUMN_NAME" nvarchar(767) NOT NULL,
+     COLUMN_TYPE nvarchar(128) NOT NULL,
+     DB_NAME nvarchar(128) NOT NULL,
+     BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
+     BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
+     DOUBLE_HIGH_VALUE float NULL,
+     DOUBLE_LOW_VALUE float NULL,
+     LAST_ANALYZED bigint NOT NULL,
+     LONG_HIGH_VALUE bigint NULL,
+     LONG_LOW_VALUE bigint NULL,
+     MAX_COL_LEN bigint NULL,
+     NUM_DISTINCTS bigint NULL,
+     BIT_VECTOR varbinary(max) NULL,
+     NUM_FALSES bigint NULL,
+     NUM_NULLS bigint NOT NULL,
+     NUM_TRUES bigint NULL,
+     PART_ID bigint NULL,
+     PARTITION_NAME nvarchar(767) NOT NULL,
+     "TABLE_NAME" nvarchar(256) NOT NULL,
+     "CAT_NAME" nvarchar(256) NOT NULL
+ );
+ 
+ ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
+ 
+ CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+ 
+ -- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ CREATE TABLE PART_PRIVS
+ (
+     PART_GRANT_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PART_ID bigint NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     PART_PRIV nvarchar(128) NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+ 
+ -- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+ CREATE TABLE SKEWED_STRING_LIST
+ (
+     STRING_LIST_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+ 
+ -- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+ CREATE TABLE ROLES
+ (
+     ROLE_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     OWNER_NAME nvarchar(128) NULL,
+     ROLE_NAME nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+ 
+ -- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+ CREATE TABLE PARTITIONS
+ (
+     PART_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     LAST_ACCESS_TIME int NOT NULL,
+     PART_NAME nvarchar(767) NULL,
+     SD_ID bigint NULL,
 -    TBL_ID bigint NULL
 -);
++    TBL_ID bigint NULL,
++    WRITE_ID bigint NULL);
+ 
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+ 
+ -- Table CDS for classes [org.apache.hadoop.hive.metastore.model.MColumnDescriptor]
+ CREATE TABLE CDS
+ (
+     CD_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+ 
+ -- Table VERSION for classes [org.apache.hadoop.hive.metastore.model.MVersionTable]
+ CREATE TABLE VERSION
+ (
+     VER_ID bigint NOT NULL,
+     SCHEMA_VERSION nvarchar(127) NOT NULL,
+     VERSION_COMMENT nvarchar(255) NOT NULL
+ );
+ 
+ ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+ 
+ -- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+ CREATE TABLE GLOBAL_PRIVS
+ (
+     USER_GRANT_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     USER_PRIV nvarchar(128) NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+ 
+ -- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ CREATE TABLE PART_COL_PRIVS
+ (
+     PART_COLUMN_GRANT_ID bigint NOT NULL,
+     "COLUMN_NAME" nvarchar(767) NULL,
+     CREATE_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PART_ID bigint NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     PART_COL_PRIV nvarchar(128) NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+ 
+ -- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ CREATE TABLE DB_PRIVS
+ (
+     DB_GRANT_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     DB_ID bigint NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     DB_PRIV nvarchar(128) NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+ 
+ -- Table TAB_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics]
+ CREATE TABLE TAB_COL_STATS
+ (
+     CS_ID bigint NOT NULL,
+     AVG_COL_LEN float NULL,
+     "COLUMN_NAME" nvarchar(767) NOT NULL,
+     COLUMN_TYPE nvarchar(128) NOT NULL,
+     DB_NAME nvarchar(128) NOT NULL,
+     BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
+     BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
+     DOUBLE_HIGH_VALUE float NULL,
+     DOUBLE_LOW_VALUE float NULL,
+     LAST_ANALYZED bigint NOT NULL,
+     LONG_HIGH_VALUE bigint NULL,
+     LONG_LOW_VALUE bigint NULL,
+     MAX_COL_LEN bigint NULL,
+     NUM_DISTINCTS bigint NULL,
+     BIT_VECTOR varbinary(max) NULL,
+     NUM_FALSES bigint NULL,
+     NUM_NULLS bigint NOT NULL,
+     NUM_TRUES bigint NULL,
+     TBL_ID bigint NULL,
+     "TABLE_NAME" nvarchar(256) NOT NULL,
+     "CAT_NAME" nvarchar(256) NOT NULL
+ );
+ 
+ ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
+ CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (CAT_NAME, DB_NAME, TABLE_NAME, COLUMN_NAME);
+ 
+ -- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+ CREATE TABLE TYPES
+ (
+     TYPES_ID bigint NOT NULL,
+     TYPE_NAME nvarchar(128) NULL,
+     TYPE1 nvarchar(767) NULL,
+     TYPE2 nvarchar(767) NULL
+ );
+ 
+ ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+ 
+ -- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ CREATE TABLE TBL_PRIVS
+ (
+     TBL_GRANT_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     TBL_PRIV nvarchar(128) NULL,
+     TBL_ID bigint NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+ 
+ -- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+ CREATE TABLE DBS
+ (
+     DB_ID bigint NOT NULL,
+     "DESC" nvarchar(4000) NULL,
+     DB_LOCATION_URI nvarchar(4000) NOT NULL,
+     "NAME" nvarchar(128) NULL,
+     OWNER_NAME nvarchar(128) NULL,
+     OWNER_TYPE nvarchar(10) NULL,
+     CTLG_NAME nvarchar(256)
+ );
+ 
+ ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+ 
+ -- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ CREATE TABLE TBL_COL_PRIVS
+ (
+     TBL_COLUMN_GRANT_ID bigint NOT NULL,
+     "COLUMN_NAME" nvarchar(767) NULL,
+     CREATE_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     TBL_COL_PRIV nvarchar(128) NULL,
+     TBL_ID bigint NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+ 
+ -- Table DELEGATION_TOKENS for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+ CREATE TABLE DELEGATION_TOKENS
+ (
+     TOKEN_IDENT nvarchar(767) NOT NULL,
+     TOKEN nvarchar(767) NULL
+ );
+ 
+ ALTER TABLE DELEGATION_TOKENS ADD CONSTRAINT DELEGATION_TOKENS_PK PRIMARY KEY (TOKEN_IDENT);
+ 
+ -- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+ CREATE TABLE SERDES
+ (
+     SERDE_ID bigint NOT NULL,
+     "NAME" nvarchar(128) NULL,
+     SLIB nvarchar(4000) NULL,
+     "DESCRIPTION" nvarchar(4000),
+     "SERIALIZER_CLASS" nvarchar(4000),
+     "DESERIALIZER_CLASS" nvarchar(4000),
+     "SERDE_TYPE" int
+ );
+ 
+ ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+ 
+ -- Table FUNCS for classes [org.apache.hadoop.hive.metastore.model.MFunction]
+ CREATE TABLE FUNCS
+ (
+     FUNC_ID bigint NOT NULL,
+     CLASS_NAME nvarchar(4000) NULL,
+     CREATE_TIME int NOT NULL,
+     DB_ID bigint NULL,
+     FUNC_NAME nvarchar(128) NULL,
+     FUNC_TYPE int NOT NULL,
+     OWNER_NAME nvarchar(128) NULL,
+     OWNER_TYPE nvarchar(10) NULL
+ );
+ 
+ ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+ 
+ -- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ CREATE TABLE ROLE_MAP
+ (
+     ROLE_GRANT_ID bigint NOT NULL,
+     ADD_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     ROLE_ID bigint NULL
+ );
+ 
+ ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+ 
+ -- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+ CREATE TABLE TBLS
+ (
+     TBL_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     DB_ID bigint NULL,
+     LAST_ACCESS_TIME int NOT NULL,
+     OWNER nvarchar(767) NULL,
+     OWNER_TYPE nvarchar(10) NULL,
+     RETENTION int NOT NULL,
+     SD_ID bigint NULL,
+     TBL_NAME nvarchar(256) NULL,
+     TBL_TYPE nvarchar(128) NULL,
+     VIEW_EXPANDED_TEXT text NULL,
+     VIEW_ORIGINAL_TEXT text NULL,
 -    IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0
 -);
++    IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0,
++    WRITE_ID bigint NULL);
+ 
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+ 
+ -- Table MV_CREATION_METADATA for classes [org.apache.hadoop.hive.metastore.model.MCreationMetadata]
+ CREATE TABLE MV_CREATION_METADATA
+ (
+     MV_CREATION_METADATA_ID bigint NOT NULL,
+     CAT_NAME nvarchar(256) NOT NULL,
+     DB_NAME nvarchar(128) NOT NULL,
+     TBL_NAME nvarchar(256) NOT NULL,
+     TXN_LIST text NULL,
+     MATERIALIZATION_TIME bigint NOT NULL
+ );
+ 
+ ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID);
+ CREATE INDEX MV_UNIQUE_TABLE ON MV_CREATION_METADATA (TBL_NAME,DB_NAME);
+ 
+ 
+ CREATE TABLE MV_TABLES_USED
+ (
+     MV_CREATION_METADATA_ID bigint NOT NULL,
+     TBL_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE MV_TABLES_USED WITH CHECK ADD FOREIGN KEY(MV_CREATION_METADATA_ID) REFERENCES MV_CREATION_METADATA (MV_CREATION_METADATA_ID);
+ ALTER TABLE MV_TABLES_USED WITH CHECK ADD FOREIGN KEY(TBL_ID) REFERENCES TBLS (TBL_ID);
+ 
+ -- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ CREATE TABLE SDS
+ (
+     SD_ID bigint NOT NULL,
+     CD_ID bigint NULL,
+     INPUT_FORMAT nvarchar(4000) NULL,
+     IS_COMPRESSED bit NOT NULL,
+     IS_STOREDASSUBDIRECTORIES bit NOT NULL,
+     LOCATION nvarchar(4000) NULL,
+     NUM_BUCKETS int NOT NULL,
+     OUTPUT_FORMAT nvarchar(4000) NULL,
+     SERDE_ID bigint NULL
+ );
+ 
+ ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+ 
+ -- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+ CREATE TABLE PARTITION_EVENTS
+ (
+     PART_NAME_ID bigint NOT NULL,
+     CAT_NAME nvarchar(256) NULL,
+     DB_NAME nvarchar(128) NULL,
+     EVENT_TIME bigint NOT NULL,
+     EVENT_TYPE int NOT NULL,
+     PARTITION_NAME nvarchar(767) NULL,
+     TBL_NAME nvarchar(256) NULL
+ );
+ 
+ ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+ 
+ -- Table SORT_COLS for join relationship
+ CREATE TABLE SORT_COLS
+ (
+     SD_ID bigint NOT NULL,
+     "COLUMN_NAME" nvarchar(767) NULL,
+     "ORDER" int NOT NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ -- Table SKEWED_COL_NAMES for join relationship
+ CREATE TABLE SKEWED_COL_NAMES
+ (
+     SD_ID bigint NOT NULL,
+     SKEWED_COL_NAME nvarchar(255) NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ -- Table SKEWED_COL_VALUE_LOC_MAP for join relationship
+ CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+ (
+     SD_ID bigint NOT NULL,
+     STRING_LIST_ID_KID bigint NOT NULL,
+     LOCATION nvarchar(4000) NULL
+ );
+ 
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+ 
+ -- Table SKEWED_STRING_LIST_VALUES for join relationship
+ CREATE TABLE SKEWED_STRING_LIST_VALUES
+ (
+     STRING_LIST_ID bigint NOT NULL,
+     STRING_LIST_VALUE nvarchar(255) NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+ 
+ -- Table PARTITION_KEY_VALS for join relationship
+ CREATE TABLE PARTITION_KEY_VALS
+ (
+     PART_ID bigint NOT NULL,
+     PART_KEY_VAL nvarchar(255) NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+ 
+ -- Table PARTITION_KEYS for join relationship
+ CREATE TABLE PARTITION_KEYS
+ (
+     TBL_ID bigint NOT NULL,
+     PKEY_COMMENT nvarchar(4000) NULL,
+     PKEY_NAME nvarchar(128) NOT NULL,
+     PKEY_TYPE nvarchar(767) NOT NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+ 
+ -- Table SKEWED_VALUES for join relationship
+ CREATE TABLE SKEWED_VALUES
+ (
+     SD_ID_OID bigint NOT NULL,
+     STRING_LIST_ID_EID bigint NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+ 
+ -- Table SD_PARAMS for join relationship
+ CREATE TABLE SD_PARAMS
+ (
+     SD_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(256) NOT NULL,
+     PARAM_VALUE varchar(max) NULL
+ );
+ 
+ ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+ 
+ -- Table FUNC_RU for join relationship
+ CREATE TABLE FUNC_RU
+ (
+     FUNC_ID bigint NOT NULL,
+     RESOURCE_TYPE int NOT NULL,
+     RESOURCE_URI nvarchar(4000) NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID,INTEGER_IDX);
+ 
+ -- Table TYPE_FIELDS for join relationship
+ CREATE TABLE TYPE_FIELDS
+ (
+     TYPE_NAME bigint NOT NULL,
+     COMMENT nvarchar(256) NULL,
+     FIELD_NAME nvarchar(128) NOT NULL,
+     FIELD_TYPE nvarchar(767) NOT NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+ 
+ -- Table BUCKETING_COLS for join relationship
+ CREATE TABLE BUCKETING_COLS
+ (
+     SD_ID bigint NOT NULL,
+     BUCKET_COL_NAME nvarchar(255) NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ -- Table DATABASE_PARAMS for join relationship
+ CREATE TABLE DATABASE_PARAMS
+ (
+     DB_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(180) NOT NULL,
+     PARAM_VALUE nvarchar(4000) NULL
+ );
+ 
+ ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+ 
+ -- Table INDEX_PARAMS for join relationship
+ CREATE TABLE INDEX_PARAMS
+ (
+     INDEX_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(256) NOT NULL,
+     PARAM_VALUE nvarchar(4000) NULL
+ );
+ 
+ ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+ 
+ -- Table COLUMNS_V2 for join relationship
+ CREATE TABLE COLUMNS_V2
+ (
+     CD_ID bigint NOT NULL,
+     COMMENT nvarchar(256) NULL,
+     "COLUMN_NAME" nvarchar(767) NOT NULL,
+     TYPE_NAME varchar(max) NOT NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+ 
+ -- Table SERDE_PARAMS for join relationship
+ CREATE TABLE SERDE_PARAMS
+ (
+     SERDE_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(256) NOT NULL,
+     PARAM_VALUE varchar(max) NULL
+ );
+ 
+ ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+ 
+ -- Table PARTITION_PARAMS for join relationship
+ CREATE TABLE PARTITION_PARAMS
+ (
+     PART_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(256) NOT NULL,
+     PARAM_VALUE nvarchar(4000) NULL
+ );
+ 
+ ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+ 
+ -- Table TABLE_PARAMS for join relationship
+ CREATE TABLE TABLE_PARAMS
+ (
+     TBL_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(256) NOT NULL,
+     PARAM_VALUE varchar(max) NULL
+ );
+ 
+ ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+ 
+ CREATE TABLE NOTIFICATION_LOG
+ (
+     NL_ID bigint NOT NULL,
+     EVENT_ID bigint NOT NULL,
+     EVENT_TIME int NOT NULL,
+     EVENT_TYPE nvarchar(32) NOT NULL,
+     CAT_NAME nvarchar(128) NULL,
+     DB_NAME nvarchar(128) NULL,
+     TBL_NAME nvarchar(256) NULL,
+     MESSAGE_FORMAT nvarchar(16),
+     MESSAGE text NULL
+ );
+ 
+ ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID);
+ 
+ CREATE TABLE NOTIFICATION_SEQUENCE
+ (
+     NNI_ID bigint NOT NULL,
+     NEXT_EVENT_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);
+ 
+ -- Tables to manage resource plans.
+ 
+ CREATE TABLE WM_RESOURCEPLAN
+ (
+     RP_ID bigint NOT NULL,
+     "NAME" nvarchar(128) NOT NULL,
+     QUERY_PARALLELISM int,
+     STATUS nvarchar(20) NOT NULL,
+     DEFAULT_POOL_ID bigint
+ );
+ 
+ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID);
+ 
+ CREATE TABLE WM_POOL
+ (
+     POOL_ID bigint NOT NULL,
+     RP_ID bigint NOT NULL,
+     PATH nvarchar(1024) NOT NULL,
+     ALLOC_FRACTION float,
+     QUERY_PARALLELISM int,
+     SCHEDULING_POLICY nvarchar(1024)
+ );
+ 
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID);
+ 
+ CREATE TABLE WM_TRIGGER
+ (
+     TRIGGER_ID bigint NOT NULL,
+     RP_ID bigint NOT NULL,
+     "NAME" nvarchar(128) NOT NULL,
+     TRIGGER_EXPRESSION nvarchar(1024),
+     ACTION_EXPRESSION nvarchar(1024),
+     IS_IN_UNMANAGED bit NOT NULL DEFAULT 0
+ );
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID);
+ 
+ CREATE TABLE WM_POOL_TO_TRIGGER
+ (
+     POOL_ID bigint NOT NULL,
+     TRIGGER_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID);
+ 
+ CREATE TABLE WM_MAPPING
+ (
+     MAPPING_ID bigint NOT NULL,
+     RP_ID bigint NOT NULL,
+     ENTITY_TYPE nvarchar(128) NOT NULL,
+     ENTITY_NAME nvarchar(128) NOT NULL,
+     POOL_ID bigint,
+     ORDERING int
+ );
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID);
+ 
+ CREATE TABLE CTLGS (
+       CTLG_ID bigint primary key,
+       "NAME" nvarchar(256),
+       "DESC" nvarchar(4000),
+       LOCATION_URI nvarchar(4000) not null
+ );
+ 
+ CREATE UNIQUE INDEX UNIQUE_CTLG ON CTLGS ("NAME");
+ 
+ -- Constraints for table MASTER_KEYS for class(es) [org.apache.hadoop.hive.metastore.model.MMasterKey]
+ 
+ -- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+ 
+ CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+ 
+ CREATE INDEX IDXS_N50 ON IDXS (ORIG_TBL_ID);
+ 
+ CREATE INDEX IDXS_N49 ON IDXS (INDEX_TBL_ID);
+ 
+ 
+ -- Constraints for table PART_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
+ ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+ 
+ CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+ 
+ 
+ -- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+ 
+ CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (AUTHORIZER,PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+ 
+ 
+ -- Constraints for table SKEWED_STRING_LIST for class(es) [org.apache.hadoop.hive.metastore.model.MStringList]
+ 
+ -- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+ CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+ 
+ 
+ -- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+ 
+ CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+ 
+ 
+ -- Constraints for table CDS for class(es) [org.apache.hadoop.hive.metastore.model.MColumnDescriptor]
+ 
+ -- Constraints for table VERSION for class(es) [org.apache.hadoop.hive.metastore.model.MVersionTable]
+ 
+ -- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+ CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (AUTHORIZER,PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+ 
+ CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+ 
+ CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (AUTHORIZER,PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+ 
+ CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (AUTHORIZER,DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+ 
+ 
+ -- Constraints for table TAB_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics]
+ ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS (TBL_ID);
+ 
+ 
+ -- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+ CREATE UNIQUE INDEX UNIQUETYPE ON TYPES (TYPE_NAME);
+ 
+ 
+ -- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+ 
+ CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (AUTHORIZER,TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+ CREATE UNIQUE INDEX UNIQUEDATABASE ON DBS ("NAME", "CTLG_NAME");
+ 
+ 
+ -- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (AUTHORIZER,TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+ 
+ 
+ -- Constraints for table DELEGATION_TOKENS for class(es) [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+ 
+ -- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+ 
+ -- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunction]
+ ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+ 
+ CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME,DB_ID);
+ 
+ CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+ 
+ 
+ -- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) ;
+ 
+ CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+ 
+ CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+ 
+ CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+ 
+ CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+ 
+ 
+ -- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ;
+ 
+ ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ;
+ 
+ CREATE INDEX SDS_N50 ON SDS (CD_ID);
+ 
+ CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+ 
+ 
+ -- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+ CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+ 
+ 
+ -- Constraints for table SORT_COLS
+ ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+ 
+ 
+ -- Constraints for table SKEWED_COL_NAMES
+ ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ CREATE INDEX SKEWED_COL_NAMES_N49 ON SKEWED_COL_NAMES (SD_ID);
+ 
+ 
+ -- Constraints for table SKEWED_COL_VALUE_LOC_MAP
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+ 
+ CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N50 ON SKEWED_COL_VALUE_LOC_MAP (STRING_LIST_ID_KID);
+ 
+ CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N49 ON SKEWED_COL_VALUE_LOC_MAP (SD_ID);
+ 
+ 
+ -- Constraints for table SKEWED_STRING_LIST_VALUES
+ ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+ 
+ CREATE INDEX SKEWED_STRING_LIST_VALUES_N49 ON SKEWED_STRING_LIST_VALUES (STRING_LIST_ID);
+ 
+ 
+ -- Constraints for table PARTITION_KEY_VALS
+ ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+ 
+ CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+ 
+ 
+ -- Constraints for table PARTITION_KEYS
+ ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+ 
+ 
+ -- Constraints for table SKEWED_VALUES
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) ;
+ 
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+ 
+ CREATE INDEX SKEWED_VALUES_N50 ON SKEWED_VALUES (SD_ID_OID);
+ 
+ CREATE INDEX SKEWED_VALUES_N49 ON SKEWED_VALUES (STRING_LIST_ID_EID);
+ 
+ 
+ -- Constraints for table SD_PARAMS
+ ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+ 
+ 
+ -- Constraints for table FUNC_RU
+ ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) ;
+ 
+ CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+ 
+ 
+ -- Constraints for table TYPE_FIELDS
+ ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) ;
+ 
+ CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+ 
+ 
+ -- Constraints for table BUCKETING_COLS
+ ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+ 
+ 
+ -- Constraints for table DATABASE_PARAMS
+ ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+ 
+ CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+ 
+ 
+ -- Constraints for table INDEX_PARAMS
+ ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) ;
+ 
+ CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+ 
+ 
+ -- Constraints for table COLUMNS_V2
+ ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ;
+ 
+ CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+ 
+ 
+ -- Constraints for table SERDE_PARAMS
+ ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ;
+ 
+ CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+ 
+ 
+ -- Constraints for table PARTITION_PARAMS
+ ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+ 
+ CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+ 
+ 
+ -- Constraints for table TABLE_PARAMS
+ ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+ 
+ -- Constraints for resource plan tables.
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME");
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH);
+ 
+ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME");
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME);
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ ALTER TABLE DBS ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES CTLGS ("NAME");
+ -- -----------------------------------------------------------------------------------------------------------------------------------------------
+ -- Transaction and Lock Tables
+ -- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file.
+ -- -----------------------------------------------------------------------------------------------------------------------------------------------
+ CREATE TABLE COMPACTION_QUEUE(
+ 	CQ_ID bigint NOT NULL,
+ 	CQ_DATABASE nvarchar(128) NOT NULL,
+ 	CQ_TABLE nvarchar(128) NOT NULL,
+ 	CQ_PARTITION nvarchar(767) NULL,
+ 	CQ_STATE char(1) NOT NULL,
+ 	CQ_TYPE char(1) NOT NULL,
+ 	CQ_TBLPROPERTIES nvarchar(2048) NULL,
+ 	CQ_WORKER_ID nvarchar(128) NULL,
+ 	CQ_START bigint NULL,
+ 	CQ_RUN_AS nvarchar(128) NULL,
+     CQ_HIGHEST_WRITE_ID bigint NULL,
+     CQ_META_INFO varbinary(2048) NULL,
+ 	CQ_HADOOP_JOB_ID nvarchar(128) NULL,
+ PRIMARY KEY CLUSTERED
+ (
+ 	CQ_ID ASC
+ )
+ );
+ 
+ CREATE TABLE COMPLETED_COMPACTIONS (
+ 	CC_ID bigint NOT NULL,
+ 	CC_DATABASE nvarchar(128) NOT NULL,
+ 	CC_TABLE nvarchar(128) NOT NULL,
+ 	CC_PARTITION nvarchar(767) NULL,
+ 	CC_STATE char(1) NOT NULL,
+ 	CC_TYPE char(1) NOT NULL,
+ 	CC_TBLPROPERTIES nvarchar(2048) NULL,
+ 	CC_WORKER_ID nvarchar(128) NULL,
+ 	CC_START bigint NULL,
+ 	CC_END bigint NULL,
+ 	CC_RUN_AS nvarchar(128) NULL,
+     CC_HIGHEST_WRITE_ID bigint NULL,
+     CC_META_INFO varbinary(2048) NULL,
+ 	CC_HADOOP_JOB_ID nvarchar(128) NULL,
+ PRIMARY KEY CLUSTERED
+ (
+ 	CC_ID ASC
+ )
+ );
+ 
+ CREATE TABLE COMPLETED_TXN_COMPONENTS(
+ 	CTC_TXNID bigint NOT NULL,
+ 	CTC_DATABASE nvarchar(128) NOT NULL,
+ 	CTC_TABLE nvarchar(128) NULL,
+ 	CTC_PARTITION nvarchar(767) NULL,
+     CTC_TIMESTAMP datetime2 DEFAULT CURRENT_TIMESTAMP NOT NULL,
+     CTC_WRITEID bigint,
+     CTC_UPDATE_DELETE char(1) NOT NULL
+ );
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+ 
+ CREATE TABLE HIVE_LOCKS(
+ 	HL_LOCK_EXT_ID bigint NOT NULL,
+ 	HL_LOCK_INT_ID bigint NOT NULL,
+ 	HL_TXNID bigint NOT NULL,
+ 	HL_DB nvarchar(128) NOT NULL,
+ 	HL_TABLE nvarchar(128) NULL,
+ 	HL_PARTITION nvarchar(767) NULL,
+ 	HL_LOCK_STATE char(1) NOT NULL,
+ 	HL_LOCK_TYPE char(1) NOT NULL,
+ 	HL_LAST_HEARTBEAT bigint NOT NULL,
+ 	HL_ACQUIRED_AT bigint NULL,
+ 	HL_USER nvarchar(128) NOT NULL,
+ 	HL_HOST nvarchar(128) NOT NULL,
+     HL_HEARTBEAT_COUNT int NULL,
+     HL_AGENT_INFO nvarchar(128) NULL,
+     HL_BLOCKEDBY_EXT_ID bigint NULL,
+     HL_BLOCKEDBY_INT_ID bigint NULL,
+ PRIMARY KEY CLUSTERED
+ (
+ 	HL_LOCK_EXT_ID ASC,
+ 	HL_LOCK_INT_ID ASC
+ )
+ );
+ 
+ CREATE TABLE NEXT_COMPACTION_QUEUE_ID(
+ 	NCQ_NEXT bigint NOT NULL
+ );
+ 
+ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+ 
+ CREATE TABLE NEXT_LOCK_ID(
+ 	NL_NEXT bigint NOT NULL
+ );
+ 
+ INSERT INTO NEXT_LOCK_ID VALUES(1);
+ 
+ CREATE TABLE NEXT_TXN_ID(
+ 	NTXN_NEXT bigint NOT NULL
+ );
+ 
+ INSERT INTO NEXT_TXN_ID VALUES(1);
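Each of these NEXT_* tables holds a single row, seeded with 1 and advanced as IDs are handed out, so they behave like portable sequences. The sketch below shows that allocation pattern in plain JDBC purely for illustration; it is not the metastore's actual TxnHandler code, and the UPDLOCK table hint is a SQL Server-specific choice made here so concurrent callers do not read the same value.

// Illustrative sketch of the single-row sequence pattern behind the NEXT_* tables.
// Not the actual Hive TxnHandler implementation.
static long allocateNextTxnId(java.sql.Connection conn) throws java.sql.SQLException {
  boolean oldAutoCommit = conn.getAutoCommit();
  conn.setAutoCommit(false);
  try (java.sql.Statement stmt = conn.createStatement()) {
    long id;
    // Read the current value while holding an update lock so concurrent callers serialize.
    try (java.sql.ResultSet rs =
             stmt.executeQuery("SELECT NTXN_NEXT FROM NEXT_TXN_ID WITH (UPDLOCK)")) {
      rs.next();
      id = rs.getLong(1);
    }
    // Advance the sequence and publish the change.
    stmt.executeUpdate("UPDATE NEXT_TXN_ID SET NTXN_NEXT = " + (id + 1));
    conn.commit();
    return id;
  } catch (java.sql.SQLException e) {
    conn.rollback();
    throw e;
  } finally {
    conn.setAutoCommit(oldAutoCommit);
  }
}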
+ 
+ CREATE TABLE TXNS(
+ 	TXN_ID bigint NOT NULL,
+ 	TXN_STATE char(1) NOT NULL,
+ 	TXN_STARTED bigint NOT NULL,
+ 	TXN_LAST_HEARTBEAT bigint NOT NULL,
+ 	TXN_USER nvarchar(128) NOT NULL,
+ 	TXN_HOST nvarchar(128) NOT NULL,
+     TXN_AGENT_INFO nvarchar(128) NULL,
+     TXN_META_INFO nvarchar(128) NULL,
+     TXN_HEARTBEAT_COUNT int NULL,
+     TXN_TYPE int NULL,
+ PRIMARY KEY CLUSTERED
+ (
+ 	TXN_ID ASC
+ )
+ );
+ 
+ CREATE TABLE TXN_COMPONENTS(
+ 	TC_TXNID bigint NOT NULL,
+ 	TC_DATABASE nvarchar(128) NOT NULL,
+ 	TC_TABLE nvarchar(128) NULL,
+ 	TC_PARTITION nvarchar(767) NULL,
+     TC_OPERATION_TYPE char(1) NOT NULL,
+     TC_WRITEID bigint
+ );
+ 
+ ALTER TABLE TXN_COMPONENTS  WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID);
+ 
+ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+ 
+ CREATE TABLE AUX_TABLE (
+   MT_KEY1 nvarchar(128) NOT NULL,
+   MT_KEY2 bigint NOT NULL,
+   MT_COMMENT nvarchar(255) NULL,
+   PRIMARY KEY CLUSTERED
+ (
+     MT_KEY1 ASC,
+     MT_KEY2 ASC
+ )
+ );
+ 
+ CREATE TABLE KEY_CONSTRAINTS
+ (
+   CHILD_CD_ID BIGINT,
+   CHILD_INTEGER_IDX INT,
+   CHILD_TBL_ID BIGINT,
+   PARENT_CD_ID BIGINT,
+   PARENT_INTEGER_IDX INT NOT NULL,
+   PARENT_TBL_ID BIGINT NOT NULL,
+   POSITION INT NOT NULL,
+   CONSTRAINT_NAME VARCHAR(400) NOT NULL,
+   CONSTRAINT_TYPE SMALLINT NOT NULL,
+   UPDATE_RULE SMALLINT,
+   DELETE_RULE SMALLINT,
+   ENABLE_VALIDATE_RELY SMALLINT NOT NULL,
+   DEFAULT_VALUE VARCHAR(400)
+ ) ;
+ 
+ ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
+ 
+ CREATE INDEX CONSTRAINTS_PARENT_TBL_ID__INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID);
+ 
+ CREATE INDEX CONSTRAINTS_CONSTRAINT_TYPE_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE);
+ 
+ CREATE TABLE WRITE_SET (
+   WS_DATABASE nvarchar(128) NOT NULL,
+   WS_TABLE nvarchar(128) NOT NULL,
+   WS_PARTITION nvarchar(767),
+   WS_TXNID bigint NOT NULL,
+   WS_COMMIT_ID bigint NOT NULL,
+   WS_OPERATION_TYPE char(1) NOT NULL
+ );
+ 
+ CREATE TABLE METASTORE_DB_PROPERTIES (
+   PROPERTY_KEY VARCHAR(255) NOT NULL,
+   PROPERTY_VALUE VARCHAR(1000) NOT NULL,
+   DESCRIPTION VARCHAR(1000)
+ );
+ 
+ ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY);
+ 
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID bigint NOT NULL,
+   T2W_DATABASE nvarchar(128) NOT NULL,
+   T2W_TABLE nvarchar(256) NOT NULL,
+   T2W_WRITEID bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
+ 
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE nvarchar(128) NOT NULL,
+   NWI_TABLE nvarchar(256) NOT NULL,
+   NWI_NEXT bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
+ 
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID bigint NOT NULL,
+   MHL_MIN_OPEN_TXNID bigint NOT NULL,
+ PRIMARY KEY CLUSTERED
+ (
+     MHL_TXNID ASC
+ )
+ );
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+ 
+ CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+   MRL_TXN_ID bigint NOT NULL,
+   MRL_DB_NAME nvarchar(128) NOT NULL,
+   MRL_TBL_NAME nvarchar(256) NOT NULL,
+   MRL_LAST_HEARTBEAT bigint NOT NULL,
+ PRIMARY KEY CLUSTERED
+ (
+     MRL_TXN_ID ASC
+ )
+ );
+ 
+ CREATE TABLE "I_SCHEMA" (
+   "SCHEMA_ID" bigint primary key,
+   "SCHEMA_TYPE" int not null,
+   "NAME" nvarchar(256) unique,
+   "DB_ID" bigint references "DBS" ("DB_ID"),
+   "COMPATIBILITY" int not null,
+   "VALIDATION_LEVEL" int not null,
+   "CAN_EVOLVE" bit not null,
+   "SCHEMA_GROUP" nvarchar(256),
+   "DESCRIPTION" nvarchar(4000),
+ );
+ 
+ CREATE TABLE "SCHEMA_VERSION" (
+   "SCHEMA_VERSION_ID" bigint primary key,
+   "SCHEMA_ID" bigint references "I_SCHEMA" ("SCHEMA_ID"),
+   "VERSION" int not null,
+   "CREATED_AT" bigint not null,
+   "CD_ID" bigint references "CDS" ("CD_ID"),
+   "STATE" int not null,
+   "DESCRIPTION" nvarchar(4000),
+   "SCHEMA_TEXT" varchar(max),
+   "FINGERPRINT" nvarchar(256),
+   "SCHEMA_VERSION_NAME" nvarchar(256),
+   "SERDE_ID" bigint references "SERDES" ("SERDE_ID"),
+   unique ("SCHEMA_ID", "VERSION")
+ );
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY nvarchar(256) NOT NULL,
+   RTM_SRC_TXN_ID bigint NOT NULL,
+   RTM_TARGET_TXN_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE REPL_TXN_MAP ADD CONSTRAINT REPL_TXN_MAP_PK PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID);
+ 
+ -- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+ -- NOTE: Some versions of SchemaTool do not automatically generate this table.
+ -- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+ CREATE TABLE SEQUENCE_TABLE
+ (
+    SEQUENCE_NAME nvarchar(256) NOT NULL,
+    NEXT_VAL bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX PART_TABLE_PK ON SEQUENCE_TABLE (SEQUENCE_NAME);
+ 
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1);
+ 
+ CREATE TABLE RUNTIME_STATS (
+   RS_ID bigint primary key,
+   CREATE_TIME bigint NOT NULL,
+   WEIGHT bigint NOT NULL,
+   PAYLOAD varbinary(max)
+ );
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (
+   WNL_ID bigint NOT NULL,
+   WNL_TXNID bigint NOT NULL,
+   WNL_WRITEID bigint NOT NULL,
+   WNL_DATABASE nvarchar(128) NOT NULL,
+   WNL_TABLE nvarchar(128) NOT NULL,
+   WNL_PARTITION nvarchar(1024) NOT NULL,
+   WNL_TABLE_OBJ text NOT NULL,
+   WNL_PARTITION_OBJ text,
+   WNL_FILES text,
+   WNL_EVENT_TIME int NOT NULL
+ );
+ 
+ ALTER TABLE TXN_WRITE_NOTIFICATION_LOG ADD CONSTRAINT TXN_WRITE_NOTIFICATION_LOG_PK PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION);
+ 
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
+ 
+ -- -----------------------------------------------------------------
+ -- Record schema version. Should be the last step in the init script
+ -- -----------------------------------------------------------------
+ INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0');
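The VERSION row written by that final statement is what schema verification consults later, so it is a convenient thing to read back after running the script. Below is a minimal, illustrative JDBC check; the class is not part of the Hive sources, and the JDBC URL, user, and password are placeholders for the real metastore database settings.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class CheckMetastoreSchemaVersion {
  public static void main(String[] args) throws Exception {
    // Placeholder connection settings for the metastore database created by the script above.
    String url = "jdbc:sqlserver://localhost:1433;databaseName=hive_metastore";
    try (Connection conn = DriverManager.getConnection(url, "hive", "password");
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery(
             "SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION WHERE VER_ID = 1")) {
      if (rs.next()) {
        System.out.println("Schema version: " + rs.getString(1) + " (" + rs.getString(2) + ")");
      } else {
        System.out.println("VERSION row is missing; the init script did not finish.");
      }
    }
  }
}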

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
index 0000000,27b7026..acc9361
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
@@@ -1,0 -1,6 +1,10 @@@
+ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS MESSAGE;
+ 
++-- HIVE-19416
++ALTER TABLE TBLS ADD WRITE_ID bigint NULL;
++ALTER TABLE PARTITIONS ADD WRITE_ID bigint NULL;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS MESSAGE;
+ 
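Purely as an illustration of what this upgrade changes on disk, the small JDBC check below confirms that the HIVE-19416 WRITE_ID columns exist afterwards; it is not part of the upgrade script, and the connection settings are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

public class CheckWriteIdColumns {
  public static void main(String[] args) throws Exception {
    String url = "jdbc:sqlserver://localhost:1433;databaseName=hive_metastore"; // placeholder
    try (Connection conn = DriverManager.getConnection(url, "hive", "password")) {
      for (String table : new String[] {"TBLS", "PARTITIONS"}) {
        try (PreparedStatement ps = conn.prepareStatement(
                 "SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS"
               + " WHERE TABLE_NAME = ? AND COLUMN_NAME = 'WRITE_ID'")) {
          ps.setString(1, table);
          try (ResultSet rs = ps.executeQuery()) {
            rs.next();
            System.out.println(table + ".WRITE_ID present: " + (rs.getInt(1) > 0));
          }
        }
      }
    }
  }
}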


[49/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 0000000,9661beb..70be8d8
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@@ -1,0 -1,3699 +1,3757 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ 
+ import java.io.IOException;
+ import java.nio.ByteBuffer;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ 
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.ValidTxnList;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.common.classification.RetrySemantics;
+ import org.apache.hadoop.hive.metastore.annotation.NoReconnect;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+ import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.CmRecycleRequest;
+ import org.apache.hadoop.hive.metastore.api.CmRecycleResponse;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
+ import org.apache.hadoop.hive.metastore.api.CompactionResponse;
+ import org.apache.hadoop.hive.metastore.api.CompactionType;
+ import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
+ import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+ import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+ import org.apache.hadoop.hive.metastore.api.DataOperationType;
+ import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.FindSchemasByColsResp;
+ import org.apache.hadoop.hive.metastore.api.FindSchemasByColsRqst;
+ import org.apache.hadoop.hive.metastore.api.FireEventRequest;
+ import org.apache.hadoop.hive.metastore.api.FireEventResponse;
+ import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;
+ import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
+ import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest;
+ import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse;
+ import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
+ import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
+ import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+ import org.apache.hadoop.hive.metastore.api.ISchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+ import org.apache.hadoop.hive.metastore.api.LockRequest;
+ import org.apache.hadoop.hive.metastore.api.LockResponse;
+ import org.apache.hadoop.hive.metastore.api.Materialization;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
+ import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+ import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
+ import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+ import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest;
+ import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+ import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
+ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+ import org.apache.hadoop.hive.metastore.api.Role;
+ import org.apache.hadoop.hive.metastore.api.RuntimeStat;
+ import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+ import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+ import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
+ import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
+ import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
+ import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.TableMeta;
+ import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
+ import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+ import org.apache.hadoop.hive.metastore.api.TxnOpenException;
+ import org.apache.hadoop.hive.metastore.api.TxnToWriteId;
+ import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
+ import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+ import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+ import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+ import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMMapping;
+ import org.apache.hadoop.hive.metastore.api.WMNullablePool;
+ import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMPool;
+ import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMTrigger;
+ import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+ import org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.utils.ObjectPair;
+ import org.apache.thrift.TException;
+ 
+ /**
+  * Wrapper around hive metastore thrift api
+  */
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
+ public interface IMetaStoreClient {
+ 
+   /**
+    * Returns whether the current client is compatible with the given configuration.
+    * @param conf configuration to check against
+    * @return true if the client is compatible with the conf argument, false otherwise
+    */
+   boolean isCompatibleWith(Configuration conf);
+ 
+   /**
+    * Set added jars path info to MetaStoreClient.
+    * @param addedJars the hive.added.jars.path value: a comma-separated list of qualified paths.
+    */
+   void setHiveAddedJars(String addedJars);
+ 
+   /**
+    * Returns true if the current client is using an in-process (local) metastore.
+    *
+    * @return true if the metastore is in-process, false otherwise
+    */
+   boolean isLocalMetaStore();
+ 
+   /**
+    *  Tries to reconnect this MetaStoreClient to the MetaStore.
+    */
+   void reconnect() throws MetaException;
+ 
+   /**
+    * Close the connection to the metastore.
+    */
+   @NoReconnect
+   void close();
+ 
+   /**
+    * Set a metastore configuration variable that is open to end users.
+    */
+   void setMetaConf(String key, String value) throws MetaException, TException;
+ 
+   /**
+    * Get the current value of a metastore configuration variable.
+    */
+   String getMetaConf(String key) throws MetaException, TException;
+ 
+   /**
+    * Create a new catalog.
+    * @param catalog catalog object to create.
+    * @throws AlreadyExistsException A catalog of this name already exists.
+    * @throws InvalidObjectException There is something wrong with the passed in catalog object.
+    * @throws MetaException something went wrong, usually either in the database or trying to
+    * create the directory for the catalog.
+    * @throws TException general thrift exception.
+    */
+   void createCatalog(Catalog catalog)
+       throws AlreadyExistsException, InvalidObjectException, MetaException, TException;
+ 
+   /**
+    * Alter an existing catalog.
+    * @param catalogName the name of the catalog to alter.
+    * @param newCatalog the new catalog object.  All relevant details of the catalog should be
+    *                   set; don't rely on the system to figure out what you changed and copy
+    *                   only that in.
+    * @throws NoSuchObjectException no catalog of this name exists
+    * @throws InvalidObjectException an attempt was made to make an unsupported change (such as
+    * catalog name).
+    * @throws MetaException usually indicates a database error
+    * @throws TException general thrift exception
+    */
+   void alterCatalog(String catalogName, Catalog newCatalog)
+       throws NoSuchObjectException, InvalidObjectException, MetaException, TException;
+ 
+   /**
+    * Get a catalog object.
+    * @param catName Name of the catalog to fetch.
+    * @return The catalog.
+    * @throws NoSuchObjectException no catalog of this name exists.
+    * @throws MetaException something went wrong, usually in the database.
+    * @throws TException general thrift exception.
+    */
+   Catalog getCatalog(String catName) throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a list of all catalogs known to the system.
+    * @return list of catalog names
+    * @throws MetaException something went wrong, usually in the database.
+    * @throws TException general thrift exception.
+    */
+   List<String> getCatalogs() throws MetaException, TException;
+ 
+   /**
+    * Drop a catalog.  Catalogs must be empty to be dropped, there is no cascade for dropping a
+    * catalog.
+    * @param catName name of the catalog to drop
+    * @throws NoSuchObjectException no catalog of this name exists.
+    * @throws InvalidOperationException The catalog is not empty and cannot be dropped.
+    * @throws MetaException something went wrong, usually in the database.
+    * @throws TException general thrift exception.
+    */
+   void dropCatalog(String catName)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
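To show how the catalog methods declared above fit together, here is a hedged sketch. It assumes an already-connected IMetaStoreClient named client (how it is obtained is out of scope here); the catalog name and location URI are invented for the example, and it only calls methods visible in this interface plus the setters of the thrift-generated Catalog bean.

// Illustrative only: create, inspect, list, and drop a catalog via the methods above.
static void catalogRoundTrip(IMetaStoreClient client) throws Exception {
  Catalog cat = new Catalog();
  cat.setName("demo_catalog");                                  // hypothetical catalog name
  cat.setLocationUri("hdfs://nn:8020/warehouse/demo_catalog");  // hypothetical location
  client.createCatalog(cat);

  Catalog fetched = client.getCatalog("demo_catalog");
  System.out.println("created catalog at " + fetched.getLocationUri());

  for (String name : client.getCatalogs()) {
    System.out.println("known catalog: " + name);
  }

  // Catalogs must be empty to be dropped; there is no cascade.
  client.dropCatalog("demo_catalog");
}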
+ 
+   /**
+    * Get the names of all databases in the default catalog that match the given pattern.
+    * @param databasePattern pattern for the database name to match
+    * @return List of database names.
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> getDatabases(String databasePattern) throws MetaException, TException;
+ 
+   /**
+    * Get all databases in a catalog whose names match a pattern.
+    * @param catName  catalog name.  Can be null, in which case the default catalog is assumed.
+    * @param databasePattern pattern for the database name to match
+    * @return list of database names
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> getDatabases(String catName, String databasePattern)
+       throws MetaException, TException;
+ 
+   /**
+    * Get the names of all databases in the MetaStore.
+    * @return List of database names in the default catalog.
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> getAllDatabases() throws MetaException, TException;
+ 
+   /**
+    * Get all databases in a catalog.
+    * @param catName catalog name.  Can be null, in which case the default catalog is assumed.
+    * @return list of all database names
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> getAllDatabases(String catName) throws MetaException, TException;
+ 
+   /**
+    * Get the names of all tables in the specified database that satisfy the supplied
+    * table name pattern.
+    * @param dbName database name.
+    * @param tablePattern pattern for table name to conform to
+    * @return List of table names.
+    * @throws MetaException error fetching information from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException indicated database to search in does not exist.
+    */
+   List<String> getTables(String dbName, String tablePattern)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get the names of all tables in the specified database that satisfy the supplied
+    * table name pattern.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tablePattern pattern for table name to conform to
+    * @return List of table names.
+    * @throws MetaException error fetching information from the RDBMS
+    * @throws TException general thrift error
+    * @throws UnknownDBException indicated database to search in does not exist.
+    */
+   List<String> getTables(String catName, String dbName, String tablePattern)
+       throws MetaException, TException, UnknownDBException;
+ 
+ 
+   /**
+    * Get the names of all tables in the specified database that satisfy the supplied
+    * table name pattern and table type (MANAGED_TABLE || EXTERNAL_TABLE || VIRTUAL_VIEW)
+    * @param dbName Name of the database to fetch tables in.
+    * @param tablePattern pattern to match for table names.
+    * @param tableType Type of the table in the HMS store. VIRTUAL_VIEW is for views.
+    * @return List of table names.
+    * @throws MetaException error fetching information from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException indicated database does not exist.
+    */
+   List<String> getTables(String dbName, String tablePattern, TableType tableType)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get the names of all tables in the specified database that satisfy the supplied
+    * table name pattern and table type (MANAGED_TABLE || EXTERNAL_TABLE || VIRTUAL_VIEW)
+    * @param catName catalog name.
+    * @param dbName Name of the database to fetch tables in.
+    * @param tablePattern pattern to match for table names.
+    * @param tableType Type of the table in the HMS store. VIRTUAL_VIEW is for views.
+    * @return List of table names.
+    * @throws MetaException error fetching information from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException indicated database does not exist.
+    */
+   List<String> getTables(String catName, String dbName, String tablePattern, TableType tableType)
+       throws MetaException, TException, UnknownDBException;
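A short hedged sketch of the listing calls above, assuming the same kind of pre-built client; the database name and patterns are made up.

// Illustrative only: list tables and views in the default catalog using the methods above.
static void listSomeTables(IMetaStoreClient client) throws Exception {
  // All tables in a database.
  for (String t : client.getTables("default", "*")) {
    System.out.println("table: " + t);
  }
  // Only views whose names match a pattern; TableType.VIRTUAL_VIEW selects views.
  for (String v : client.getTables("default", "sales_*", TableType.VIRTUAL_VIEW)) {
    System.out.println("view: " + v);
  }
}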
+ 
+   /**
+    * Get materialized views that have rewriting enabled.  This will use the default catalog.
+    * @param dbName Name of the database to fetch materialized views from.
+    * @return List of materialized view names.
+    * @throws MetaException error fetching from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException no such database
+    */
+   List<String> getMaterializedViewsForRewriting(String dbName)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get materialized views that have rewriting enabled.
+    * @param catName catalog name.
+    * @param dbName Name of the database to fetch materialized views from.
+    * @return List of materialized view names.
+    * @throws MetaException error fetching from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException no such database
+    */
+   List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Fetches just table name and comments.  Useful when you need full table name
+    * (catalog.database.table) but don't need extra information like partition columns that
+    * require additional fetches from the database.
+    * @param dbPatterns database pattern to match, or null for all databases
+    * @param tablePatterns table pattern to match.
+    * @param tableTypes list of table types to fetch.
+    * @return list of TableMeta objects with information on matching tables
+    * @throws MetaException something went wrong with the fetch from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException No databases match the provided pattern.
+    */
+   List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Fetches just table name and comments.  Useful when you need full table name
+    * (catalog.database.table) but don't need extra information like partition columns that
+    * require additional fetches from the database.
+    * @param catName catalog to search in.  Search cannot cross catalogs.
+    * @param dbPatterns database pattern to match, or null for all databases
+    * @param tablePatterns table pattern to match.
+    * @param tableTypes list of table types to fetch.
+    * @return list of TableMeta objects with information on matching tables
+    * @throws MetaException something went wrong with the fetch from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException No databases match the provided pattern.
+    */
+   List<TableMeta> getTableMeta(String catName, String dbPatterns, String tablePatterns,
+                                List<String> tableTypes)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get the names of all tables in the specified database.
+    * @param dbName database name
+    * @return List of table names.
+    * @throws MetaException something went wrong with the fetch from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException No databases match the provided pattern.
+    */
+   List<String> getAllTables(String dbName) throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get the names of all tables in the specified database.
+    * @param catName catalog name
+    * @param dbName database name
+    * @return List of table names.
+    * @throws MetaException something went wrong with the fetch from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException No databases match the provided pattern.
+    */
+   List<String> getAllTables(String catName, String dbName)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get a list of table names that match a filter.
+    * The filter operators are LIKE, &lt;, &lt;=, &gt;, &gt;=, =, &lt;&gt;
+    *
+    * In the filter statement, values interpreted as strings must be enclosed in quotes,
+    * while values interpreted as integers should not be.  Strings and integers are the only
+    * supported value types.
+    *
+    * The currently supported key names in the filter are:
+    * Constants.HIVE_FILTER_FIELD_OWNER, which filters on the tables' owner's name
+    *   and supports all filter operators
+    * Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times
+    *   and supports all filter operators except LIKE
+    * Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values
+    *   and only supports the filter operators = and &lt;&gt;.
+    *   Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement.
+    *   For example, to filter on parameter keys called "retention", the key name in the filter
+    *   statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention"
+    *   Also, = and &lt;&gt; only work for keys that exist in the tables.
+    *   E.g., filtering on tables where key1 &lt;&gt; value will only
+    *   return tables that have a value for the parameter key1.
+    * Some example filter statements include:
+    * filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " +
+    *   Constants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0";
+    * filter = Constants.HIVE_FILTER_FIELD_OWNER + " = \"test_user\" and (" +
+    *   Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or " +
+    *   Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\")"
+    *
+    * @param dbName
+    *          The name of the database from which you will retrieve the table names
+    * @param filter
+    *          The filter string
+    * @param maxTables
+    *          The maximum number of tables returned
+    * @return  A list of table names that match the desired filter
+    * @throws InvalidOperationException invalid filter
+    * @throws UnknownDBException no such database
+    * @throws TException thrift transport error
+    */
+   List<String> listTableNamesByFilter(String dbName, String filter, short maxTables)
+       throws TException, InvalidOperationException, UnknownDBException;
+ 
+   /**
+    * Get a list of table names that match a filter.
+    * The filter operators are LIKE, &lt;, &lt;=, &gt;, &gt;=, =, &lt;&gt;
+    *
+    * In the filter statement, values interpreted as strings must be enclosed in quotes,
+    * while values interpreted as integers should not be.  Strings and integers are the only
+    * supported value types.
+    *
+    * The currently supported key names in the filter are:
+    * Constants.HIVE_FILTER_FIELD_OWNER, which filters on the tables' owner's name
+    *   and supports all filter operators
+    * Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times
+    *   and supports all filter operators except LIKE
+    * Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values
+    *   and only supports the filter operators = and &lt;&gt;.
+    *   Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement.
+    *   For example, to filter on parameter keys called "retention", the key name in the filter
+    *   statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention"
+    *   Also, = and &lt;&gt; only work for keys that exist in the tables.
+    *   E.g., filtering on tables where key1 &lt;&gt; value will only
+    *   return tables that have a value for the parameter key1.
+    * Some example filter statements include:
+    * filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " +
+    *   Constants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0";
+    * filter = Constants.HIVE_FILTER_FIELD_OWNER + " = \"test_user\" and (" +
+    *   Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or " +
+    *   Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\")"
+    *
+    * @param catName catalog name
+    * @param dbName
+    *          The name of the database from which you will retrieve the table names
+    * @param filter
+    *          The filter string
+    * @param maxTables
+    *          The maximum number of tables returned
+    * @return  A list of table names that match the desired filter
+    * @throws InvalidOperationException invalid filter
+    * @throws UnknownDBException no such database
+    * @throws TException thrift transport error
+    */
+   List<String> listTableNamesByFilter(String catName, String dbName, String filter, int maxTables)
+       throws TException, InvalidOperationException, UnknownDBException;
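The filter grammar described above is easiest to read as code. The sketch below builds the two example filters from the javadoc and runs them; client is again an assumed IMetaStoreClient, the owner and retention values are invented, and it assumes the HIVE_FILTER_FIELD_* constants referred to as Constants.* above are available from the metastore api package (hive_metastoreConstants in current releases).

// Illustrative only: filter strings built per the grammar documented above.
static void findTablesByFilter(IMetaStoreClient client) throws Exception {
  // Tables owned by users whose names contain "test" and that were never accessed.
  String ownerFilter =
      hive_metastoreConstants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and "
    + hive_metastoreConstants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0";
  // Tables owned by "test_user" whose "retention" parameter is "30" or "90".
  String paramFilter =
      hive_metastoreConstants.HIVE_FILTER_FIELD_OWNER + " = \"test_user\" and ("
    + hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or "
    + hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\")";

  for (String t : client.listTableNamesByFilter("default", ownerFilter, (short) 100)) {
    System.out.println("owner/last-access match: " + t);
  }
  for (String t : client.listTableNamesByFilter("default", paramFilter, (short) 100)) {
    System.out.println("retention match: " + t);
  }
}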
+ 
+   /**
+    * Drop the table.
+    *
+    * @param dbname
+    *          The database for this table
+    * @param tableName
+    *          The table to drop
+    * @param deleteData
+    *          Should we delete the underlying data
+    * @param ignoreUnknownTab
+    *          don't throw if the requested table doesn't exist
+    * @throws MetaException
+    *           Could not drop table properly.
+    * @throws NoSuchObjectException
+    *           The table wasn't found.
+    * @throws TException
+    *           A thrift communication error occurred
+    *
+    */
+   void dropTable(String dbname, String tableName, boolean deleteData,
+       boolean ignoreUnknownTab) throws MetaException, TException,
+       NoSuchObjectException;
+ 
+   /**
+    * Drop the table.
+    *
+    * @param dbname
+    *          The database for this table
+    * @param tableName
+    *          The table to drop
+    * @param deleteData
+    *          Should we delete the underlying data
+    * @param ignoreUnknownTab
+    *          don't throw if the requested table doesn't exist
+    * @param ifPurge
+    *          completely purge the table (skipping trash) while removing data from warehouse
+    * @throws MetaException
+    *           Could not drop table properly.
+    * @throws NoSuchObjectException
+    *           The table wasn't found.
+    * @throws TException
+    *           A thrift communication error occurred
+    */
+   void dropTable(String dbname, String tableName, boolean deleteData,
+       boolean ignoreUnknownTab, boolean ifPurge) throws MetaException, TException,
+       NoSuchObjectException;
+ 
+   /**
+    * Drop the table.
+    *
+    * @param dbname
+    *          The database for this table
+    * @param tableName
+    *          The table to drop
+    * @throws MetaException
+    *           Could not drop table properly.
+    * @throws NoSuchObjectException
+    *           The table wasn't found.
+    * @throws TException
+    *           A thrift communication error occurred
+    */
+   void dropTable(String dbname, String tableName)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * Drop a table.
+    * @param catName catalog the table is in.
+    * @param dbName database the table is in.
+    * @param tableName table name.
+    * @param deleteData whether associated data should be deleted.
+    * @param ignoreUnknownTable whether a non-existent table name should be ignored
+    * @param ifPurge whether dropped data should be immediately removed rather than placed in HDFS
+    *               trash.
+    * @throws MetaException something went wrong, usually in the RDBMS or storage.
+    * @throws NoSuchObjectException No table of this name exists, only thrown if
+    * ignoreUnknownTable is false.
+    * @throws TException general thrift error.
+    */
+   void dropTable(String catName, String dbName, String tableName, boolean deleteData,
+                  boolean ignoreUnknownTable, boolean ifPurge)
+     throws MetaException, NoSuchObjectException, TException;
+ 
+   /**
+    * Drop a table.  Equivalent to
+    * {@link #dropTable(String, String, String, boolean, boolean, boolean)} with ifPurge set to
+    * false.
+    * @param catName catalog the table is in.
+    * @param dbName database the table is in.
+    * @param tableName table name.
+    * @param deleteData whether associated data should be deleted.
+    * @param ignoreUnknownTable whether a non-existent table name should be ignored
+    * @throws MetaException something went wrong, usually in the RDBMS or storage.
+    * @throws NoSuchObjectException No table of this name exists, only thrown if
+    * ignoreUnknownTable is false.
+    * @throws TException general thrift error.
+    */
+   default void dropTable(String catName, String dbName, String tableName, boolean deleteData,
+                          boolean ignoreUnknownTable)
+     throws MetaException, NoSuchObjectException, TException {
+     dropTable(catName, dbName, tableName, deleteData, ignoreUnknownTable, false);
+   }
+ 
+   /**
+    * Drop a table.  Equivalent to
+    * {@link #dropTable(String, String, String, boolean, boolean, boolean)} with deleteData
+    * and ignoreUnknownTable set to true, and ifPurge set to false.
+    * @param catName catalog the table is in.
+    * @param dbName database the table is in.
+    * @param tableName table name.
+    * @throws MetaException something went wrong, usually in the RDBMS or storage.
+    * @throws NoSuchObjectException No table of this name exists, only thrown if
+    * ignoreUnknownTable is false.
+    * @throws TException general thrift error.
+    */
+   default void dropTable(String catName, String dbName, String tableName)
+       throws MetaException, NoSuchObjectException, TException {
+     dropTable(catName, dbName, tableName, true, true, false);
+   }
+ 
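[Editor's note, not part of the patch] For orientation, a minimal usage sketch of the drop methods above. It assumes a reachable metastore whose connection settings are already on the classpath; the HiveMetaStoreClient constructor taking a Configuration and the database/table names are illustrative assumptions only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.thrift.TException;

    public class DropTableSketch {
      public static void main(String[] args) throws TException {
        Configuration conf = new Configuration();                 // metastore URIs etc. assumed configured elsewhere
        IMetaStoreClient client = new HiveMetaStoreClient(conf);  // assumed constructor in this branch
        // Drop a table, deleting its data, ignoring a missing table, and skipping the trash.
        client.dropTable("demo_db", "demo_tbl",
            true,   // deleteData
            true,   // ignoreUnknownTab
            true);  // ifPurge
        client.close();
      }
    }

The later sketches in these notes reuse the same assumed client variable and placeholder names.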
+   /**
+    * Truncate the table/partitions in the default catalog.
+    * @param dbName
+    *          The db to which the table to be truncated belongs
+    * @param tableName
+    *          The table to truncate
+    * @param partNames
+    *          List of partitions to truncate. NULL will truncate the whole table/all partitions
+    * @throws MetaException Failure in the RDBMS or storage
+    * @throws TException Thrift transport exception
+    */
+   void truncateTable(String dbName, String tableName, List<String> partNames) throws MetaException, TException;
+ 
++  void truncateTable(String dbName, String tableName, List<String> partNames,
++      long txnId, String validWriteIds, long writeId) throws TException;
++
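[Editor's note, not part of the patch] A minimal sketch of truncating a table with the existing overload, reusing the assumed client and placeholder names; per the javadoc below, passing null for partNames truncates the whole table. The new overload taking txnId/validWriteIds/writeId is intended for transactional callers on this branch and is not exercised here.

    // Truncate every partition of the table (partNames == null).
    client.truncateTable("demo_db", "demo_tbl", null);

    // Truncate only selected partitions (partition names are placeholders).
    client.truncateTable("demo_db", "demo_tbl",
        java.util.Arrays.asList("ds=2018-07-01", "ds=2018-07-02"));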
+   /**
+    * Truncate the table/partitions in the specified catalog.
+    * @param catName catalog name
+    * @param dbName
+    *          The db to which the table to be truncated belongs
+    * @param tableName
+    *          The table to truncate
+    * @param partNames
+    *          List of partitions to truncate. NULL will truncate the whole table/all partitions
+    * @throws MetaException Failure in the RDBMS or storage
+    * @throws TException Thrift transport exception
+    */
+   void truncateTable(String catName, String dbName, String tableName, List<String> partNames)
+       throws MetaException, TException;
+ 
+   /**
+    * Recycles the files recursively from the input path to the cmroot directory either by copying or moving it.
+    *
+    * @param request Contains the path of the data files to be recycled to cmroot and an
+    *                isPurge flag; when the flag is set to true, the files to be recycled are not moved to Trash
+    * @return Response, which currently carries no information
+    */
+   CmRecycleResponse recycleDirToCmPath(CmRecycleRequest request) throws MetaException, TException;
+ 
+   /**
+    * Check whether a table exists in the default catalog.
+    * @param databaseName database name
+    * @param tableName table name
+    * @return true if the indicated table exists, false if not
+    * @throws MetaException error fetching from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException the indicated database does not exist.
+    */
+   boolean tableExists(String databaseName, String tableName)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Check whether a table exists.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @return true if the indicated table exists, false if not
+    * @throws MetaException error fetching from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException the indicated database does not exist.
+    */
+   boolean tableExists(String catName, String dbName, String tableName)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get a Database Object in the default catalog
+    * @param databaseName  name of the database to fetch
+    * @return the database
+    * @throws NoSuchObjectException The database does not exist
+    * @throws MetaException Could not fetch the database
+    * @throws TException A thrift communication error occurred
+    */
+   Database getDatabase(String databaseName)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a database.
+    * @param catalogName catalog name.  Can be null, in which case
+    * {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed.
+    * @param databaseName database name
+    * @return the database object
+    * @throws NoSuchObjectException No database with this name exists in the specified catalog
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift error
+    */
+   Database getDatabase(String catalogName, String databaseName)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a table object in the default catalog.
+    *
+    * @param dbName
+    *          The database the table is located in.
+    * @param tableName
+    *          Name of the table to fetch.
+    * @return An object representing the table.
+    * @throws MetaException
+    *           Could not fetch the table
+    * @throws TException
+    *           A thrift communication error occurred
+    * @throws NoSuchObjectException
+    *           In case the table wasn't found.
+    */
+   Table getTable(String dbName, String tableName) throws MetaException,
+       TException, NoSuchObjectException;
+ 
++  Table getTable(String dbName, String tableName,
++                 long txnId, String validWriteIdList)
++      throws MetaException, TException, NoSuchObjectException;
++
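[Editor's note, not part of the patch] Sketch of fetching a table object with the plain overload; client and the names are the same placeholders as before. The new overload that takes txnId and validWriteIdList is meant for transactional reads on this branch and is not shown.

    // org.apache.hadoop.hive.metastore.api.Table
    Table t = client.getTable("demo_db", "demo_tbl");
    System.out.println(t.getTableName() + " -> " + t.getSd().getLocation());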
+   /**
+    * Get a table object.
+    * @param catName catalog the table is in.
+    * @param dbName database the table is in.
+    * @param tableName table name.
+    * @return table object.
+    * @throws MetaException Something went wrong, usually in the RDBMS.
+    * @throws TException general thrift error.
+    */
+   Table getTable(String catName, String dbName, String tableName) throws MetaException, TException;
+ 
++  Table getTable(String catName, String dbName, String tableName,
++                        long txnId, String validWriteIdList) throws TException;
+   /**
+    * Get tables as objects (rather than just fetching their names).  This is more expensive and
+    * should only be used if you actually need all the information about the tables.
+    * @param dbName
+    *          The database the tables are located in.
+    * @param tableNames
+    *          The names of the tables to fetch
+    * @return A list of objects representing the tables.
+    *          Only the tables that can be retrieved from the database are returned.  For example,
+    *          if none of the requested tables could be retrieved, an empty list is returned.
+    *          There is no guarantee of ordering of the returned tables.
+    * @throws InvalidOperationException
+    *          The input to this operation is invalid (e.g., the list of table names is null)
+    * @throws UnknownDBException
+    *          The requested database could not be fetched.
+    * @throws TException
+    *          A thrift communication error occurred
+    * @throws MetaException
+    *          Any other errors
+    */
+   List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException;
+ 
+   /**
+    * Get tables as objects (rather than just fetching their names).  This is more expensive and
+    * should only be used if you actually need all the information about the tables.
+    * @param catName catalog name
+    * @param dbName
+    *          The database the tables are located in.
+    * @param tableNames
+    *          The names of the tables to fetch
+    * @return A list of objects representing the tables.
+    *          Only the tables that can be retrieved from the database are returned.  For example,
+    *          if none of the requested tables could be retrieved, an empty list is returned.
+    *          There is no guarantee of ordering of the returned tables.
+    * @throws InvalidOperationException
+    *          The input to this operation is invalid (e.g., the list of table names is null)
+    * @throws UnknownDBException
+    *          The requested database could not be fetched.
+    * @throws TException
+    *          A thrift communication error occurred
+    * @throws MetaException
+    *          Any other errors
+    */
+   List<Table> getTableObjectsByName(String catName, String dbName, List<String> tableNames)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException;
+ 
+   /**
+    * Returns the invalidation information for the materialized views given as input.
+    */
+   Materialization getMaterializationInvalidationInfo(CreationMetadata cm, String validTxnList)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException;
+ 
+   /**
+    * Updates the creation metadata for the materialized view.
+    */
+   void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm)
+       throws MetaException, TException;
+ 
+   /**
+    * Updates the creation metadata for the materialized view.
+    */
+   void updateCreationMetadata(String catName, String dbName, String tableName, CreationMetadata cm)
+       throws MetaException, TException;
+ 
+   /**
+    * Add a partition to a table and get back the resulting Partition object.  This creates an
+    * empty default partition with just the partition values set.
+    * @param dbName database name
+    * @param tableName table name
+    * @param partVals partition values
+    * @return the partition object
+    * @throws InvalidObjectException no such table
+    * @throws AlreadyExistsException a partition with these values already exists
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   Partition appendPartition(String dbName, String tableName, List<String> partVals)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add a partition to a table and get back the resulting Partition object.  This creates an
+    * empty default partition with just the partition values set.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partVals partition values
+    * @return the partition object
+    * @throws InvalidObjectException no such table
+    * @throws AlreadyExistsException a partition with these values already exists
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   Partition appendPartition(String catName, String dbName, String tableName, List<String> partVals)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add a partition to a table and get back the resulting Partition object.  This creates an
+    * empty default partition with just the partition value set.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param name name of the partition, should be in the form partkey=partval.
+    * @return new partition object.
+    * @throws InvalidObjectException No such table.
+    * @throws AlreadyExistsException Partition of this name already exists.
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   Partition appendPartition(String dbName, String tableName, String name)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add a partition to a table and get back the resulting Partition object.  This creates an
+    * empty default partition with just the partition value set.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param name name of the partition, should be in the form partkey=partval.
+    * @return new partition object.
+    * @throws InvalidObjectException No such table.
+    * @throws AlreadyExistsException Partition of this name already exists.
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   Partition appendPartition(String catName, String dbName, String tableName, String name)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
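[Editor's note, not part of the patch] A sketch of the two appendPartition flavors, assuming the same client and a table partitioned by a single ds column (placeholder).

    // org.apache.hadoop.hive.metastore.api.Partition
    // By partition values, in partition-key order.
    Partition p1 = client.appendPartition("demo_db", "demo_tbl",
        java.util.Collections.singletonList("2018-07-19"));

    // By partition name in partkey=partval form.
    Partition p2 = client.appendPartition("demo_db", "demo_tbl", "ds=2018-07-19");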
+   /**
+    * Add a partition to the table.
+    *
+    * @param partition
+    *          The partition to add
+    * @return The partition added
+    * @throws InvalidObjectException
+    *           Could not find table to add to
+    * @throws AlreadyExistsException
+    *           Partition already exists
+    * @throws MetaException
+    *           Could not add partition
+    * @throws TException
+    *           Thrift exception
+    */
+   Partition add_partition(Partition partition)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add partitions to the table.
+    *
+    * @param partitions
+    *          The partitions to add
+    * @throws InvalidObjectException
+    *           Could not find table to add to
+    * @throws AlreadyExistsException
+    *           Partition already exists
+    * @throws MetaException
+    *           Could not add partition
+    * @throws TException
+    *           Thrift exception
+    */
+   int add_partitions(List<Partition> partitions)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add partitions using a spec proxy.
+    * @param partitionSpec partition spec proxy
+    * @return number of partitions that were added
+    * @throws InvalidObjectException the partitionSpec is malformed.
+    * @throws AlreadyExistsException one or more of the partitions already exist.
+    * @throws MetaException error accessing the RDBMS or storage.
+    * @throws TException thrift transport error
+    */
+   int add_partitions_pspec(PartitionSpecProxy partitionSpec)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add partitions to the table.
+    *
+    * @param partitions The partitions to add
+    * @param ifNotExists only add partitions if they don't exist
+    * @param needResults Whether the results are needed
+    * @return the partitions that were added, or null if !needResults
+    */
+   List<Partition> add_partitions(
+       List<Partition> partitions, boolean ifNotExists, boolean needResults)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
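[Editor's note, not part of the patch] Sketch of adding pre-built Partition objects. Building a complete Partition (values, StorageDescriptor, location) is elided; buildPartitions() is a hypothetical helper assumed to return fully populated objects.

    java.util.List<Partition> parts = buildPartitions();   // hypothetical helper, elided
    // Add them all, skipping ones that already exist, and get the created objects back.
    java.util.List<Partition> created = client.add_partitions(parts, true, true);
    System.out.println("created " + created.size() + " partitions");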
+   /**
+    * Get a partition.
+    * @param dbName database name
+    * @param tblName table name
+    * @param partVals partition values for this partition, must be in the same order as the
+    *                 partition keys of the table.
+    * @return the partition object
+    * @throws NoSuchObjectException no such partition
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   Partition getPartition(String dbName, String tblName, List<String> partVals)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a partition.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tblName table name
+    * @param partVals partition values for this partition, must be in the same order as the
+    *                 partition keys of the table.
+    * @return the partition object
+    * @throws NoSuchObjectException no such partition
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   Partition getPartition(String catName, String dbName, String tblName, List<String> partVals)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Move a partition from one table to another
+    * @param partitionSpecs key value pairs that describe the partition to be moved.
+    * @param sourceDb database of the source table
+    * @param sourceTable name of the source table
+    * @param destdb database of the destination table
+    * @param destTableName name of the destination table
+    * @return partition object
+    * @throws MetaException error accessing the RDBMS or storage
+    * @throws NoSuchObjectException no such table, for either source or destination table
+    * @throws InvalidObjectException error in partition specifications
+    * @throws TException thrift transport error
+    */
+   Partition exchange_partition(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destdb,
+       String destTableName) throws MetaException, NoSuchObjectException,
+       InvalidObjectException, TException;
+ 
+   /**
+    * Move a partition from one table to another
+    * @param partitionSpecs key value pairs that describe the partition to be moved.
+    * @param sourceCat catalog of the source table
+    * @param sourceDb database of the source table
+    * @param sourceTable name of the source table
+    * @param destCat catalog of the destination table, for now must be the same as sourceCat
+    * @param destdb database of the destination table
+    * @param destTableName name of the destination table
+    * @return partition object
+    * @throws MetaException error accessing the RDBMS or storage
+    * @throws NoSuchObjectException no such table, for either source or destination table
+    * @throws InvalidObjectException error in partition specifications
+    * @throws TException thrift transport error
+    */
+   Partition exchange_partition(Map<String, String> partitionSpecs, String sourceCat,
+                                String sourceDb, String sourceTable, String destCat, String destdb,
+                                String destTableName) throws MetaException, NoSuchObjectException,
+       InvalidObjectException, TException;
+ 
+   /**
+    * With a single partitionSpecs, multiple partitions may be exchanged.
+    * e.g., with partition keys year=2015/month/day, exchanging partition year=2015 results in all the
+    * partitions belonging to it being exchanged. This function returns the list of affected partitions.
+    * @param partitionSpecs key value pairs that describe the partition(s) to be moved.
+    * @param sourceDb database of the source table
+    * @param sourceTable name of the source table
+    * @param destdb database of the destination table
+    * @param destTableName name of the destination table
+    * @throws MetaException error accessing the RDBMS or storage
+    * @throws NoSuchObjectException no such table, for either source or destination table
+    * @throws InvalidObjectException error in partition specifications
+    * @throws TException thrift transport error
+    * @return the list of the new partitions
+    */
+   List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destdb,
+       String destTableName) throws MetaException, NoSuchObjectException,
+       InvalidObjectException, TException;
+ 
+   /**
+    * With a single partitionSpecs, multiple partitions may be exchanged.
+    * e.g., with partition keys year=2015/month/day, exchanging partition year=2015 results in all the
+    * partitions belonging to it being exchanged. This function returns the list of affected partitions.
+    * @param partitionSpecs key value pairs that describe the partition(s) to be moved.
+    * @param sourceCat catalog of the source table
+    * @param sourceDb database of the source table
+    * @param sourceTable name of the source table
+    * @param destCat catalog of the destination table, for now must be the same as sourceCat
+    * @param destdb database of the destination table
+    * @param destTableName name of the destination table
+    * @throws MetaException error accessing the RDBMS or storage
+    * @throws NoSuchObjectException no such table, for either source or destination table
+    * @throws InvalidObjectException error in partition specifications
+    * @throws TException thrift transport error
+    * @return the list of the new partitions
+    */
+   List<Partition> exchange_partitions(Map<String, String> partitionSpecs, String sourceCat,
+                                       String sourceDb, String sourceTable, String destCat,
+                                       String destdb, String destTableName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, TException;
+ 
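[Editor's note, not part of the patch] Sketch of exchanging partitions between two tables with the same schema; the spec map keys are partition column names and the values select the partition(s) to move. All names are placeholders.

    java.util.Map<String, String> spec = new java.util.HashMap<>();
    spec.put("year", "2015");   // moves every partition under year=2015
    java.util.List<Partition> moved =
        client.exchange_partitions(spec, "staging_db", "events_staging", "prod_db", "events");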
+   /**
+    * Get a Partition by name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param name - partition name i.e. 'ds=2010-02-03/ts=2010-02-03 18%3A16%3A01'
+    * @return the partition object
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   Partition getPartition(String dbName, String tblName, String name)
+       throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get a Partition by name.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param name - partition name i.e. 'ds=2010-02-03/ts=2010-02-03 18%3A16%3A01'
+    * @return the partition object
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   Partition getPartition(String catName, String dbName, String tblName, String name)
+       throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+ 
+ 
+   /**
+    * Get a Partition along with authorization information.
+    * @param dbName database name
+    * @param tableName table name
+    * @param pvals partition values, must be in the same order as the tables partition keys
+    * @param userName name of the calling user
+    * @param groupNames groups the calling user belongs to
+    * @return the partition
+    * @throws MetaException error accessing the RDBMS
+    * @throws UnknownTableException no such table
+    * @throws NoSuchObjectException no such partition
+    * @throws TException thrift transport error
+    */
+   Partition getPartitionWithAuthInfo(String dbName, String tableName,
+       List<String> pvals, String userName, List<String> groupNames)
+       throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get a Partition along with authorization information.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param pvals partition values, must be in the same order as the tables partition keys
+    * @param userName name of the calling user
+    * @param groupNames groups the calling user belongs to
+    * @return the partition
+    * @throws MetaException error accessing the RDBMS
+    * @throws UnknownTableException no such table
+    * @throws NoSuchObjectException no such partition
+    * @throws TException thrift transport error
+    */
+   Partition getPartitionWithAuthInfo(String catName, String dbName, String tableName,
+                                      List<String> pvals, String userName, List<String> groupNames)
+       throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get a list of partitions for a table.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param max_parts maximum number of parts to return, -1 for all
+    * @return the list of partitions
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitions(String db_name, String tbl_name, short max_parts)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a list of partitions for a table.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param max_parts maximum number of parts to return, -1 for all
+    * @return the list of partitions
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitions(String catName, String db_name, String tbl_name, int max_parts)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a list of partitions from a table, returned in the form of PartitionSpecProxy
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param maxParts maximum number of partitions to return, or -1 for all
+    * @return a PartitionSpecProxy
+    * @throws TException thrift transport error
+    */
+   PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts)
+     throws TException;
+ 
+   /**
+    * Get a list of partitions from a table, returned in the form of PartitionSpecProxy
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param maxParts maximum number of partitions to return, or -1 for all
+    * @return a PartitionSpecProxy
+    * @throws TException thrift transport error
+    */
+   PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tableName,
+                                         int maxParts) throws TException;
+ 
+   /**
+    * Get a list of partitions based on a (possibly partial) list of partition values.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals partition values, in order of the table partition keys.  These can be
+    *                  partial, or .* to match all values for a particular key.
+    * @param max_parts maximum number of partitions to return, or -1 for all.
+    * @return list of partitions
+    * @throws NoSuchObjectException no such table.
+    * @throws MetaException error accessing the database or processing the partition values.
+    * @throws TException thrift transport error.
+    */
+   List<Partition> listPartitions(String db_name, String tbl_name,
+       List<String> part_vals, short max_parts) throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a list of partitions based on a (possibly partial) list of partition values.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals partition values, in order of the table partition keys.  These can be
+    *                  partial, or .* to match all values for a particular key.
+    * @param max_parts maximum number of partitions to return, or -1 for all.
+    * @return list of partitions
+    * @throws NoSuchObjectException no such table.
+    * @throws MetaException error accessing the database or processing the partition values.
+    * @throws TException thrift transport error.
+    */
+   List<Partition> listPartitions(String catName, String db_name, String tbl_name,
+                                  List<String> part_vals, int max_parts)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * List Names of partitions in a table.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param max_parts maximum number of parts to fetch, or -1 to fetch them all.
+    * @return list of partition names.
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException Error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> listPartitionNames(String db_name, String tbl_name,
+       short max_parts) throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * List Names of partitions in a table.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param max_parts maximum number of parts to fetch, or -1 to fetch them all.
+    * @return list of partition names.
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException Error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> listPartitionNames(String catName, String db_name, String tbl_name,
+                                   int max_parts) throws NoSuchObjectException, MetaException, TException;
+ 
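[Editor's note, not part of the patch] Sketch of the listing calls, same assumed client and placeholder names. Note the older overloads take a short for the limit while the catalog-aware ones take an int; -1 asks for everything.

    // Fully materialized Partition objects (can be expensive for tables with many partitions).
    java.util.List<Partition> parts =
        client.listPartitions("demo_db", "demo_tbl", (short) -1);

    // Names only, which is much cheaper when the values are all you need.
    java.util.List<String> names =
        client.listPartitionNames("demo_db", "demo_tbl", (short) 10);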
+   /**
+    * Get a list of partition names matching a partial specification of the partition values.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals partial list of partition values.  These must be given in the order of the
+    *                  partition keys.  If you wish to accept any value for a particular key you
+    *                  can pass ".*" for that value in this list.
+    * @param max_parts maximum number of partition names to return, or -1 to return all that are
+    *                  found.
+    * @return list of matching partition names.
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error.
+    * @throws NoSuchObjectException no such table.
+    */
+   List<String> listPartitionNames(String db_name, String tbl_name,
+       List<String> part_vals, short max_parts)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * Get a list of partition names matching a partial specification of the partition values.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals partial list of partition values.  These must be given in the order of the
+    *                  partition keys.  If you wish to accept any value for a particular key you
+    *                  can pass ".*" for that value in this list.
+    * @param max_parts maximum number of partition names to return, or -1 to return all that are
+    *                  found.
+    * @return list of matching partition names.
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error.
+    * @throws NoSuchObjectException no such table.
+    */
+   List<String> listPartitionNames(String catName, String db_name, String tbl_name,
+                                   List<String> part_vals, int max_parts)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * Get a list of partition values
+    * @param request request
+    * @return response
+    * @throws MetaException error accessing RDBMS
+    * @throws TException thrift transport error
+    * @throws NoSuchObjectException no such table
+    */
+   PartitionValuesResponse listPartitionValues(PartitionValuesRequest request)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * Get number of partitions matching specified filter
+    * @param dbName the database name
+    * @param tableName the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= \"p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @return number of partitions
+    * @throws MetaException error accessing RDBMS or processing the filter
+    * @throws NoSuchObjectException no such table
+    * @throws TException thrift transport error
+    */
+   int getNumPartitionsByFilter(String dbName, String tableName,
+                                String filter) throws MetaException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get number of partitions matching specified filter
+    * @param catName catalog name
+    * @param dbName the database name
+    * @param tableName the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= \"p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @return number of partitions
+    * @throws MetaException error accessing RDBMS or processing the filter
+    * @throws NoSuchObjectException no such table
+    * @throws TException thrift transport error
+    */
+   int getNumPartitionsByFilter(String catName, String dbName, String tableName,
+                                String filter) throws MetaException, NoSuchObjectException, TException;
+ 
+ 
+   /**
+    * Get list of partitions matching specified filter
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= \"p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @param max_parts the maximum number of partitions to return,
+    *    all partitions are returned if -1 is passed
+    * @return list of partitions
+    * @throws MetaException Error accessing the RDBMS or processing the filter.
+    * @throws NoSuchObjectException No such table.
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsByFilter(String db_name, String tbl_name,
+       String filter, short max_parts) throws MetaException, NoSuchObjectException, TException;
+ 
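[Editor's note, not part of the patch] Sketch of a filtered listing; as the javadoc notes, the filter may only reference string partition keys. The names and the filter expression are placeholders.

    java.util.List<Partition> matching = client.listPartitionsByFilter(
        "demo_db", "demo_tbl",
        "ds >= \"2018-07-01\" and ds <= \"2018-07-19\"",   // SQL-like filter over string partition keys
        (short) -1);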
+   /**
+    * Get list of partitions matching specified filter
+    * @param catName catalog name.
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= \"p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @param max_parts the maximum number of partitions to return,
+    *    all partitions are returned if -1 is passed
+    * @return list of partitions
+    * @throws MetaException Error accessing the RDBMS or processing the filter.
+    * @throws NoSuchObjectException No such table.
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsByFilter(String catName, String db_name, String tbl_name,
+                                          String filter, int max_parts)
+       throws MetaException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get a list of partitions in a PartitionSpec, using a filter to select which partitions to
+    * fetch.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param filter SQL where clause filter
+    * @param max_parts maximum number of partitions to fetch, or -1 for all
+    * @return PartitionSpec
+    * @throws MetaException error accessing RDBMS or processing the filter
+    * @throws NoSuchObjectException No table matches the request
+    * @throws TException thrift transport error
+    */
+   PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name,
+                                                 String filter, int max_parts)
+       throws MetaException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get a list of partitions in a PartitionSpec, using a filter to select which partitions to
+    * fetch.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param filter SQL where clause filter
+    * @param max_parts maximum number of partitions to fetch, or -1 for all
+    * @return PartitionSpec
+    * @throws MetaException error accessing RDBMS or processing the filter
+    * @throws NoSuchObjectException No table matches the request
+    * @throws TException thrift transport error
+    */
+   PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_name, String tbl_name,
+                                                 String filter, int max_parts)
+       throws MetaException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get list of partitions matching specified serialized expression
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param expr expression, serialized from ExprNodeDesc
+    * @param max_parts the maximum number of partitions to return,
+    *    all partitions are returned if -1 is passed
+    * @param default_partition_name Default partition name from configuration. If blank, the
+    *    metastore server-side configuration is used.
+    * @param result the resulting list of partitions
+    * @return true if the resulting list may also contain partitions that do not match the expr
+    *    (the expression could not be evaluated completely on the server side)
+    * @throws TException thrift transport error or error executing the filter.
+    */
+   boolean listPartitionsByExpr(String db_name, String tbl_name,
+       byte[] expr, String default_partition_name, short max_parts, List<Partition> result)
+           throws TException;
+ 
+   /**
+    * Get list of partitions matching specified serialized expression
+    * @param catName catalog name
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param expr expression, serialized from ExprNodeDesc
+    * @param max_parts the maximum number of partitions to return,
+    *    all partitions are returned if -1 is passed
+    * @param default_partition_name Default partition name from configuration. If blank, the
+    *    metastore server-side configuration is used.
+    * @param result the resulting list of partitions
+    * @return true if the resulting list may also contain partitions that do not match the expr
+    *    (the expression could not be evaluated completely on the server side)
+    * @throws TException thrift transport error or error executing the filter.
+    */
+   boolean listPartitionsByExpr(String catName, String db_name, String tbl_name, byte[] expr,
+                                String default_partition_name, int max_parts, List<Partition> result)
+       throws TException;
+ 
+   /**
+    * List partitions, fetching the authorization information along with the partitions.
+    * @param dbName database name
+    * @param tableName table name
+    * @param maxParts maximum number of partitions to fetch, or -1 for all
+    * @param userName user to fetch privileges for
+    * @param groupNames groups to fetch privileges for
+    * @return the list of partitions
+    * @throws NoSuchObjectException no partitions matching the criteria were found
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsWithAuthInfo(String dbName,
+       String tableName, short maxParts, String userName, List<String> groupNames)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * List partitions, fetching the authorization information along with the partitions.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param maxParts maximum number of partitions to fetch, or -1 for all
+    * @param userName user to fetch privileges for
+    * @param groupNames groups to fetch privileges for
+    * @return the list of partitions
+    * @throws NoSuchObjectException no partitions matching the criteria were found
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsWithAuthInfo(String catName, String dbName, String tableName,
+                                              int maxParts, String userName, List<String> groupNames)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * Get partitions by a list of partition names.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param part_names list of partition names
+    * @return list of Partition objects
+    * @throws NoSuchObjectException No such partitions
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<Partition> getPartitionsByNames(String db_name, String tbl_name,
+       List<String> part_names) throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get partitions by a list of partition names.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param part_names list of partition names
+    * @return list of Partition objects
+    * @throws NoSuchObjectException No such partitions
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<Partition> getPartitionsByNames(String catName, String db_name, String tbl_name,
+                                        List<String> part_names)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * List partitions along with privilege information for a user or groups
+    * @param dbName database name
+    * @param tableName table name
+    * @param partialPvals partition values, can be partial
+    * @param maxParts maximum number of partitions to fetch, or -1 for all
+    * @param userName user to fetch privilege information for
+    * @param groupNames groups to fetch privilege information for
+    * @return the list of partitions
+    * @throws NoSuchObjectException no partitions matching the criteria were found
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsWithAuthInfo(String dbName,
+       String tableName, List<String> partialPvals, short maxParts, String userName,
+       List<String> groupNames) throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * List partitions along with privilege information for a user or groups
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partialPvals partition values, can be partial
+    * @param maxParts maximum number of partitions to fetch, or -1 for all
+    * @param userName user to fetch privilege information for
+    * @param groupNames groups to fetch privilege information for
+    * @return the list of partitions
+    * @throws NoSuchObjectException no partitions matching the criteria were found
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsWithAuthInfo(String catName, String dbName, String tableName,
+                                              List<String> partialPvals, int maxParts, String userName,
+                                              List<String> groupNames)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * Mark an event as having occurred on a partition.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param partKVs key value pairs that describe the partition
+    * @param eventType type of the event
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException never throws this AFAICT
+    * @throws TException thrift transport error
+    * @throws UnknownTableException no such table
+    * @throws UnknownDBException no such database
+    * @throws UnknownPartitionException no such partition
+    * @throws InvalidPartitionException partition partKVs is invalid
+    */
+   void markPartitionForEvent(String db_name, String tbl_name, Map<String,String> partKVs,
+       PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException,
+       UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException;
+ 
+   /**
+    * Mark an event as having occurred on a partition.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param partKVs key value pairs that describe the partition
+    * @param eventType type of the event
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException never throws this AFAICT
+    * @throws TException thrift transport error
+    * @throws UnknownTableException no such table
+    * @throws UnknownDBException no such database
+    * @throws UnknownPartitionException no such partition
+    * @throws InvalidPartitionException partition partKVs is invalid
+    */
+   void markPartitionForEvent(String catName, String db_name, String tbl_name, Map<String,String> partKVs,
+                              PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException,
+       UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException;
+ 
+   /**
+    * Determine whether a partition has been marked with a particular event type.
+    * @param db_name database name
+    * @param tbl_name table name.
+    * @param partKVs key value pairs that describe the partition.
+    * @param eventType event type
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException never throws this AFAICT
+    * @throws TException thrift transport error
+    * @throws UnknownTableException no such table
+    * @throws UnknownDBException no such database
+    * @throws UnknownPartitionException no such partition
+    * @throws InvalidPartitionException partition partKVs is invalid
+    */
+   boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map<String,String> partKVs,
+       PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException,
+       UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException;
+ 
+   /**
+    * Determine whether a partition has been marked with a particular event type.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name.
+    * @param partKVs key value pairs that describe the partition.
+    * @param eventType event type
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException never throws this AFAICT
+    * @throws TException thrift transport error
+    * @throws UnknownTableException no such table
+    * @throws UnknownDBException no such database
+    * @throws UnknownPartitionException no such partition
+    * @throws InvalidPartitionException partition partKVs is invalid
+    */
+   boolean isPartitionMarkedForEvent(String catName, String db_name, String tbl_name, Map<String,String> partKVs,
+                                     PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException,
+       UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException;
+ 
+   /**
+    * @param partVals partition values to validate
+    * @throws TException thrift transport error
+    * @throws MetaException a partition value contains characters that are not allowed
+    */
+   void validatePartitionNameCharacters(List<String> partVals) throws TException, MetaException;
+ 
+   /**
+    * @param tbl table definition to create
+    * @throws AlreadyExistsException a table with this name already exists
+    * @throws InvalidObjectException the table definition is invalid
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException the database the table targets does not exist
+    * @throws TException thrift transport error
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table)
+    */
+   void createTable(Table tbl) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, NoSuchObjectException, TException;
+ 
+   /**
+    * Alter a table
+    * @param databaseName database name
+    * @param tblName table name
+    * @param table new table object, should be a complete representation of the table, not just the
+    *             things you want to change.
+    * @throws InvalidOperationException something is wrong with the new table object or an
+    * operation was attempted that is not allowed (such as changing partition columns).
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift exception
+    */
+   void alter_table(String databaseName, String tblName, Table table)
+       throws InvalidOperationException, MetaException, TException;
+ 
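[Editor's note, not part of the patch] Sketch of a read-modify-write alter: fetch the full Table, tweak it, and send the complete object back, since alter_table replaces the stored definition. The property key and value are placeholders.

    Table t = client.getTable("demo_db", "demo_tbl");
    t.putToParameters("comment", "updated via IMetaStoreClient");  // placeholder change
    client.alter_table("demo_db", "demo_tbl", t);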
+   /**
+    * Alter a table. Equivalent to
+    * {@link #alter_table(String, String, String, Table, EnvironmentContext)} with
+    * EnvironmentContext set to null.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param newTable new table object, should be a complete representation of the table, not just the
+    *                 things you want to change.
+    * @throws InvalidOperationException something is wrong with the new table object or an
+    * operation was attempted that is not allowed (such as changing partition columns).
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift exception
+    */
+   default void alter_table(String catName, String dbName, String tblName, Table newTable)
+       throws InvalidOperationException, MetaException, TException {
+     alter_table(catName, dbName, tblName, newTable, null);
+   }
+ 
+   /**
+    * Alter a table.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param newTable new table object, should be a complete representation of the table, not just the
+    *                 things you want to change.
+    * @param envContext options for the alter.
+    * @throws InvalidOperationException something is wrong with the new table object or an
+    * operation was attempted that is not allowed (such as changing partition columns).
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift exception
+    */
+   void alter_table(String catName, String dbName, String tblName, Table newTable,
+                   EnvironmentContext envContext)
+       throws InvalidOperationException, MetaException, TException;
+ 
+   /**
+    * @deprecated Use alter_table_with_environmentContext instead of alter_table with cascade option
+    * passed in EnvironmentContext using {@code StatsSetupConst.CASCADE}
+    */
+   @Deprecated
+   void alter_table(String defaultDatabaseName, String tblName, Table table,
+       boolean cascade) throws InvalidOperationException, MetaException, TException;
+ 
+   /**
+    * Alter a table.
+    * @param databaseName database name
+    * @param tblName table name
+    * @param table new table object, should be a complete representation of the table, not just the
+    *              things you want to change.
+    * @param environmentContext options for the alter.
+    * @throws InvalidOperationException something is wrong with the new table object or an
+    * operation was attempted that is not allowed (such as changing partition columns).
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift exception
+    */
++  @Deprecated
+   void alter_table_with_environmentContext(String databaseName, String tblName, Table table,
+       EnvironmentContext environmentContext) throws InvalidOperationException, MetaException,
+       TException;
+ 
++  void alter_table(String catName, String databaseName, String tblName, Table table,
++      EnvironmentContext environmentContext, long txnId, String validWriteIdList)
++          throws InvalidOperationException, MetaException, TException;
+   /**
+    * Create a new database.
+    * @param db database object.  If the catalog name is null it will be assumed to be
+    *           {@link Warehouse#DEFAULT_CATALOG_NAME}.
+    * @throws InvalidObjectException There is something wrong with the database object.
+    * @throws AlreadyExistsException There is already a database of this name in the specified
+    * catalog.
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift error
+    */
+   void createDatabase(Database db)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Drop a database.
+    * @param name name of the database to drop.
+    * @throws NoSuchObjectException No such database exists.
+    * @throws InvalidOperationException The database cannot be dropped because it is not empty.
+    * @throws MetaException something went wrong, usually either in the RDBMS or in storage.
+    * @throws TException general thrift error.
+    */
+   void dropDatabase(String name)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
+ 
+   /**
+    *
+    * Drop a database.
+    * @param name name of the database to drop.
+    * @param deleteData whether to drop the underlying HDFS directory.
+    * @param ignoreUnknownDb whether to ignore an attempt to drop a non-existent database
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog and
+    * ignoreUnknownDb is false.
+    * @throws InvalidOperationException The database cannot be dropped because it is not empty.
+    * @throws MetaException something went wrong, usually either in the RDBMS or in storage.
+    * @throws TException general thrift error.
+    */
+   void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
+ 
+   /**
+    *
+    * Drop a database.
+    * @param name database name.
+    * @param deleteData whether to drop the underlying HDFS directory.
+    * @param ignoreUnknownDb whether to ignore an attempt to drop a non-existent database
+    * @param cascade whether to drop contained tables, etc.  If this is false and there are
+    *                objects still in the database the drop will fail.
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog and
+    * ignoreUnknownDb is false.
+    * @throws InvalidOperationException The database contains objects and cascade is false.
+    * @throws MetaException something went wrong, usually either in the RDBMS or storage.
+    * @throws TException general thrift error.
+    */
+   void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
+ 
+   /**
+    * Drop a database.
+    * @param catName Catalog name.  This can be null, in which case
+    *                {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed.
+    * @param dbName database name.
+    * @param deleteData whether to drop the underlying HDFS directory.
+    * @param ignoreUnknownDb whether to ignore an attempt to drop a non-existent database
+    * @param cascade whether to drop contained tables, etc.  If this is false and there are
+    *                objects still in the database the drop will fail.
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog and
+    * ignoreUnknownDb is false.
+    * @throws InvalidOperationException The database contains objects and cascade is false.
+    * @throws MetaException something went wrong, usually either in the RDBMS or storage.
+    * @throws TException general thrift error.
+    */
+   void dropDatabase(String catName, String dbName, boolean deleteData, boolean ignoreUnknownDb,
+                     boolean cascade)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
+ 
+   /**
+    * Drop a database.  Equivalent to
+    * {@link #dropDatabase(String, String, boolean, boolean, boolean)} with cascade = false.
+    * @param catName Catalog name.  This can be null, in which case
+    *                {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed.
+    * @param dbName database name.
+    * @param deleteData whether to drop the underlying HDFS directory.
+    * @param ignoreUnknownDb whether to ignore an attempt to drop a non-existent database
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog and
+    * ignoreUnknownDb is false.
+    * @throws InvalidOperationException The database contains objects and cascade is false.
+    * @throws MetaException something went wrong, usually either in the RDBMS or storage.
+    * @throws TException general thrift error.
+    */
+   default void dropDatabase(String catName, String dbName, boolean deleteData,
+                             boolean ignoreUnknownDb)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(catName, dbName, deleteData, ignoreUnknownDb, false);
+   }
+ 
+   /**
+    * Drop a database.  Equivalent to
+    * {@link #dropDatabase(String, String, boolean, boolean, boolean)} with deleteData =
+    * true, ignoreUnknownDb = false, cascade = false.
+    * @param catName Catalog name.  This can be null, in which case
+    *                {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed.
+    * @param dbName database name.
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog and
+    * ignoreUnknownDb is false.
+    * @throws InvalidOperationException The database contains objects and cascade is false.
+    * @throws MetaException something went wrong, usually either in the RDBMS or storage.
+    * @throws TException general thrift error.
+    */
+   default void dropDatabase(String catName, String dbName)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(catName, dbName, true, false, false);
+   }
+ 
+ 
+   /**
+    * Alter a database.
+    * @param name database name.
+    * @param db new database object.
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog.
+    * @throws MetaException something went wrong, usually in the RDBMS.
+    * @throws TException general thrift error.
+    */
+   void alterDatabase(String name, Database db)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Alter a database.
+    * @param catName Catalog name.  This can be null, in which case
+    *                {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed.
+    * @param dbName database name.
+    * @param newDb new database object.
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog.
+    * @throws MetaException something went wrong, usually in the RDBMS.
+    * @throws TException general thrift error.
+    */
+   void alterDatabase(String catName, String dbName, Database newDb)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Drop a partition.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param part_vals partition values, in the same order as the partition keys
+    * @param deleteData
+    *          delete the underlying data or just delete the partition in metadata
+    * @return true if the partition was successfully dropped
+    * @throws NoSuchObjectException partition does not exist
+    * @throws MetaException error accessing the RDBMS or the storage.
+    * @throws TException thrift transport error
+    */
+   boolean dropPartition(String db_name, String tbl_name,
+       List<String> part_vals, boolean deleteData) throws NoSuchObjectException,
+       MetaException, TException;
+ 
+   /**
+    * Drop a partition.
+    * @param catName catalog name.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param part_vals partition values, in the same order as the partition keys
+    * @param deleteData
+    *          delete the underlying data or just delete the partition in metadata
+    * @return true if the partition was successfully dropped
+    * @throws NoSuchObjectException partition does not exist
+    * @throws MetaException error accessing the RDBMS or the storage.
+    * @throws TException thrift transport error
+    */
+   boolean dropPartition(String catName, String db_name, String tbl_name,
+               

<TRUNCATED>
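
For reference, a minimal sketch of how a client might exercise the catalog-aware methods
documented above. This is illustrative only: the class name, catalog, database, table and
partition names are placeholders, and nothing beyond the IMetaStoreClient signatures shown
in this diff is assumed.

    import java.util.Arrays;

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Database;

    public class MetastoreClientSketch {
      // "client" is an already-connected IMetaStoreClient; updatedDb is a Database
      // object prepared by the caller. All literal names below are placeholders.
      static void example(IMetaStoreClient client, Database updatedDb) throws Exception {
        // Full form: deleteData removes the underlying HDFS directory, ignoreUnknownDb
        // tolerates a missing database, cascade also drops the objects it contains.
        client.dropDatabase("hive", "scratch_db", true, true, true);

        // Convenience overload: deleteData = true, ignoreUnknownDb = false, cascade = false.
        client.dropDatabase("hive", "tmp_db");

        // Replace the stored metadata of an existing database in the given catalog.
        client.alterDatabase("hive", "sales_db", updatedDb);

        // Drop a single partition by its values; true also deletes the underlying data.
        boolean dropped = client.dropPartition("sales_db", "orders",
            Arrays.asList("2018-07-19"), true);
      }
    }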

[25/54] [abbrv] hive git commit: HIVE-19532 : fix tests for master-txnstats branch - more tests (Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-19532 : fix tests for master-txnstats branch - more tests (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f0a2fffa
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f0a2fffa
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f0a2fffa

Branch: refs/heads/master-txnstats
Commit: f0a2fffa9e7044002c6b75219fd3da50b55c3e68
Parents: 174c674
Author: sergey <se...@apache.org>
Authored: Thu Jul 19 14:36:50 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Thu Jul 19 14:36:50 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |    4 +
 .../apache/hadoop/hive/ql/metadata/Hive.java    |   16 +-
 .../results/clientpositive/acid_stats4.q.out    |   39 +-
 .../alter_partition_update_status.q.out         |   12 +
 .../alter_table_update_status.q.out             |   76 +
 ..._table_update_status_disable_bitvector.q.out |   76 +
 .../clientpositive/compute_stats_date.q.out     |    4 +
 .../metastore/api/RenamePartitionRequest.java   | 1067 ++++++
 .../metastore/api/RenamePartitionResponse.java  |  283 ++
 .../hive/metastore/api/ThriftHiveMetastore.java | 3559 ++++++++++++------
 .../gen-php/metastore/ThriftHiveMetastore.php   | 2112 ++++++-----
 .../src/gen/thrift/gen-php/metastore/Types.php  |  294 ++
 .../hive_metastore/ThriftHiveMetastore-remote   |    7 +
 .../hive_metastore/ThriftHiveMetastore.py       | 1151 +++---
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  206 +
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   47 +
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   66 +
 .../hadoop/hive/metastore/HiveAlterHandler.java |    3 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |   23 +-
 .../hive/metastore/HiveMetaStoreClient.java     |   12 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |    3 +-
 .../hadoop/hive/metastore/txn/TxnDbUtil.java    |   13 +
 .../src/main/thrift/hive_metastore.thrift       |   16 +
 .../HiveMetaStoreClientPreCatalog.java          |    4 +-
 .../metastore/client/TestAlterPartitions.java   |    2 +-
 25 files changed, 6425 insertions(+), 2670 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index f356682..9b66bcf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -1660,11 +1660,15 @@ public class AcidUtils {
   }
 
 
+  /** Note: this is generally called from Hive.java; callers of Hive.java are expected to
+   *        set up the ACID state during compilation, and Hive.java retrieves it if needed. */
   public static TableSnapshot getTableSnapshot(
       Configuration conf, Table tbl, boolean isStatsUpdater) throws LockException {
     return getTableSnapshot(conf, tbl, tbl.getDbName(), tbl.getTableName(), isStatsUpdater);
   }
 
+  /** Note: this is generally called from Hive.java; callers of Hive.java are expected to
+   *        set up the ACID state during compilation, and Hive.java retrieves it if needed. */
   public static TableSnapshot getTableSnapshot(Configuration conf,
       Table tbl, String dbName, String tblName, boolean isStatsUpdater)
       throws LockException, AssertionError {
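
A minimal, hypothetical call sketch of the two overloads above, for callers outside
Hive.java that resolve the snapshot themselves. Only the getTableSnapshot signatures
shown in this hunk are assumed; conf, table, the literal names, and the enclosing helper
are placeholders, and imports are omitted.

    // Hypothetical helper; LockException from getTableSnapshot propagates to the caller.
    static void resolveSnapshots(Configuration conf, Table table) throws LockException {
      // Table-based overload: the db/table names are taken from the Table object itself.
      TableSnapshot byTable = AcidUtils.getTableSnapshot(conf, table, false);

      // Explicit-name overload, for when the target name differs from the Table object.
      TableSnapshot byName =
          AcidUtils.getTableSnapshot(conf, table, "default", "stats_part", false);
    }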

http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index f3ab6a1..a9accbf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -828,8 +828,20 @@ public class Hive {
           pvals.add(val);
         }
       }
-      getMSC().renamePartition(tbl.getDbName(), tbl.getTableName(), pvals,
-          newPart.getTPartition());
+      String validWriteIds = null;
+      long txnId = -1;
+      if (AcidUtils.isTransactionalTable(tbl)) {
+        // Set table snapshot to api.Table to make it persistent.
+        TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true);
+        if (tableSnapshot != null) {
+          newPart.getTPartition().setWriteId(tableSnapshot.getWriteId());
+          txnId = tableSnapshot.getTxnId();
+          validWriteIds = tableSnapshot.getValidWriteIdList();
+        }
+      }
+
+      getMSC().renamePartition(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), pvals,
+          newPart.getTPartition(), txnId, validWriteIds);
 
     } catch (InvalidOperationException e){
       throw new HiveException("Unable to rename partition. " + e.getMessage(), e);
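
Putting this hunk together, once the patch is applied the rename path for transactional
tables reads roughly as follows. This is only a consolidated restatement of the '+' lines
above, not new behavior; conf, tbl, pvals, newPart and getMSC() are the surrounding
members of Hive.renamePartition().

    String validWriteIds = null;
    long txnId = -1;
    if (AcidUtils.isTransactionalTable(tbl)) {
      // Set the table snapshot on the api.Partition so the write id is persisted.
      TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true);
      if (tableSnapshot != null) {
        newPart.getTPartition().setWriteId(tableSnapshot.getWriteId());
        txnId = tableSnapshot.getTxnId();
        validWriteIds = tableSnapshot.getValidWriteIdList();
      }
    }
    // The metastore call now carries the catalog name, txn id and valid write id list.
    getMSC().renamePartition(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), pvals,
        newPart.getTPartition(), txnId, validWriteIds);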

http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/ql/src/test/results/clientpositive/acid_stats4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/acid_stats4.q.out b/ql/src/test/results/clientpositive/acid_stats4.q.out
index bfb8898..afd5adb 100644
--- a/ql/src/test/results/clientpositive/acid_stats4.q.out
+++ b/ql/src/test/results/clientpositive/acid_stats4.q.out
@@ -300,47 +300,12 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain select count(key) from stats_part
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: stats_part
-            Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: key (type: int)
-              outputColumnNames: key
-              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
-              Group By Operator
-                aggregations: count(key)
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
-      limit: -1
+      limit: 1
       Processor Tree:
         ListSink
 

http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/ql/src/test/results/clientpositive/alter_partition_update_status.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_partition_update_status.q.out b/ql/src/test/results/clientpositive/alter_partition_update_status.q.out
index 09dda34..3ad935a 100644
--- a/ql/src/test/results/clientpositive/alter_partition_update_status.q.out
+++ b/ql/src/test/results/clientpositive/alter_partition_update_status.q.out
@@ -52,8 +52,12 @@ bitVector           	HL
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE src_stat_part_one PARTITION(partitionId=1) UPDATE STATISTICS for column key SET ('numDVs'='11','avgColLen'='2.2')
 PREHOOK: type: ALTERTABLE_UPDATEPARTSTATS
+PREHOOK: Input: default@src_stat_part_one
+PREHOOK: Output: default@src_stat_part_one@partitionid=1
 POSTHOOK: query: ALTER TABLE src_stat_part_one PARTITION(partitionId=1) UPDATE STATISTICS for column key SET ('numDVs'='11','avgColLen'='2.2')
 POSTHOOK: type: ALTERTABLE_UPDATEPARTSTATS
+POSTHOOK: Input: default@src_stat_part_one
+POSTHOOK: Output: default@src_stat_part_one@partitionid=1
 PREHOOK: query: describe formatted src_stat_part_one PARTITION(partitionId=1) key
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_stat_part_one
@@ -126,8 +130,12 @@ bitVector           	HL
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE src_stat_part_two PARTITION(px=1, py='a') UPDATE STATISTICS for column key SET ('numDVs'='30','maxColLen'='40')
 PREHOOK: type: ALTERTABLE_UPDATEPARTSTATS
+PREHOOK: Input: default@src_stat_part_two
+PREHOOK: Output: default@src_stat_part_two@px=1/py=a
 POSTHOOK: query: ALTER TABLE src_stat_part_two PARTITION(px=1, py='a') UPDATE STATISTICS for column key SET ('numDVs'='30','maxColLen'='40')
 POSTHOOK: type: ALTERTABLE_UPDATEPARTSTATS
+POSTHOOK: Input: default@src_stat_part_two
+POSTHOOK: Output: default@src_stat_part_two@px=1/py=a
 PREHOOK: query: describe formatted src_stat_part_two PARTITION(px=1, py='a') key
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_stat_part_two
@@ -160,8 +168,12 @@ POSTHOOK: type: SWITCHDATABASE
 POSTHOOK: Input: database:dummydb
 PREHOOK: query: ALTER TABLE default.src_stat_part_two PARTITION(px=1, py='a') UPDATE STATISTICS for column key SET ('numDVs'='40','maxColLen'='50')
 PREHOOK: type: ALTERTABLE_UPDATEPARTSTATS
+PREHOOK: Input: default@src_stat_part_two
+PREHOOK: Output: default@src_stat_part_two@px=1/py=a
 POSTHOOK: query: ALTER TABLE default.src_stat_part_two PARTITION(px=1, py='a') UPDATE STATISTICS for column key SET ('numDVs'='40','maxColLen'='50')
 POSTHOOK: type: ALTERTABLE_UPDATEPARTSTATS
+POSTHOOK: Input: default@src_stat_part_two
+POSTHOOK: Output: default@src_stat_part_two@px=1/py=a
 PREHOOK: query: describe formatted default.src_stat_part_two PARTITION(px=1, py='a') key
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_stat_part_two

http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/ql/src/test/results/clientpositive/alter_table_update_status.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_table_update_status.q.out b/ql/src/test/results/clientpositive/alter_table_update_status.q.out
index ce3ff4c..b855379 100644
--- a/ql/src/test/results/clientpositive/alter_table_update_status.q.out
+++ b/ql/src/test/results/clientpositive/alter_table_update_status.q.out
@@ -63,8 +63,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE src_stat_n0 UPDATE STATISTICS for column key SET ('numDVs'='1111','avgColLen'='1.111')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@src_stat_n0
+PREHOOK: Output: default@src_stat_n0
 POSTHOOK: query: ALTER TABLE src_stat_n0 UPDATE STATISTICS for column key SET ('numDVs'='1111','avgColLen'='1.111')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@src_stat_n0
+POSTHOOK: Output: default@src_stat_n0
 PREHOOK: query: describe formatted src_stat_n0 key
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_stat_n0
@@ -86,8 +90,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE src_stat_n0 UPDATE STATISTICS for column value SET ('numDVs'='121','numNulls'='122','avgColLen'='1.23','maxColLen'='124')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@src_stat_n0
+PREHOOK: Output: default@src_stat_n0
 POSTHOOK: query: ALTER TABLE src_stat_n0 UPDATE STATISTICS for column value SET ('numDVs'='121','numNulls'='122','avgColLen'='1.23','maxColLen'='124')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@src_stat_n0
+POSTHOOK: Output: default@src_stat_n0
 PREHOOK: query: describe formatted src_stat_n0 value
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_stat_n0
@@ -138,8 +146,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE src_stat_int_n0 UPDATE STATISTICS for column key SET ('numDVs'='2222','lowValue'='333.22','highValue'='22.22')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@src_stat_int_n0
+PREHOOK: Output: default@src_stat_int_n0
 POSTHOOK: query: ALTER TABLE src_stat_int_n0 UPDATE STATISTICS for column key SET ('numDVs'='2222','lowValue'='333.22','highValue'='22.22')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@src_stat_int_n0
+POSTHOOK: Output: default@src_stat_int_n0
 PREHOOK: query: describe formatted src_stat_int_n0 key
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_stat_int_n0
@@ -173,8 +185,12 @@ POSTHOOK: type: SWITCHDATABASE
 POSTHOOK: Input: database:dummydb
 PREHOOK: query: ALTER TABLE default.src_stat_n0 UPDATE STATISTICS for column key SET ('numDVs'='3333','avgColLen'='2.222')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@src_stat_n0
+PREHOOK: Output: default@src_stat_n0
 POSTHOOK: query: ALTER TABLE default.src_stat_n0 UPDATE STATISTICS for column key SET ('numDVs'='3333','avgColLen'='2.222')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@src_stat_n0
+POSTHOOK: Output: default@src_stat_n0
 PREHOOK: query: describe formatted default.src_stat_n0 key
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_stat_n0
@@ -196,8 +212,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.src_stat_n0 UPDATE STATISTICS for column value SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@src_stat_n0
+PREHOOK: Output: default@src_stat_n0
 POSTHOOK: query: ALTER TABLE default.src_stat_n0 UPDATE STATISTICS for column value SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@src_stat_n0
+POSTHOOK: Output: default@src_stat_n0
 PREHOOK: query: describe formatted default.src_stat_n0 value
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_stat_n0
@@ -579,8 +599,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column t SET ('numDVs'='232','numNulls'='233','highValue'='234','lowValue'='35')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats_n0
+PREHOOK: Output: default@datatype_stats_n0
 POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column t SET ('numDVs'='232','numNulls'='233','highValue'='234','lowValue'='35')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats_n0
+POSTHOOK: Output: default@datatype_stats_n0
 PREHOOK: query: DESC FORMATTED datatype_stats_n0 t
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats_n0
@@ -621,8 +645,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"t\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column s SET ('numDVs'='56','numNulls'='56','highValue'='489','lowValue'='25')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats_n0
+PREHOOK: Output: default@datatype_stats_n0
 POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column s SET ('numDVs'='56','numNulls'='56','highValue'='489','lowValue'='25')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats_n0
+POSTHOOK: Output: default@datatype_stats_n0
 PREHOOK: query: DESC FORMATTED datatype_stats_n0 s
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats_n0
@@ -663,8 +691,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"s\":\"true\",\"t\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column i SET ('numDVs'='59','numNulls'='1','highValue'='889','lowValue'='5')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats_n0
+PREHOOK: Output: default@datatype_stats_n0
 POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column i SET ('numDVs'='59','numNulls'='1','highValue'='889','lowValue'='5')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats_n0
+POSTHOOK: Output: default@datatype_stats_n0
 PREHOOK: query: DESC FORMATTED datatype_stats_n0 i
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats_n0
@@ -705,8 +737,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column b SET ('numDVs'='9','numNulls'='14','highValue'='89','lowValue'='8')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats_n0
+PREHOOK: Output: default@datatype_stats_n0
 POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column b SET ('numDVs'='9','numNulls'='14','highValue'='89','lowValue'='8')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats_n0
+POSTHOOK: Output: default@datatype_stats_n0
 PREHOOK: query: DESC FORMATTED datatype_stats_n0 b
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats_n0
@@ -747,8 +783,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column f SET ('numDVs'='563','numNulls'='45','highValue'='2345.656','lowValue'='8.00')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats_n0
+PREHOOK: Output: default@datatype_stats_n0
 POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column f SET ('numDVs'='563','numNulls'='45','highValue'='2345.656','lowValue'='8.00')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats_n0
+POSTHOOK: Output: default@datatype_stats_n0
 PREHOOK: query: DESC FORMATTED datatype_stats_n0 f
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats_n0
@@ -789,8 +829,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column d SET ('numDVs'='5677','numNulls'='12','highValue'='560.3367','lowValue'='0.00455')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats_n0
+PREHOOK: Output: default@datatype_stats_n0
 POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column d SET ('numDVs'='5677','numNulls'='12','highValue'='560.3367','lowValue'='0.00455')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats_n0
+POSTHOOK: Output: default@datatype_stats_n0
 PREHOOK: query: DESC FORMATTED datatype_stats_n0 d
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats_n0
@@ -831,8 +875,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column dem SET ('numDVs'='57','numNulls'='912','highValue'='560','lowValue'='0')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats_n0
+PREHOOK: Output: default@datatype_stats_n0
 POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column dem SET ('numDVs'='57','numNulls'='912','highValue'='560','lowValue'='0')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats_n0
+POSTHOOK: Output: default@datatype_stats_n0
 PREHOOK: query: DESC FORMATTED datatype_stats_n0 dem
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats_n0
@@ -873,8 +921,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column ts SET ('numDVs'='7','numNulls'='12','highValue'='1357030923','lowValue'='1357030924')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats_n0
+PREHOOK: Output: default@datatype_stats_n0
 POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column ts SET ('numDVs'='7','numNulls'='12','highValue'='1357030923','lowValue'='1357030924')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats_n0
+POSTHOOK: Output: default@datatype_stats_n0
 PREHOOK: query: DESC FORMATTED datatype_stats_n0 ts
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats_n0
@@ -915,8 +967,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\",\"ts\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column dt SET ('numDVs'='57','numNulls'='912','highValue'='2012-01-01','lowValue'='2001-02-04')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats_n0
+PREHOOK: Output: default@datatype_stats_n0
 POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column dt SET ('numDVs'='57','numNulls'='912','highValue'='2012-01-01','lowValue'='2001-02-04')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats_n0
+POSTHOOK: Output: default@datatype_stats_n0
 PREHOOK: query: DESC FORMATTED datatype_stats_n0 dt
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats_n0
@@ -957,8 +1013,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\",\"ts\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column str SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats_n0
+PREHOOK: Output: default@datatype_stats_n0
 POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column str SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats_n0
+POSTHOOK: Output: default@datatype_stats_n0
 PREHOOK: query: DESC FORMATTED datatype_stats_n0 str
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats_n0
@@ -999,8 +1059,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"str\":\"true\",\"t\":\"true\",\"ts\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column v SET ('numDVs'='22','numNulls'='33','avgColLen'='4.40','maxColLen'='25')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats_n0
+PREHOOK: Output: default@datatype_stats_n0
 POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column v SET ('numDVs'='22','numNulls'='33','avgColLen'='4.40','maxColLen'='25')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats_n0
+POSTHOOK: Output: default@datatype_stats_n0
 PREHOOK: query: DESC FORMATTED datatype_stats_n0 v
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats_n0
@@ -1041,8 +1105,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"str\":\"true\",\"t\":\"true\",\"ts\":\"true\",\"v\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column c SET ('numDVs'='2','numNulls'='03','avgColLen'='9.00','maxColLen'='58')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats_n0
+PREHOOK: Output: default@datatype_stats_n0
 POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column c SET ('numDVs'='2','numNulls'='03','avgColLen'='9.00','maxColLen'='58')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats_n0
+POSTHOOK: Output: default@datatype_stats_n0
 PREHOOK: query: DESC FORMATTED datatype_stats_n0 c
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats_n0
@@ -1083,8 +1151,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"c\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"str\":\"true\",\"t\":\"true\",\"ts\":\"true\",\"v\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column bl SET ('numNulls'='1','numTrues'='9','numFalses'='8')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats_n0
+PREHOOK: Output: default@datatype_stats_n0
 POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column bl SET ('numNulls'='1','numTrues'='9','numFalses'='8')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats_n0
+POSTHOOK: Output: default@datatype_stats_n0
 PREHOOK: query: DESC FORMATTED datatype_stats_n0 bl
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats_n0
@@ -1125,8 +1197,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"bl\":\"true\",\"c\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"str\":\"true\",\"t\":\"true\",\"ts\":\"true\",\"v\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column bin SET ('numNulls'='8','avgColLen'='2.0','maxColLen'='8')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats_n0
+PREHOOK: Output: default@datatype_stats_n0
 POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column bin SET ('numNulls'='8','avgColLen'='2.0','maxColLen'='8')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats_n0
+POSTHOOK: Output: default@datatype_stats_n0
 PREHOOK: query: DESC FORMATTED datatype_stats_n0 bin
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats_n0

http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/ql/src/test/results/clientpositive/alter_table_update_status_disable_bitvector.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_table_update_status_disable_bitvector.q.out b/ql/src/test/results/clientpositive/alter_table_update_status_disable_bitvector.q.out
index 726ec1b..fd4bb7b 100644
--- a/ql/src/test/results/clientpositive/alter_table_update_status_disable_bitvector.q.out
+++ b/ql/src/test/results/clientpositive/alter_table_update_status_disable_bitvector.q.out
@@ -63,8 +63,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE src_stat UPDATE STATISTICS for column key SET ('numDVs'='1111','avgColLen'='1.111')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@src_stat
+PREHOOK: Output: default@src_stat
 POSTHOOK: query: ALTER TABLE src_stat UPDATE STATISTICS for column key SET ('numDVs'='1111','avgColLen'='1.111')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@src_stat
+POSTHOOK: Output: default@src_stat
 PREHOOK: query: describe formatted src_stat key
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_stat
@@ -86,8 +90,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE src_stat UPDATE STATISTICS for column value SET ('numDVs'='121','numNulls'='122','avgColLen'='1.23','maxColLen'='124')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@src_stat
+PREHOOK: Output: default@src_stat
 POSTHOOK: query: ALTER TABLE src_stat UPDATE STATISTICS for column value SET ('numDVs'='121','numNulls'='122','avgColLen'='1.23','maxColLen'='124')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@src_stat
+POSTHOOK: Output: default@src_stat
 PREHOOK: query: describe formatted src_stat value
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_stat
@@ -138,8 +146,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE src_stat_int UPDATE STATISTICS for column key SET ('numDVs'='2222','lowValue'='333.22','highValue'='22.22')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@src_stat_int
+PREHOOK: Output: default@src_stat_int
 POSTHOOK: query: ALTER TABLE src_stat_int UPDATE STATISTICS for column key SET ('numDVs'='2222','lowValue'='333.22','highValue'='22.22')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@src_stat_int
+POSTHOOK: Output: default@src_stat_int
 PREHOOK: query: describe formatted src_stat_int key
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_stat_int
@@ -173,8 +185,12 @@ POSTHOOK: type: SWITCHDATABASE
 POSTHOOK: Input: database:dummydb
 PREHOOK: query: ALTER TABLE default.src_stat UPDATE STATISTICS for column key SET ('numDVs'='3333','avgColLen'='2.222')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@src_stat
+PREHOOK: Output: default@src_stat
 POSTHOOK: query: ALTER TABLE default.src_stat UPDATE STATISTICS for column key SET ('numDVs'='3333','avgColLen'='2.222')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@src_stat
+POSTHOOK: Output: default@src_stat
 PREHOOK: query: describe formatted default.src_stat key
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_stat
@@ -196,8 +212,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.src_stat UPDATE STATISTICS for column value SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@src_stat
+PREHOOK: Output: default@src_stat
 POSTHOOK: query: ALTER TABLE default.src_stat UPDATE STATISTICS for column value SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@src_stat
+POSTHOOK: Output: default@src_stat
 PREHOOK: query: describe formatted default.src_stat value
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_stat
@@ -579,8 +599,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column t SET ('numDVs'='232','numNulls'='233','highValue'='234','lowValue'='35')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats
+PREHOOK: Output: default@datatype_stats
 POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column t SET ('numDVs'='232','numNulls'='233','highValue'='234','lowValue'='35')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Output: default@datatype_stats
 PREHOOK: query: DESC FORMATTED datatype_stats t
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats
@@ -621,8 +645,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"t\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column s SET ('numDVs'='56','numNulls'='56','highValue'='489','lowValue'='25')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats
+PREHOOK: Output: default@datatype_stats
 POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column s SET ('numDVs'='56','numNulls'='56','highValue'='489','lowValue'='25')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Output: default@datatype_stats
 PREHOOK: query: DESC FORMATTED datatype_stats s
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats
@@ -663,8 +691,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"s\":\"true\",\"t\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column i SET ('numDVs'='59','numNulls'='1','highValue'='889','lowValue'='5')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats
+PREHOOK: Output: default@datatype_stats
 POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column i SET ('numDVs'='59','numNulls'='1','highValue'='889','lowValue'='5')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Output: default@datatype_stats
 PREHOOK: query: DESC FORMATTED datatype_stats i
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats
@@ -705,8 +737,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column b SET ('numDVs'='9','numNulls'='14','highValue'='89','lowValue'='8')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats
+PREHOOK: Output: default@datatype_stats
 POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column b SET ('numDVs'='9','numNulls'='14','highValue'='89','lowValue'='8')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Output: default@datatype_stats
 PREHOOK: query: DESC FORMATTED datatype_stats b
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats
@@ -747,8 +783,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column f SET ('numDVs'='563','numNulls'='45','highValue'='2345.656','lowValue'='8.00')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats
+PREHOOK: Output: default@datatype_stats
 POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column f SET ('numDVs'='563','numNulls'='45','highValue'='2345.656','lowValue'='8.00')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Output: default@datatype_stats
 PREHOOK: query: DESC FORMATTED datatype_stats f
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats
@@ -789,8 +829,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column d SET ('numDVs'='5677','numNulls'='12','highValue'='560.3367','lowValue'='0.00455')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats
+PREHOOK: Output: default@datatype_stats
 POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column d SET ('numDVs'='5677','numNulls'='12','highValue'='560.3367','lowValue'='0.00455')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Output: default@datatype_stats
 PREHOOK: query: DESC FORMATTED datatype_stats d
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats
@@ -831,8 +875,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column dem SET ('numDVs'='57','numNulls'='912','highValue'='560','lowValue'='0')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats
+PREHOOK: Output: default@datatype_stats
 POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column dem SET ('numDVs'='57','numNulls'='912','highValue'='560','lowValue'='0')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Output: default@datatype_stats
 PREHOOK: query: DESC FORMATTED datatype_stats dem
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats
@@ -873,8 +921,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column ts SET ('numDVs'='7','numNulls'='12','highValue'='1357030923','lowValue'='1357030924')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats
+PREHOOK: Output: default@datatype_stats
 POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column ts SET ('numDVs'='7','numNulls'='12','highValue'='1357030923','lowValue'='1357030924')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Output: default@datatype_stats
 PREHOOK: query: DESC FORMATTED datatype_stats ts
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats
@@ -915,8 +967,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\",\"ts\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column dt SET ('numDVs'='57','numNulls'='912','highValue'='2012-01-01','lowValue'='2001-02-04')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats
+PREHOOK: Output: default@datatype_stats
 POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column dt SET ('numDVs'='57','numNulls'='912','highValue'='2012-01-01','lowValue'='2001-02-04')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Output: default@datatype_stats
 PREHOOK: query: DESC FORMATTED datatype_stats dt
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats
@@ -957,8 +1013,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\",\"ts\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column str SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats
+PREHOOK: Output: default@datatype_stats
 POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column str SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Output: default@datatype_stats
 PREHOOK: query: DESC FORMATTED datatype_stats str
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats
@@ -999,8 +1059,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"str\":\"true\",\"t\":\"true\",\"ts\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column v SET ('numDVs'='22','numNulls'='33','avgColLen'='4.40','maxColLen'='25')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats
+PREHOOK: Output: default@datatype_stats
 POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column v SET ('numDVs'='22','numNulls'='33','avgColLen'='4.40','maxColLen'='25')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Output: default@datatype_stats
 PREHOOK: query: DESC FORMATTED datatype_stats v
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats
@@ -1041,8 +1105,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"str\":\"true\",\"t\":\"true\",\"ts\":\"true\",\"v\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column c SET ('numDVs'='2','numNulls'='03','avgColLen'='9.00','maxColLen'='58')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats
+PREHOOK: Output: default@datatype_stats
 POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column c SET ('numDVs'='2','numNulls'='03','avgColLen'='9.00','maxColLen'='58')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Output: default@datatype_stats
 PREHOOK: query: DESC FORMATTED datatype_stats c
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats
@@ -1083,8 +1151,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"c\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"str\":\"true\",\"t\":\"true\",\"ts\":\"true\",\"v\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column bl SET ('numNulls'='1','numTrues'='9','numFalses'='8')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats
+PREHOOK: Output: default@datatype_stats
 POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column bl SET ('numNulls'='1','numTrues'='9','numFalses'='8')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Output: default@datatype_stats
 PREHOOK: query: DESC FORMATTED datatype_stats bl
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats
@@ -1125,8 +1197,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"bl\":\"true\",\"c\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"str\":\"true\",\"t\":\"true\",\"ts\":\"true\",\"v\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column bin SET ('numNulls'='8','avgColLen'='2.0','maxColLen'='8')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@datatype_stats
+PREHOOK: Output: default@datatype_stats
 POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column bin SET ('numNulls'='8','avgColLen'='2.0','maxColLen'='8')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Output: default@datatype_stats
 PREHOOK: query: DESC FORMATTED datatype_stats bin
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@datatype_stats

http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/ql/src/test/results/clientpositive/compute_stats_date.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/compute_stats_date.q.out b/ql/src/test/results/clientpositive/compute_stats_date.q.out
index 58d2d70..6715e05 100644
--- a/ql/src/test/results/clientpositive/compute_stats_date.q.out
+++ b/ql/src/test/results/clientpositive/compute_stats_date.q.out
@@ -127,8 +127,12 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"fl_date\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@tab_date
+PREHOOK: Output: default@tab_date
 POSTHOOK: query: alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0')
 POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@tab_date
+POSTHOOK: Output: default@tab_date
 PREHOOK: query: describe formatted tab_date fl_date
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@tab_date

http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java
new file mode 100644
index 0000000..e4ffe11
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java
@@ -0,0 +1,1067 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class RenamePartitionRequest implements org.apache.thrift.TBase<RenamePartitionRequest, RenamePartitionRequest._Fields>, java.io.Serializable, Cloneable, Comparable<RenamePartitionRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("RenamePartitionRequest");
+
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("partVals", org.apache.thrift.protocol.TType.LIST, (short)4);
+  private static final org.apache.thrift.protocol.TField NEW_PART_FIELD_DESC = new org.apache.thrift.protocol.TField("newPart", org.apache.thrift.protocol.TType.STRUCT, (short)5);
+  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)6);
+  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new RenamePartitionRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new RenamePartitionRequestTupleSchemeFactory());
+  }
+
+  private String catName; // optional
+  private String dbName; // required
+  private String tableName; // required
+  private List<String> partVals; // required
+  private Partition newPart; // required
+  private long txnId; // optional
+  private String validWriteIdList; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    CAT_NAME((short)1, "catName"),
+    DB_NAME((short)2, "dbName"),
+    TABLE_NAME((short)3, "tableName"),
+    PART_VALS((short)4, "partVals"),
+    NEW_PART((short)5, "newPart"),
+    TXN_ID((short)6, "txnId"),
+    VALID_WRITE_ID_LIST((short)7, "validWriteIdList");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // CAT_NAME
+          return CAT_NAME;
+        case 2: // DB_NAME
+          return DB_NAME;
+        case 3: // TABLE_NAME
+          return TABLE_NAME;
+        case 4: // PART_VALS
+          return PART_VALS;
+        case 5: // NEW_PART
+          return NEW_PART;
+        case 6: // TXN_ID
+          return TXN_ID;
+        case 7: // VALID_WRITE_ID_LIST
+          return VALID_WRITE_ID_LIST;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __TXNID_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PART_VALS, new org.apache.thrift.meta_data.FieldMetaData("partVals", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.NEW_PART, new org.apache.thrift.meta_data.FieldMetaData("newPart", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class)));
+    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(RenamePartitionRequest.class, metaDataMap);
+  }
+
+  public RenamePartitionRequest() {
+    this.txnId = -1L;
+
+  }
+
+  public RenamePartitionRequest(
+    String dbName,
+    String tableName,
+    List<String> partVals,
+    Partition newPart)
+  {
+    this();
+    this.dbName = dbName;
+    this.tableName = tableName;
+    this.partVals = partVals;
+    this.newPart = newPart;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public RenamePartitionRequest(RenamePartitionRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetTableName()) {
+      this.tableName = other.tableName;
+    }
+    if (other.isSetPartVals()) {
+      List<String> __this__partVals = new ArrayList<String>(other.partVals);
+      this.partVals = __this__partVals;
+    }
+    if (other.isSetNewPart()) {
+      this.newPart = new Partition(other.newPart);
+    }
+    this.txnId = other.txnId;
+    if (other.isSetValidWriteIdList()) {
+      this.validWriteIdList = other.validWriteIdList;
+    }
+  }
+
+  public RenamePartitionRequest deepCopy() {
+    return new RenamePartitionRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.catName = null;
+    this.dbName = null;
+    this.tableName = null;
+    this.partVals = null;
+    this.newPart = null;
+    this.txnId = -1L;
+
+    this.validWriteIdList = null;
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getTableName() {
+    return this.tableName;
+  }
+
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
+  }
+
+  public void unsetTableName() {
+    this.tableName = null;
+  }
+
+  /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTableName() {
+    return this.tableName != null;
+  }
+
+  public void setTableNameIsSet(boolean value) {
+    if (!value) {
+      this.tableName = null;
+    }
+  }
+
+  public int getPartValsSize() {
+    return (this.partVals == null) ? 0 : this.partVals.size();
+  }
+
+  public java.util.Iterator<String> getPartValsIterator() {
+    return (this.partVals == null) ? null : this.partVals.iterator();
+  }
+
+  public void addToPartVals(String elem) {
+    if (this.partVals == null) {
+      this.partVals = new ArrayList<String>();
+    }
+    this.partVals.add(elem);
+  }
+
+  public List<String> getPartVals() {
+    return this.partVals;
+  }
+
+  public void setPartVals(List<String> partVals) {
+    this.partVals = partVals;
+  }
+
+  public void unsetPartVals() {
+    this.partVals = null;
+  }
+
+  /** Returns true if field partVals is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartVals() {
+    return this.partVals != null;
+  }
+
+  public void setPartValsIsSet(boolean value) {
+    if (!value) {
+      this.partVals = null;
+    }
+  }
+
+  public Partition getNewPart() {
+    return this.newPart;
+  }
+
+  public void setNewPart(Partition newPart) {
+    this.newPart = newPart;
+  }
+
+  public void unsetNewPart() {
+    this.newPart = null;
+  }
+
+  /** Returns true if field newPart is set (has been assigned a value) and false otherwise */
+  public boolean isSetNewPart() {
+    return this.newPart != null;
+  }
+
+  public void setNewPartIsSet(boolean value) {
+    if (!value) {
+      this.newPart = null;
+    }
+  }
+
+  public long getTxnId() {
+    return this.txnId;
+  }
+
+  public void setTxnId(long txnId) {
+    this.txnId = txnId;
+    setTxnIdIsSet(true);
+  }
+
+  public void unsetTxnId() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+  }
+
+  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+  public boolean isSetTxnId() {
+    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+  }
+
+  public void setTxnIdIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+  }
+
+  public String getValidWriteIdList() {
+    return this.validWriteIdList;
+  }
+
+  public void setValidWriteIdList(String validWriteIdList) {
+    this.validWriteIdList = validWriteIdList;
+  }
+
+  public void unsetValidWriteIdList() {
+    this.validWriteIdList = null;
+  }
+
+  /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+  public boolean isSetValidWriteIdList() {
+    return this.validWriteIdList != null;
+  }
+
+  public void setValidWriteIdListIsSet(boolean value) {
+    if (!value) {
+      this.validWriteIdList = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TABLE_NAME:
+      if (value == null) {
+        unsetTableName();
+      } else {
+        setTableName((String)value);
+      }
+      break;
+
+    case PART_VALS:
+      if (value == null) {
+        unsetPartVals();
+      } else {
+        setPartVals((List<String>)value);
+      }
+      break;
+
+    case NEW_PART:
+      if (value == null) {
+        unsetNewPart();
+      } else {
+        setNewPart((Partition)value);
+      }
+      break;
+
+    case TXN_ID:
+      if (value == null) {
+        unsetTxnId();
+      } else {
+        setTxnId((Long)value);
+      }
+      break;
+
+    case VALID_WRITE_ID_LIST:
+      if (value == null) {
+        unsetValidWriteIdList();
+      } else {
+        setValidWriteIdList((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case CAT_NAME:
+      return getCatName();
+
+    case DB_NAME:
+      return getDbName();
+
+    case TABLE_NAME:
+      return getTableName();
+
+    case PART_VALS:
+      return getPartVals();
+
+    case NEW_PART:
+      return getNewPart();
+
+    case TXN_ID:
+      return getTxnId();
+
+    case VALID_WRITE_ID_LIST:
+      return getValidWriteIdList();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case CAT_NAME:
+      return isSetCatName();
+    case DB_NAME:
+      return isSetDbName();
+    case TABLE_NAME:
+      return isSetTableName();
+    case PART_VALS:
+      return isSetPartVals();
+    case NEW_PART:
+      return isSetNewPart();
+    case TXN_ID:
+      return isSetTxnId();
+    case VALID_WRITE_ID_LIST:
+      return isSetValidWriteIdList();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof RenamePartitionRequest)
+      return this.equals((RenamePartitionRequest)that);
+    return false;
+  }
+
+  public boolean equals(RenamePartitionRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tableName = true && this.isSetTableName();
+    boolean that_present_tableName = true && that.isSetTableName();
+    if (this_present_tableName || that_present_tableName) {
+      if (!(this_present_tableName && that_present_tableName))
+        return false;
+      if (!this.tableName.equals(that.tableName))
+        return false;
+    }
+
+    boolean this_present_partVals = true && this.isSetPartVals();
+    boolean that_present_partVals = true && that.isSetPartVals();
+    if (this_present_partVals || that_present_partVals) {
+      if (!(this_present_partVals && that_present_partVals))
+        return false;
+      if (!this.partVals.equals(that.partVals))
+        return false;
+    }
+
+    boolean this_present_newPart = true && this.isSetNewPart();
+    boolean that_present_newPart = true && that.isSetNewPart();
+    if (this_present_newPart || that_present_newPart) {
+      if (!(this_present_newPart && that_present_newPart))
+        return false;
+      if (!this.newPart.equals(that.newPart))
+        return false;
+    }
+
+    boolean this_present_txnId = true && this.isSetTxnId();
+    boolean that_present_txnId = true && that.isSetTxnId();
+    if (this_present_txnId || that_present_txnId) {
+      if (!(this_present_txnId && that_present_txnId))
+        return false;
+      if (this.txnId != that.txnId)
+        return false;
+    }
+
+    boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+    boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+    if (this_present_validWriteIdList || that_present_validWriteIdList) {
+      if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+        return false;
+      if (!this.validWriteIdList.equals(that.validWriteIdList))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tableName = true && (isSetTableName());
+    list.add(present_tableName);
+    if (present_tableName)
+      list.add(tableName);
+
+    boolean present_partVals = true && (isSetPartVals());
+    list.add(present_partVals);
+    if (present_partVals)
+      list.add(partVals);
+
+    boolean present_newPart = true && (isSetNewPart());
+    list.add(present_newPart);
+    if (present_newPart)
+      list.add(newPart);
+
+    boolean present_txnId = true && (isSetTxnId());
+    list.add(present_txnId);
+    if (present_txnId)
+      list.add(txnId);
+
+    boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+    list.add(present_validWriteIdList);
+    if (present_validWriteIdList)
+      list.add(validWriteIdList);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(RenamePartitionRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTableName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPartVals()).compareTo(other.isSetPartVals());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartVals()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partVals, other.partVals);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNewPart()).compareTo(other.isSetNewPart());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNewPart()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.newPart, other.newPart);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTxnId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetValidWriteIdList()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("RenamePartitionRequest(");
+    boolean first = true;
+
+    if (isSetCatName()) {
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    if (!first) sb.append(", ");
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tableName:");
+    if (this.tableName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tableName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("partVals:");
+    if (this.partVals == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.partVals);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("newPart:");
+    if (this.newPart == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.newPart);
+    }
+    first = false;
+    if (isSetTxnId()) {
+      if (!first) sb.append(", ");
+      sb.append("txnId:");
+      sb.append(this.txnId);
+      first = false;
+    }
+    if (isSetValidWriteIdList()) {
+      if (!first) sb.append(", ");
+      sb.append("validWriteIdList:");
+      if (this.validWriteIdList == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.validWriteIdList);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDbName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTableName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetPartVals()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'partVals' is unset! Struct:" + toString());
+    }
+
+    if (!isSetNewPart()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'newPart' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+    if (newPart != null) {
+      newPart.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java serialization bypasses the default constructor, so reset the isset bitfield explicitly before reading.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class RenamePartitionRequestStandardSchemeFactory implements SchemeFactory {
+    public RenamePartitionRequestStandardScheme getScheme() {
+      return new RenamePartitionRequestStandardScheme();
+    }
+  }
+
+  private static class RenamePartitionRequestStandardScheme extends StandardScheme<RenamePartitionRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, RenamePartitionRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TABLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tableName = iprot.readString();
+              struct.setTableNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // PART_VALS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list960 = iprot.readListBegin();
+                struct.partVals = new ArrayList<String>(_list960.size);
+                String _elem961;
+                for (int _i962 = 0; _i962 < _list960.size; ++_i962)
+                {
+                  _elem961 = iprot.readString();
+                  struct.partVals.add(_elem961);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPartValsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // NEW_PART
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.newPart = new Partition();
+              struct.newPart.read(iprot);
+              struct.setNewPartIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // TXN_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.txnId = iprot.readI64();
+              struct.setTxnIdIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // VALID_WRITE_ID_LIST
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.validWriteIdList = iprot.readString();
+              struct.setValidWriteIdListIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, RenamePartitionRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tableName != null) {
+        oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.tableName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.partVals != null) {
+        oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partVals.size()));
+          for (String _iter963 : struct.partVals)
+          {
+            oprot.writeString(_iter963);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.newPart != null) {
+        oprot.writeFieldBegin(NEW_PART_FIELD_DESC);
+        struct.newPart.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetTxnId()) {
+        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+        oprot.writeI64(struct.txnId);
+        oprot.writeFieldEnd();
+      }
+      if (struct.validWriteIdList != null) {
+        if (struct.isSetValidWriteIdList()) {
+          oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+          oprot.writeString(struct.validWriteIdList);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class RenamePartitionRequestTupleSchemeFactory implements SchemeFactory {
+    public RenamePartitionRequestTupleScheme getScheme() {
+      return new RenamePartitionRequestTupleScheme();
+    }
+  }
+
+  private static class RenamePartitionRequestTupleScheme extends TupleScheme<RenamePartitionRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, RenamePartitionRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.dbName);
+      oprot.writeString(struct.tableName);
+      {
+        oprot.writeI32(struct.partVals.size());
+        for (String _iter964 : struct.partVals)
+        {
+          oprot.writeString(_iter964);
+        }
+      }
+      struct.newPart.write(oprot);
+      BitSet optionals = new BitSet();
+      if (struct.isSetCatName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetTxnId()) {
+        optionals.set(1);
+      }
+      if (struct.isSetValidWriteIdList()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+      if (struct.isSetTxnId()) {
+        oprot.writeI64(struct.txnId);
+      }
+      if (struct.isSetValidWriteIdList()) {
+        oprot.writeString(struct.validWriteIdList);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, RenamePartitionRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.dbName = iprot.readString();
+      struct.setDbNameIsSet(true);
+      struct.tableName = iprot.readString();
+      struct.setTableNameIsSet(true);
+      {
+        org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.partVals = new ArrayList<String>(_list965.size);
+        String _elem966;
+        for (int _i967 = 0; _i967 < _list965.size; ++_i967)
+        {
+          _elem966 = iprot.readString();
+          struct.partVals.add(_elem966);
+        }
+      }
+      struct.setPartValsIsSet(true);
+      struct.newPart = new Partition();
+      struct.newPart.read(iprot);
+      struct.setNewPartIsSet(true);
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.txnId = iprot.readI64();
+        struct.setTxnIdIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.validWriteIdList = iprot.readString();
+        struct.setValidWriteIdListIsSet(true);
+      }
+    }
+  }
+
+}
+
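
For reviewers, a minimal usage sketch of the generated struct above follows. It assumes RenamePartitionRequest and Partition live in org.apache.hadoop.hive.metastore.api (like the other generated metastore structs) and that libthrift is on the classpath; the database, table and partition values and the validWriteIdList string are made up for illustration, and the metastore client call that would actually carry the request is deliberately left out.

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.RenamePartitionRequest;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;

public class RenamePartitionRequestSketch {
  public static void main(String[] args) throws Exception {
    // The four required fields go through the convenience constructor; the
    // names below are illustrative only.
    Partition newPart = new Partition(); // a real caller would pass a fully populated Partition
    RenamePartitionRequest req = new RenamePartitionRequest(
        "default", "web_sales", Arrays.asList("2018-07-19"), newPart);

    // Optional fields (catName, txnId, validWriteIdList) are serialized only when set.
    req.setCatName("hive");
    req.setTxnId(42L);
    req.setValidWriteIdList("default.web_sales:42:42::"); // illustrative string only

    req.validate(); // throws TProtocolException if a required field is unset

    // Round-trip through the compact protocol, mirroring writeObject()/readObject() above.
    byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(req);
    RenamePartitionRequest copy = new RenamePartitionRequest();
    new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);

    System.out.println(copy); // toString() prints only the fields that are set
  }
}

The sketch exercises the StandardScheme path; the TupleScheme added above is only used when the struct is serialized with a TTupleProtocol.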


[35/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
index 0000000,5d1a525..caa55d7
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
@@@ -1,0 -1,1866 +1,1868 @@@
+ --
+ -- PostgreSQL database dump
+ --
+ 
+ SET statement_timeout = 0;
+ SET client_encoding = 'UTF8';
+ SET standard_conforming_strings = off;
+ SET check_function_bodies = false;
+ SET client_min_messages = warning;
+ SET escape_string_warning = off;
+ 
+ SET search_path = public, pg_catalog;
+ 
+ SET default_tablespace = '';
+ 
+ SET default_with_oids = false;
+ 
+ --
+ -- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "BUCKETING_COLS" (
+     "SD_ID" bigint NOT NULL,
+     "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "CDS" (
+     "CD_ID" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "COLUMNS_V2" (
+     "CD_ID" bigint NOT NULL,
+     "COMMENT" character varying(4000),
+     "COLUMN_NAME" character varying(767) NOT NULL,
+     "TYPE_NAME" text,
+     "INTEGER_IDX" integer NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "DATABASE_PARAMS" (
+     "DB_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(180) NOT NULL,
+     "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+ );
+ 
+ 
+ CREATE TABLE "CTLGS" (
+     "CTLG_ID" BIGINT PRIMARY KEY,
+     "NAME" VARCHAR(256) UNIQUE,
+     "DESC" VARCHAR(4000),
+     "LOCATION_URI" VARCHAR(4000) NOT NULL
+ );
+ 
+ --
+ -- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "DBS" (
+     "DB_ID" bigint NOT NULL,
+     "DESC" character varying(4000) DEFAULT NULL::character varying,
+     "DB_LOCATION_URI" character varying(4000) NOT NULL,
+     "NAME" character varying(128) DEFAULT NULL::character varying,
+     "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+     "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying,
+     "CTLG_NAME" varchar(256)
+ );
+ 
+ 
+ --
+ -- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "DB_PRIVS" (
+     "DB_GRANT_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "DB_ID" bigint,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "DB_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "GLOBAL_PRIVS" (
+     "USER_GRANT_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "USER_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "IDXS" (
+     "INDEX_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "DEFERRED_REBUILD" boolean NOT NULL,
+     "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
+     "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+     "INDEX_TBL_ID" bigint,
+     "LAST_ACCESS_TIME" bigint NOT NULL,
+     "ORIG_TBL_ID" bigint,
+     "SD_ID" bigint
+ );
+ 
+ 
+ --
+ -- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "INDEX_PARAMS" (
+     "INDEX_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(256) NOT NULL,
+     "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "NUCLEUS_TABLES" (
+     "CLASS_NAME" character varying(128) NOT NULL,
+     "TABLE_NAME" character varying(128) NOT NULL,
+     "TYPE" character varying(4) NOT NULL,
+     "OWNER" character varying(2) NOT NULL,
+     "VERSION" character varying(20) NOT NULL,
+     "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PARTITIONS" (
+     "PART_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "LAST_ACCESS_TIME" bigint NOT NULL,
+     "PART_NAME" character varying(767) DEFAULT NULL::character varying,
+     "SD_ID" bigint,
 -    "TBL_ID" bigint
++    "TBL_ID" bigint,
++    "WRITE_ID" bigint DEFAULT 0
+ );
+ 
+ 
+ --
+ -- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PARTITION_EVENTS" (
+     "PART_NAME_ID" bigint NOT NULL,
+     "CAT_NAME" character varying(256),
+     "DB_NAME" character varying(128),
+     "EVENT_TIME" bigint NOT NULL,
+     "EVENT_TYPE" integer NOT NULL,
+     "PARTITION_NAME" character varying(767),
+     "TBL_NAME" character varying(256)
+ );
+ 
+ 
+ --
+ -- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PARTITION_KEYS" (
+     "TBL_ID" bigint NOT NULL,
+     "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
+     "PKEY_NAME" character varying(128) NOT NULL,
+     "PKEY_TYPE" character varying(767) NOT NULL,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PARTITION_KEY_VALS" (
+     "PART_ID" bigint NOT NULL,
+     "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PARTITION_PARAMS" (
+     "PART_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(256) NOT NULL,
+     "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PART_COL_PRIVS" (
+     "PART_COLUMN_GRANT_ID" bigint NOT NULL,
+     "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+     "CREATE_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PART_ID" bigint,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PART_PRIVS" (
+     "PART_GRANT_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PART_ID" bigint,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PART_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "ROLES" (
+     "ROLE_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+     "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "ROLE_MAP" (
+     "ROLE_GRANT_ID" bigint NOT NULL,
+     "ADD_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "ROLE_ID" bigint
+ );
+ 
+ 
+ --
+ -- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SDS" (
+     "SD_ID" bigint NOT NULL,
+     "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+     "IS_COMPRESSED" boolean NOT NULL,
+     "LOCATION" character varying(4000) DEFAULT NULL::character varying,
+     "NUM_BUCKETS" bigint NOT NULL,
+     "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+     "SERDE_ID" bigint,
+     "CD_ID" bigint,
+     "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SD_PARAMS" (
+     "SD_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(256) NOT NULL,
+     "PARAM_VALUE" text DEFAULT NULL
+ );
+ 
+ 
+ --
+ -- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SEQUENCE_TABLE" (
+     "SEQUENCE_NAME" character varying(255) NOT NULL,
+     "NEXT_VAL" bigint NOT NULL
+ );
+ 
+ INSERT INTO "SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1);
+ 
+ --
+ -- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SERDES" (
+     "SERDE_ID" bigint NOT NULL,
+     "NAME" character varying(128) DEFAULT NULL::character varying,
+     "SLIB" character varying(4000) DEFAULT NULL::character varying,
+     "DESCRIPTION" varchar(4000),
+     "SERIALIZER_CLASS" varchar(4000),
+     "DESERIALIZER_CLASS" varchar(4000),
+     "SERDE_TYPE" integer
+ );
+ 
+ 
+ --
+ -- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SERDE_PARAMS" (
+     "SERDE_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(256) NOT NULL,
+     "PARAM_VALUE" text DEFAULT NULL
+ );
+ 
+ 
+ --
+ -- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SORT_COLS" (
+     "SD_ID" bigint NOT NULL,
+     "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+     "ORDER" bigint NOT NULL,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TABLE_PARAMS" (
+     "TBL_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(256) NOT NULL,
+     "PARAM_VALUE" text DEFAULT NULL
+ );
+ 
+ 
+ --
+ -- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TBLS" (
+     "TBL_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "DB_ID" bigint,
+     "LAST_ACCESS_TIME" bigint NOT NULL,
+     "OWNER" character varying(767) DEFAULT NULL::character varying,
+     "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying,
+     "RETENTION" bigint NOT NULL,
+     "SD_ID" bigint,
+     "TBL_NAME" character varying(256) DEFAULT NULL::character varying,
+     "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "VIEW_EXPANDED_TEXT" text,
+     "VIEW_ORIGINAL_TEXT" text,
 -    "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false
++    "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false,
++    "WRITE_ID" bigint DEFAULT 0
+ );
+ 
+ --
+ -- Name: MV_CREATION_METADATA; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "MV_CREATION_METADATA" (
+     "MV_CREATION_METADATA_ID" bigint NOT NULL,
+     "CAT_NAME" character varying(256) NOT NULL,
+     "DB_NAME" character varying(128) NOT NULL,
+     "TBL_NAME" character varying(256) NOT NULL,
+     "TXN_LIST" text,
+     "MATERIALIZATION_TIME" bigint NOT NULL
+ );
+ 
+ --
+ -- Name: MV_TABLES_USED; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "MV_TABLES_USED" (
+     "MV_CREATION_METADATA_ID" bigint NOT NULL,
+     "TBL_ID" bigint NOT NULL
+ );
+ 
+ --
+ -- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TBL_COL_PRIVS" (
+     "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
+     "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+     "CREATE_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "TBL_ID" bigint,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TBL_PRIVS" (
+     "TBL_GRANT_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "TBL_ID" bigint,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TYPES" (
+     "TYPES_ID" bigint NOT NULL,
+     "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
+     "TYPE1" character varying(767) DEFAULT NULL::character varying,
+     "TYPE2" character varying(767) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TYPE_FIELDS" (
+     "TYPE_NAME" bigint NOT NULL,
+     "COMMENT" character varying(256) DEFAULT NULL::character varying,
+     "FIELD_NAME" character varying(128) NOT NULL,
+     "FIELD_TYPE" character varying(767) NOT NULL,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ CREATE TABLE "SKEWED_STRING_LIST" (
+     "STRING_LIST_ID" bigint NOT NULL
+ );
+ 
+ CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
+     "STRING_LIST_ID" bigint NOT NULL,
+     "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ CREATE TABLE "SKEWED_COL_NAMES" (
+     "SD_ID" bigint NOT NULL,
+     "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
+     "SD_ID" bigint NOT NULL,
+     "STRING_LIST_ID_KID" bigint NOT NULL,
+     "LOCATION" character varying(4000) DEFAULT NULL::character varying
+ );
+ 
+ CREATE TABLE "SKEWED_VALUES" (
+     "SD_ID_OID" bigint NOT NULL,
+     "STRING_LIST_ID_EID" bigint NOT NULL,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE  "MASTER_KEYS"
+ (
+     "KEY_ID" SERIAL,
+     "MASTER_KEY" varchar(767) NULL,
+     PRIMARY KEY ("KEY_ID")
+ );
+ 
+ CREATE TABLE  "DELEGATION_TOKENS"
+ (
+     "TOKEN_IDENT" varchar(767) NOT NULL,
+     "TOKEN" varchar(767) NULL,
+     PRIMARY KEY ("TOKEN_IDENT")
+ );
+ 
+ CREATE TABLE "TAB_COL_STATS" (
+  "CS_ID" bigint NOT NULL,
+  "CAT_NAME" character varying(256) DEFAULT NULL::character varying,
+  "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+  "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
+  "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+  "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+  "TBL_ID" bigint NOT NULL,
+  "LONG_LOW_VALUE" bigint,
+  "LONG_HIGH_VALUE" bigint,
+  "DOUBLE_LOW_VALUE" double precision,
+  "DOUBLE_HIGH_VALUE" double precision,
+  "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+  "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+  "NUM_NULLS" bigint NOT NULL,
+  "NUM_DISTINCTS" bigint,
+  "BIT_VECTOR" bytea,
+  "AVG_COL_LEN" double precision,
+  "MAX_COL_LEN" bigint,
+  "NUM_TRUES" bigint,
+  "NUM_FALSES" bigint,
+  "LAST_ANALYZED" bigint NOT NULL
+ );
+ 
+ --
+ -- Table structure for VERSION
+ --
+ CREATE TABLE "VERSION" (
+   "VER_ID" bigint,
+   "SCHEMA_VERSION" character varying(127) NOT NULL,
+   "VERSION_COMMENT" character varying(255) NOT NULL
+ );
+ 
+ --
+ -- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PART_COL_STATS" (
+  "CS_ID" bigint NOT NULL,
+  "CAT_NAME" character varying(256) DEFAULT NULL::character varying,
+  "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+  "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
+  "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+  "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+  "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+  "PART_ID" bigint NOT NULL,
+  "LONG_LOW_VALUE" bigint,
+  "LONG_HIGH_VALUE" bigint,
+  "DOUBLE_LOW_VALUE" double precision,
+  "DOUBLE_HIGH_VALUE" double precision,
+  "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+  "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+  "NUM_NULLS" bigint NOT NULL,
+  "NUM_DISTINCTS" bigint,
+  "BIT_VECTOR" bytea,
+  "AVG_COL_LEN" double precision,
+  "MAX_COL_LEN" bigint,
+  "NUM_TRUES" bigint,
+  "NUM_FALSES" bigint,
+  "LAST_ANALYZED" bigint NOT NULL
+ );
+ 
+ --
+ -- Table structure for FUNCS
+ --
+ CREATE TABLE "FUNCS" (
+   "FUNC_ID" BIGINT NOT NULL,
+   "CLASS_NAME" VARCHAR(4000),
+   "CREATE_TIME" INTEGER NOT NULL,
+   "DB_ID" BIGINT,
+   "FUNC_NAME" VARCHAR(128),
+   "FUNC_TYPE" INTEGER NOT NULL,
+   "OWNER_NAME" VARCHAR(128),
+   "OWNER_TYPE" VARCHAR(10),
+   PRIMARY KEY ("FUNC_ID")
+ );
+ 
+ --
+ -- Table structure for FUNC_RU
+ --
+ CREATE TABLE "FUNC_RU" (
+   "FUNC_ID" BIGINT NOT NULL,
+   "RESOURCE_TYPE" INTEGER NOT NULL,
+   "RESOURCE_URI" VARCHAR(4000),
+   "INTEGER_IDX" INTEGER NOT NULL,
+   PRIMARY KEY ("FUNC_ID", "INTEGER_IDX")
+ );
+ 
+ CREATE TABLE "NOTIFICATION_LOG"
+ (
+     "NL_ID" BIGINT NOT NULL,
+     "EVENT_ID" BIGINT NOT NULL,
+     "EVENT_TIME" INTEGER NOT NULL,
+     "EVENT_TYPE" VARCHAR(32) NOT NULL,
+     "CAT_NAME" VARCHAR(256),
+     "DB_NAME" VARCHAR(128),
+     "TBL_NAME" VARCHAR(256),
+     "MESSAGE" text,
+     "MESSAGE_FORMAT" VARCHAR(16),
+     PRIMARY KEY ("NL_ID")
+ );
+ 
+ CREATE TABLE "NOTIFICATION_SEQUENCE"
+ (
+     "NNI_ID" BIGINT NOT NULL,
+     "NEXT_EVENT_ID" BIGINT NOT NULL,
+     PRIMARY KEY ("NNI_ID")
+ );
+ 
+ INSERT INTO "NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT 1,1 WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "NOTIFICATION_SEQUENCE");
+ 
+ CREATE TABLE "KEY_CONSTRAINTS"
+ (
+   "CHILD_CD_ID" BIGINT,
+   "CHILD_INTEGER_IDX" BIGINT,
+   "CHILD_TBL_ID" BIGINT,
+   "PARENT_CD_ID" BIGINT,
+   "PARENT_INTEGER_IDX" BIGINT NOT NULL,
+   "PARENT_TBL_ID" BIGINT NOT NULL,
+   "POSITION" BIGINT NOT NULL,
+   "CONSTRAINT_NAME" VARCHAR(400) NOT NULL,
+   "CONSTRAINT_TYPE" SMALLINT NOT NULL,
+   "UPDATE_RULE" SMALLINT,
+   "DELETE_RULE"	SMALLINT,
+   "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL,
+   "DEFAULT_VALUE" VARCHAR(400),
+   PRIMARY KEY ("CONSTRAINT_NAME", "POSITION")
+ ) ;
+ 
+ --
+ -- Table structure for METASTORE_DB_PROPERTIES
+ --
+ CREATE TABLE "METASTORE_DB_PROPERTIES"
+ (
+   "PROPERTY_KEY" VARCHAR(255) NOT NULL,
+   "PROPERTY_VALUE" VARCHAR(1000) NOT NULL,
+   "DESCRIPTION" VARCHAR(1000)
+ );
+ 
+ 
+ CREATE TABLE "WM_RESOURCEPLAN" (
+     "RP_ID" bigint NOT NULL,
+     "NAME" character varying(128) NOT NULL,
+     "QUERY_PARALLELISM" integer,
+     "STATUS" character varying(20) NOT NULL,
+     "DEFAULT_POOL_ID" bigint
+ );
+ 
+ CREATE TABLE "WM_POOL" (
+     "POOL_ID" bigint NOT NULL,
+     "RP_ID" bigint NOT NULL,
+     "PATH" character varying(1024) NOT NULL,
+     "ALLOC_FRACTION" double precision,
+     "QUERY_PARALLELISM" integer,
+     "SCHEDULING_POLICY" character varying(1024)
+ );
+ 
+ CREATE TABLE "WM_TRIGGER" (
+     "TRIGGER_ID" bigint NOT NULL,
+     "RP_ID" bigint NOT NULL,
+     "NAME" character varying(128) NOT NULL,
+     "TRIGGER_EXPRESSION" character varying(1024) DEFAULT NULL::character varying,
+     "ACTION_EXPRESSION" character varying(1024) DEFAULT NULL::character varying,
+     "IS_IN_UNMANAGED" smallint NOT NULL DEFAULT 0
+ );
+ 
+ CREATE TABLE "WM_POOL_TO_TRIGGER" (
+     "POOL_ID" bigint NOT NULL,
+     "TRIGGER_ID" bigint NOT NULL
+ );
+ 
+ CREATE TABLE "WM_MAPPING" (
+     "MAPPING_ID" bigint NOT NULL,
+     "RP_ID" bigint NOT NULL,
+     "ENTITY_TYPE" character varying(128) NOT NULL,
+     "ENTITY_NAME" character varying(128) NOT NULL,
+     "POOL_ID" bigint,
+     "ORDERING" integer
+ );
+ 
+ --
+ -- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "BUCKETING_COLS"
+     ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ 
+ --
+ -- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "CDS"
+     ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
+ 
+ 
+ --
+ -- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "COLUMNS_V2"
+     ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+ 
+ 
+ --
+ -- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "DATABASE_PARAMS"
+     ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "DB_PRIVS"
+     ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("AUTHORIZER", "DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "DBS"
+     ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
+ 
+ 
+ --
+ -- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "DB_PRIVS"
+     ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
+ 
+ 
+ --
+ -- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "GLOBAL_PRIVS"
+     ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("AUTHORIZER", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "GLOBAL_PRIVS"
+     ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
+ 
+ 
+ --
+ -- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "IDXS"
+     ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
+ 
+ 
+ --
+ -- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "INDEX_PARAMS"
+     ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "NUCLEUS_TABLES"
+     ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
+ 
+ 
+ --
+ -- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITIONS"
+     ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
+ 
+ 
+ --
+ -- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_EVENTS"
+     ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
+ 
+ 
+ --
+ -- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_KEYS"
+     ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+ 
+ 
+ --
+ -- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_KEY_VALS"
+     ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+ 
+ 
+ --
+ -- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_PARAMS"
+     ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PART_COL_PRIVS"
+     ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+ 
+ 
+ --
+ -- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PART_PRIVS"
+     ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
+ 
+ 
+ --
+ -- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "ROLES"
+     ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
+ 
+ 
+ --
+ -- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "ROLES"
+     ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
+ 
+ 
+ --
+ -- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "ROLE_MAP"
+     ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
+ 
+ 
+ --
+ -- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SDS"
+     ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
+ 
+ 
+ --
+ -- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SD_PARAMS"
+     ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SEQUENCE_TABLE"
+     ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
+ 
+ 
+ --
+ -- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SERDES"
+     ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
+ 
+ 
+ --
+ -- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SERDE_PARAMS"
+     ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SORT_COLS"
+     ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ 
+ --
+ -- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TABLE_PARAMS"
+     ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TBLS"
+     ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
+ 
+ 
+ --
+ -- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TBL_COL_PRIVS"
+     ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+ 
+ 
+ --
+ -- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TBL_PRIVS"
+     ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
+ 
+ 
+ --
+ -- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TYPES"
+     ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
+ 
+ 
+ --
+ -- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TYPE_FIELDS"
+     ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+ 
+ ALTER TABLE ONLY "SKEWED_STRING_LIST"
+     ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
+ 
+ ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+     ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+ 
+ 
+ ALTER TABLE ONLY "SKEWED_COL_NAMES"
+     ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+     ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+ 
+ ALTER TABLE ONLY "SKEWED_VALUES"
+     ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+ 
+ --
+ -- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+ 
+ --
+ -- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+ 
+ --
+ -- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "IDXS"
+     ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
+ 
+ 
+ --
+ -- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITIONS"
+     ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
+ 
+ 
+ --
+ -- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TBLS"
+     ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
+ 
+ 
+ --
+ -- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "DBS"
+     ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME", "CTLG_NAME");
+ 
+ 
+ --
+ -- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TYPES"
+     ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
+ 
+ 
+ --
+ -- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "ROLE_MAP"
+     ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+ 
+ ALTER TABLE ONLY "METASTORE_DB_PROPERTIES"
+     ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY");
+ 
+ 
+ -- Resource plan: Primary key and unique key constraints.
+ ALTER TABLE ONLY "WM_RESOURCEPLAN"
+     ADD CONSTRAINT "WM_RESOURCEPLAN_pkey" PRIMARY KEY ("RP_ID");
+ 
+ ALTER TABLE ONLY "WM_RESOURCEPLAN"
+     ADD CONSTRAINT "UNIQUE_WM_RESOURCEPLAN" UNIQUE ("NAME");
+ 
+ ALTER TABLE ONLY "WM_POOL"
+     ADD CONSTRAINT "WM_POOL_pkey" PRIMARY KEY ("POOL_ID");
+ 
+ ALTER TABLE ONLY "WM_POOL"
+     ADD CONSTRAINT "UNIQUE_WM_POOL" UNIQUE ("RP_ID", "PATH");
+ 
+ ALTER TABLE ONLY "WM_TRIGGER"
+     ADD CONSTRAINT "WM_TRIGGER_pkey" PRIMARY KEY ("TRIGGER_ID");
+ 
+ ALTER TABLE ONLY "WM_TRIGGER"
+     ADD CONSTRAINT "UNIQUE_WM_TRIGGER" UNIQUE ("RP_ID", "NAME");
+ 
+ ALTER TABLE ONLY "WM_POOL_TO_TRIGGER"
+     ADD CONSTRAINT "WM_POOL_TO_TRIGGER_pkey" PRIMARY KEY ("POOL_ID", "TRIGGER_ID");
+ 
+ ALTER TABLE ONLY "WM_MAPPING"
+     ADD CONSTRAINT "WM_MAPPING_pkey" PRIMARY KEY ("MAPPING_ID");
+ 
+ ALTER TABLE ONLY "WM_MAPPING"
+     ADD CONSTRAINT "UNIQUE_WM_MAPPING" UNIQUE ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME");
+ 
+ --
+ -- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
+ 
+ 
+ --
+ -- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
+ 
+ 
+ --
+ -- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
+ 
+ 
+ --
+ -- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
+ 
+ 
+ --
+ -- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
+ 
+ 
+ --
+ -- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("AUTHORIZER", "PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
+ 
+ 
+ --
+ -- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
+ 
+ 
+ --
+ -- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
+ 
+ 
+ --
+ -- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
+ 
+ 
+ --
+ -- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
+ 
+ 
+ --
+ -- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("AUTHORIZER", "PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
+ 
+ 
+ --
+ -- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
+ 
+ 
+ --
+ -- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+ 
+ 
+ --
+ -- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
+ 
+ 
+ --
+ -- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
+ 
+ 
+ --
+ -- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
+ 
+ 
+ --
+ -- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("AUTHORIZER", "TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("AUTHORIZER", "TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
+ 
+ 
+ --
+ -- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
+ 
+ 
+ --
+ -- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
+ 
+ --
+ -- Name: TAB_COL_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TAB_COL_STATS_IDX" ON "TAB_COL_STATS" USING btree ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME");
+ 
+ --
+ -- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
+ 
+ 
+ --
+ -- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
+ 
+ --
+ -- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
+ 
+ --
+ -- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+ 
+ --
+ -- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE UNIQUE INDEX "UNIQUEFUNCTION" ON "FUNCS" ("FUNC_NAME", "DB_ID");
+ 
+ --
+ -- Name: FUNCS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "FUNCS_N49" ON "FUNCS" ("DB_ID");
+ 
+ --
+ -- Name: FUNC_RU_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "FUNC_RU_N49" ON "FUNC_RU" ("FUNC_ID");
+ 
+ CREATE INDEX "CONSTRAINTS_PARENT_TBLID_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("PARENT_TBL_ID");
+ 
+ CREATE INDEX "CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("CONSTRAINT_TYPE");
+ 
+ ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+     ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+ 
+ 
+ ALTER TABLE ONLY "SKEWED_COL_NAMES"
+     ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+     ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+     ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "SKEWED_VALUES"
+     ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "SKEWED_VALUES"
+     ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "BUCKETING_COLS"
+     ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "COLUMNS_V2"
+     ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "DATABASE_PARAMS"
+     ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "DB_PRIVS"
+     ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "IDXS"
+     ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "IDXS"
+     ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "IDXS"
+     ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "INDEX_PARAMS"
+     ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PARTITIONS"
+     ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PARTITIONS"
+     ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_KEYS"
+     ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_KEY_VALS"
+     ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_PARAMS"
+     ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PART_COL_PRIVS"
+     ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PART_PRIVS"
+     ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "ROLE_MAP"
+     ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "SDS"
+     ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "SDS"
+     ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "SD_PARAMS"
+     ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "SERDE_PARAMS"
+     ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "SORT_COLS"
+     ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TABLE_PARAMS"
+     ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TBLS"
+     ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TBLS"
+     ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TBL_COL_PRIVS"
+     ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TBL_PRIVS"
+     ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TYPE_FIELDS"
+     ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
+ 
+ --
+ -- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+ 
+ ALTER TABLE "DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "CTLGS" ("NAME");
+ 
+ ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
+ 
+ -- Name: FUNCS_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ ALTER TABLE ONLY "FUNCS"
+     ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE;
+ 
+ -- Name: FUNC_RU_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ ALTER TABLE ONLY "FUNC_RU"
+     ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE;
+ 
+ -- Resource plan FK constraints.
+ 
+ ALTER TABLE ONLY "WM_POOL"
+     ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_RESOURCEPLAN"
+     ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_TRIGGER"
+     ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_POOL_TO_TRIGGER"
+     ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_POOL_TO_TRIGGER"
+     ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "WM_TRIGGER" ("TRIGGER_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_MAPPING"
+     ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_MAPPING"
+     ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "MV_CREATION_METADATA"
+     ADD CONSTRAINT "MV_CREATION_METADATA_PK" PRIMARY KEY ("MV_CREATION_METADATA_ID");
+ 
+ CREATE INDEX "MV_UNIQUE_TABLE"
+     ON "MV_CREATION_METADATA" USING btree ("TBL_NAME", "DB_NAME");
+ 
+ ALTER TABLE ONLY "MV_TABLES_USED"
+     ADD CONSTRAINT "MV_TABLES_USED_FK1" FOREIGN KEY ("MV_CREATION_METADATA_ID") REFERENCES "MV_CREATION_METADATA" ("MV_CREATION_METADATA_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "MV_TABLES_USED"
+     ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS" ("TBL_ID") DEFERRABLE;
+ 
+ --
+ -- Name: public; Type: ACL; Schema: -; Owner: hiveuser
+ --
+ 
+ REVOKE ALL ON SCHEMA public FROM PUBLIC;
+ GRANT ALL ON SCHEMA public TO PUBLIC;
+ 
+ --
+ -- PostgreSQL database dump complete
+ --
+ 
+ ------------------------------
+ -- Transaction and lock tables
+ ------------------------------
+ CREATE TABLE TXNS (
+   TXN_ID bigint PRIMARY KEY,
+   TXN_STATE char(1) NOT NULL,
+   TXN_STARTED bigint NOT NULL,
+   TXN_LAST_HEARTBEAT bigint NOT NULL,
+   TXN_USER varchar(128) NOT NULL,
+   TXN_HOST varchar(128) NOT NULL,
+   TXN_AGENT_INFO varchar(128),
+   TXN_META_INFO varchar(128),
+   TXN_HEARTBEAT_COUNT integer,
+   TXN_TYPE integer
+ );
+ 
+ CREATE TABLE TXN_COMPONENTS (
+   TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID),
+   TC_DATABASE varchar(128) NOT NULL,
+   TC_TABLE varchar(128),
+   TC_PARTITION varchar(767) DEFAULT NULL,
+   TC_OPERATION_TYPE char(1) NOT NULL,
+   TC_WRITEID bigint
+ );
+ 
+ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS USING hash (TC_TXNID);
+ 
+ CREATE TABLE COMPLETED_TXN_COMPONENTS (
+   CTC_TXNID bigint NOT NULL,
+   CTC_DATABASE varchar(128) NOT NULL,
+   CTC_TABLE varchar(256),
+   CTC_PARTITION varchar(767),
+   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
+   CTC_WRITEID bigint,
+   CTC_UPDATE_DELETE char(1) NOT NULL
+ );
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS USING btree (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+ 
+ CREATE TABLE NEXT_TXN_ID (
+   NTXN_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_TXN_ID VALUES(1);
+ 
+ CREATE TABLE HIVE_LOCKS (
+   HL_LOCK_EXT_ID bigint NOT NULL,
+   HL_LOCK_INT_ID bigint NOT NULL,
+   HL_TXNID bigint NOT NULL,
+   HL_DB varchar(128) NOT NULL,
+   HL_TABLE varchar(128),
+   HL_PARTITION varchar(767) DEFAULT NULL,
+   HL_LOCK_STATE char(1) NOT NULL,
+   HL_LOCK_TYPE char(1) NOT NULL,
+   HL_LAST_HEARTBEAT bigint NOT NULL,
+   HL_ACQUIRED_AT bigint,
+   HL_USER varchar(128) NOT NULL,
+   HL_HOST varchar(128) NOT NULL,
+   HL_HEARTBEAT_COUNT integer,
+   HL_AGENT_INFO varchar(128),
+   HL_BLOCKEDBY_EXT_ID bigint,
+   HL_BLOCKEDBY_INT_ID bigint,
+   PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+ );
+ 
+ CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS USING hash (HL_TXNID);
+ 
+ CREATE TABLE NEXT_LOCK_ID (
+   NL_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_LOCK_ID VALUES(1);
+ 
+ CREATE TABLE COMPACTION_QUEUE (
+   CQ_ID bigint PRIMARY KEY,
+   CQ_DATABASE varchar(128) NOT NULL,
+   CQ_TABLE varchar(128) NOT NULL,
+   CQ_PARTITION varchar(767),
+   CQ_STATE char(1) NOT NULL,
+   CQ_TYPE char(1) NOT NULL,
+   CQ_TBLPROPERTIES varchar(2048),
+   CQ_WORKER_ID varchar(128),
+   CQ_START bigint,
+   CQ_RUN_AS varchar(128),
+   CQ_HIGHEST_WRITE_ID bigint,
+   CQ_META_INFO bytea,
+   CQ_HADOOP_JOB_ID varchar(32)
+ );
+ 
+ CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+   NCQ_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+ 
+ CREATE TABLE COMPLETED_COMPACTIONS (
+   CC_ID bigint PRIMARY KEY,
+   CC_DATABASE varchar(128) NOT NULL,
+   CC_TABLE varchar(128) NOT NULL,
+   CC_PARTITION varchar(767),
+   CC_STATE char(1) NOT NULL,
+   CC_TYPE char(1) NOT NULL,
+   CC_TBLPROPERTIES varchar(2048),
+   CC_WORKER_ID varchar(128),
+   CC_START bigint,
+   CC_END bigint,
+   CC_RUN_AS varchar(128),
+   CC_HIGHEST_WRITE_ID bigint,
+   CC_META_INFO bytea,
+   CC_HADOOP_JOB_ID varchar(32)
+ );
+ 
+ CREATE TABLE AUX_TABLE (
+   MT_KEY1 varchar(128) NOT NULL,
+   MT_KEY2 bigint NOT NULL,
+   MT_COMMENT varchar(255),
+   PRIMARY KEY(MT_KEY1, MT_KEY2)
+ );
+ 
+ CREATE TABLE WRITE_SET (
+   WS_DATABASE varchar(128) NOT NULL,
+   WS_TABLE varchar(128) NOT NULL,
+   WS_PARTITION varchar(767),
+   WS_TXNID bigint NOT NULL,
+   WS_COMMIT_ID bigint NOT NULL,
+   WS_OPERATION_TYPE char(1) NOT NULL
+ );
+ 
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID bigint NOT NULL,
+   T2W_DATABASE varchar(128) NOT NULL,
+   T2W_TABLE varchar(256) NOT NULL,
+   T2W_WRITEID bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
+ 
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE varchar(128) NOT NULL,
+   NWI_TABLE varchar(256) NOT NULL,
+   NWI_NEXT bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
+ 
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID bigint NOT NULL,
+   MHL_MIN_OPEN_TXNID bigint NOT NULL,
+   PRIMARY KEY(MHL_TXNID)
+ );
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+ 
+ CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+   MRL_TXN_ID bigint NOT NULL,
+   MRL_DB_NAME varchar(128) NOT NULL,
+   MRL_TBL_NAME varchar(256) NOT NULL,
+   MRL_LAST_HEARTBEAT bigint NOT NULL,
+   PRIMARY KEY(MRL_TXN_ID)
+ );
+ 
+ CREATE TABLE "I_SCHEMA" (
+   "SCHEMA_ID" bigint primary key,
+   "SCHEMA_TYPE" integer not null,
+   "NAME" varchar(256) unique,
+   "DB_ID" bigint references "DBS" ("DB_ID"),
+   "COMPATIBILITY" integer not null,
+   "VALIDATION_LEVEL" integer not null,
+   "CAN_EVOLVE" boolean not null,
+   "SCHEMA_GROUP" varchar(256),
+   "DESCRIPTION" varchar(4000)
+ );
+ 
+ CREATE TABLE "SCHEMA_VERSION" (
+   "SCHEMA_VERSION_ID" bigint primary key,
+   "SCHEMA_ID" bigint references "I_SCHEMA" ("SCHEMA_ID"),
+   "VERSION" integer not null,
+   "CREATED_AT" bigint not null,
+   "CD_ID" bigint references "CDS" ("CD_ID"), 
+   "STATE" integer not null,
+   "DESCRIPTION" varchar(4000),
+   "SCHEMA_TEXT" text,
+   "FINGERPRINT" varchar(256),
+   "SCHEMA_VERSION_NAME" varchar(256),
+   "SERDE_ID" bigint references "SERDES" ("SERDE_ID"), 
+   unique ("SCHEMA_ID", "VERSION")
+ );
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY varchar(256) NOT NULL,
+   RTM_SRC_TXN_ID bigint NOT NULL,
+   RTM_TARGET_TXN_ID bigint NOT NULL,
+   PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
+ );
+ 
+ 
+ CREATE TABLE RUNTIME_STATS (
+  RS_ID bigint primary key,
+  CREATE_TIME bigint NOT NULL,
+  WEIGHT bigint NOT NULL,
+  PAYLOAD bytea
+ );
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ CREATE TABLE "TXN_WRITE_NOTIFICATION_LOG" (
+   "WNL_ID" bigint NOT NULL,
+   "WNL_TXNID" bigint NOT NULL,
+   "WNL_WRITEID" bigint NOT NULL,
+   "WNL_DATABASE" varchar(128) NOT NULL,
+   "WNL_TABLE" varchar(128) NOT NULL,
+   "WNL_PARTITION" varchar(1024) NOT NULL,
+   "WNL_TABLE_OBJ" text NOT NULL,
+   "WNL_PARTITION_OBJ" text,
+   "WNL_FILES" text,
+   "WNL_EVENT_TIME" integer NOT NULL,
+   PRIMARY KEY ("WNL_TXNID", "WNL_DATABASE", "WNL_TABLE", "WNL_PARTITION")
+ );
+ 
+ INSERT INTO "SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
+ 
+ -- -----------------------------------------------------------------
+ -- Record schema version. Should be the last step in the init script
+ -- -----------------------------------------------------------------
+ INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '4.0.0', 'Hive release version 4.0.0');

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
index 0000000,40d2795..eff08b3
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
@@@ -1,0 -1,6 +1,10 @@@
+ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0';
+ 
++-- HIVE-19416
++ALTER TABLE "TBLS" ADD "WRITE_ID" bigint;
++ALTER TABLE "PARTITIONS" ADD "WRITE_ID" bigint;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0';
+ 
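
For reference, a minimal post-upgrade check could look like the following (run manually with
psql; an illustrative sketch, not part of the upgrade script):

  -- The HIVE-19416 columns should now exist on TBLS and PARTITIONS.
  SELECT table_name, column_name
    FROM information_schema.columns
   WHERE table_name IN ('TBLS', 'PARTITIONS') AND column_name = 'WRITE_ID';

  -- The recorded schema version should be 4.0.0.
  SELECT "SCHEMA_VERSION" FROM "VERSION" WHERE "VER_ID" = 1;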


[47/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 0000000,bdcbf41..9eb8424
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@@ -1,0 -1,12207 +1,12509 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import static org.apache.commons.lang.StringUtils.join;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
+ 
+ import java.io.IOException;
+ import java.lang.reflect.Field;
+ import java.net.InetAddress;
+ import java.net.URI;
+ import java.nio.ByteBuffer;
+ import java.sql.Connection;
+ import java.sql.SQLException;
+ import java.sql.SQLIntegrityConstraintViolationException;
+ import java.sql.Statement;
+ import java.time.LocalDateTime;
+ import java.time.format.DateTimeFormatter;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.Collections;
+ import java.util.Comparator;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.Iterator;
+ import java.util.LinkedList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ import java.util.Properties;
+ import java.util.Set;
+ import java.util.TreeSet;
+ import java.util.UUID;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicBoolean;
+ import java.util.concurrent.locks.Lock;
+ import java.util.concurrent.locks.ReentrantLock;
+ import java.util.regex.Pattern;
+ 
+ import javax.jdo.JDOCanRetryException;
+ import javax.jdo.JDODataStoreException;
+ import javax.jdo.JDOException;
+ import javax.jdo.JDOHelper;
+ import javax.jdo.JDOObjectNotFoundException;
+ import javax.jdo.PersistenceManager;
+ import javax.jdo.PersistenceManagerFactory;
+ import javax.jdo.Query;
+ import javax.jdo.Transaction;
+ import javax.jdo.datastore.DataStoreCache;
+ import javax.jdo.datastore.JDOConnection;
+ import javax.jdo.identity.IntIdentity;
+ import javax.sql.DataSource;
+ 
+ import com.google.common.base.Strings;
+ 
+ import org.apache.commons.collections.CollectionUtils;
+ import org.apache.commons.lang.ArrayUtils;
+ import org.apache.commons.lang.StringUtils;
+ import org.apache.commons.lang.exception.ExceptionUtils;
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configurable;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.Path;
 -import org.apache.hadoop.hive.common.DatabaseName;
 -import org.apache.hadoop.hive.common.StatsSetupConst;
 -import org.apache.hadoop.hive.common.TableName;
++import org.apache.hadoop.hive.common.*;
+ import org.apache.hadoop.hive.metastore.MetaStoreDirectSql.SqlFilterForPushdown;
 -import org.apache.hadoop.hive.metastore.api.AggrStats;
 -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 -import org.apache.hadoop.hive.metastore.api.Catalog;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 -import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 -import org.apache.hadoop.hive.metastore.api.Database;
 -import org.apache.hadoop.hive.metastore.api.FieldSchema;
 -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 -import org.apache.hadoop.hive.metastore.api.Function;
 -import org.apache.hadoop.hive.metastore.api.FunctionType;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectType;
 -import org.apache.hadoop.hive.metastore.api.ISchema;
 -import org.apache.hadoop.hive.metastore.api.ISchemaName;
 -import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 -import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 -import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
 -import org.apache.hadoop.hive.metastore.api.MetaException;
 -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 -import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
 -import org.apache.hadoop.hive.metastore.api.Order;
 -import org.apache.hadoop.hive.metastore.api.Partition;
 -import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 -import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 -import org.apache.hadoop.hive.metastore.api.PartitionValuesRow;
 -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 -import org.apache.hadoop.hive.metastore.api.PrincipalType;
 -import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 -import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
 -import org.apache.hadoop.hive.metastore.api.ResourceType;
 -import org.apache.hadoop.hive.metastore.api.ResourceUri;
 -import org.apache.hadoop.hive.metastore.api.Role;
 -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 -import org.apache.hadoop.hive.metastore.api.RuntimeStat;
 -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
 -import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;
 -import org.apache.hadoop.hive.metastore.api.SchemaType;
 -import org.apache.hadoop.hive.metastore.api.SchemaValidation;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersion;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
 -import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 -import org.apache.hadoop.hive.metastore.api.SerdeType;
 -import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 -import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 -import org.apache.hadoop.hive.metastore.api.Table;
 -import org.apache.hadoop.hive.metastore.api.TableMeta;
 -import org.apache.hadoop.hive.metastore.api.Type;
 -import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 -import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMMapping;
 -import org.apache.hadoop.hive.metastore.api.WMNullablePool;
 -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMPool;
 -import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;
 -import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
 -import org.apache.hadoop.hive.metastore.api.WMTrigger;
 -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
 -import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
++import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider;
+ import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory;
+ import org.apache.hadoop.hive.metastore.metrics.Metrics;
+ import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
 -import org.apache.hadoop.hive.metastore.model.MCatalog;
 -import org.apache.hadoop.hive.metastore.model.MColumnDescriptor;
 -import org.apache.hadoop.hive.metastore.model.MConstraint;
 -import org.apache.hadoop.hive.metastore.model.MCreationMetadata;
 -import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
 -import org.apache.hadoop.hive.metastore.model.MDatabase;
 -import org.apache.hadoop.hive.metastore.model.MDelegationToken;
 -import org.apache.hadoop.hive.metastore.model.MFieldSchema;
 -import org.apache.hadoop.hive.metastore.model.MFunction;
 -import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege;
 -import org.apache.hadoop.hive.metastore.model.MISchema;
 -import org.apache.hadoop.hive.metastore.model.MMasterKey;
 -import org.apache.hadoop.hive.metastore.model.MMetastoreDBProperties;
 -import org.apache.hadoop.hive.metastore.model.MNotificationLog;
 -import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
 -import org.apache.hadoop.hive.metastore.model.MOrder;
 -import org.apache.hadoop.hive.metastore.model.MPartition;
 -import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege;
 -import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics;
 -import org.apache.hadoop.hive.metastore.model.MPartitionEvent;
 -import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege;
 -import org.apache.hadoop.hive.metastore.model.MResourceUri;
 -import org.apache.hadoop.hive.metastore.model.MRole;
 -import org.apache.hadoop.hive.metastore.model.MRoleMap;
 -import org.apache.hadoop.hive.metastore.model.MRuntimeStat;
 -import org.apache.hadoop.hive.metastore.model.MSchemaVersion;
 -import org.apache.hadoop.hive.metastore.model.MSerDeInfo;
 -import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
 -import org.apache.hadoop.hive.metastore.model.MStringList;
 -import org.apache.hadoop.hive.metastore.model.MTable;
 -import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
 -import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics;
 -import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
 -import org.apache.hadoop.hive.metastore.model.MType;
 -import org.apache.hadoop.hive.metastore.model.MVersionTable;
 -import org.apache.hadoop.hive.metastore.model.MWMMapping;
++import org.apache.hadoop.hive.metastore.model.*;
+ import org.apache.hadoop.hive.metastore.model.MWMMapping.EntityType;
 -import org.apache.hadoop.hive.metastore.model.MWMPool;
 -import org.apache.hadoop.hive.metastore.model.MWMResourcePlan;
+ import org.apache.hadoop.hive.metastore.model.MWMResourcePlan.Status;
 -import org.apache.hadoop.hive.metastore.model.MWMTrigger;
 -import org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
++import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
++import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+ import org.apache.hadoop.hive.metastore.utils.FileUtils;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.ObjectPair;
++import org.apache.hive.common.util.TxnIdUtils;
+ import org.apache.thrift.TException;
+ import org.datanucleus.AbstractNucleusContext;
+ import org.datanucleus.ClassLoaderResolver;
+ import org.datanucleus.ClassLoaderResolverImpl;
+ import org.datanucleus.NucleusContext;
+ import org.datanucleus.PropertyNames;
+ import org.datanucleus.api.jdo.JDOPersistenceManager;
+ import org.datanucleus.api.jdo.JDOPersistenceManagerFactory;
+ import org.datanucleus.store.rdbms.exceptions.MissingTableException;
+ import org.datanucleus.store.scostore.Store;
+ import org.datanucleus.util.WeakValueMap;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.codahale.metrics.Counter;
+ import com.codahale.metrics.MetricRegistry;
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.base.Preconditions;
+ import com.google.common.collect.Lists;
+ import com.google.common.collect.Maps;
+ import com.google.common.collect.Sets;
+ 
+ 
+ /**
+  * This class is the interface between the application logic and the database
+  * store that contains the objects. Refrain from putting any logic in the model.M*
+  * objects or in this file, as the former could be auto-generated and this class
+  * would then need to be made into an interface that can read both from a database
+  * and a filestore.
+  */
+ public class ObjectStore implements RawStore, Configurable {
+   private static Properties prop = null;
+   private static PersistenceManagerFactory pmf = null;
+   private static boolean forTwoMetastoreTesting = false;
+   private int batchSize = Batchable.NO_BATCHING;
+ 
+   private static final DateTimeFormatter YMDHMS_FORMAT = DateTimeFormatter.ofPattern(
+       "yyyy_MM_dd_HH_mm_ss");
+ 
+   private static Lock pmfPropLock = new ReentrantLock();
+   /**
+   * Verify the schema only once per JVM since the db connection info is static
+   */
+   private final static AtomicBoolean isSchemaVerified = new AtomicBoolean(false);
+   private static final Logger LOG = LoggerFactory.getLogger(ObjectStore.class);
+ 
+   private enum TXN_STATUS {
+     NO_STATE, OPEN, COMMITED, ROLLBACK
+   }
+ 
+   private static final Map<String, Class<?>> PINCLASSMAP;
+   private static final String HOSTNAME;
+   private static final String USER;
+   private static final String JDO_PARAM = ":param";
+   static {
+     Map<String, Class<?>> map = new HashMap<>();
+     map.put("table", MTable.class);
+     map.put("storagedescriptor", MStorageDescriptor.class);
+     map.put("serdeinfo", MSerDeInfo.class);
+     map.put("partition", MPartition.class);
+     map.put("database", MDatabase.class);
+     map.put("type", MType.class);
+     map.put("fieldschema", MFieldSchema.class);
+     map.put("order", MOrder.class);
+     PINCLASSMAP = Collections.unmodifiableMap(map);
+     String hostname = "UNKNOWN";
+     try {
+       InetAddress clientAddr = InetAddress.getLocalHost();
+       hostname = clientAddr.getHostAddress();
+     } catch (IOException e) {
+     }
+     HOSTNAME = hostname;
+     String user = System.getenv("USER");
+     USER = org.apache.commons.lang.StringUtils.defaultString(user, "UNKNOWN");
+   }
+ 
+ 
+   private boolean isInitialized = false;
+   private PersistenceManager pm = null;
+   private SQLGenerator sqlGenerator = null;
+   private MetaStoreDirectSql directSql = null;
+   private DatabaseProduct dbType = null;
+   private PartitionExpressionProxy expressionProxy = null;
+   private Configuration conf;
+   private volatile int openTrasactionCalls = 0;
+   private Transaction currentTransaction = null;
+   private TXN_STATUS transactionStatus = TXN_STATUS.NO_STATE;
+   private Pattern partitionValidationPattern;
+   private Counter directSqlErrors;
++  private boolean areTxnStatsSupported = false;
+ 
+   /**
+    * An AutoCloseable wrapper around the Query class that passes the Query object to the caller
+    * and lets the caller release the resources when the QueryWrapper goes out of scope.
+    */
+   public static class QueryWrapper implements AutoCloseable {
+     public Query query;
+ 
+     /**
+      * Explicitly closes the query object to release the resources
+      */
+     @Override
+     public void close() {
+       if (query != null) {
+         query.closeAll();
+         query = null;
+       }
+     }
+   }
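+ 
+   // Typical usage is via try-with-resources, so that close() releases the underlying JDO
+   // query resources deterministically (illustrative sketch only; assumes a live
+   // PersistenceManager 'pm'):
+   //
+   //   try (QueryWrapper wrapper = new QueryWrapper()) {
+   //     wrapper.query = pm.newQuery(MTable.class);
+   //     // ... execute wrapper.query and consume the results ...
+   //   } // close() invokes query.closeAll()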
+ 
+   public ObjectStore() {
+   }
+ 
+   @Override
+   public Configuration getConf() {
+     return conf;
+   }
+ 
+   /**
+    * Called whenever this object is instantiated using ReflectionUtils, and also
+    * on connection retries. In cases of connection retries, conf will usually
+    * contain modified values.
+    */
+   @Override
+   @SuppressWarnings("nls")
+   public void setConf(Configuration conf) {
+     // Although an instance of ObjectStore is accessed by one thread, there may
+     // be many threads with ObjectStore instances. So the static variables
+     // pmf and prop need to be protected with locks.
+     pmfPropLock.lock();
+     try {
+       isInitialized = false;
+       this.conf = conf;
++      this.areTxnStatsSupported = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_TXN_STATS_ENABLED);
+       configureSSL(conf);
+       Properties propsFromConf = getDataSourceProps(conf);
+       boolean propsChanged = !propsFromConf.equals(prop);
+ 
+       if (propsChanged) {
+         if (pmf != null){
+           clearOutPmfClassLoaderCache(pmf);
+           if (!forTwoMetastoreTesting) {
+             // close the underlying connection pool to avoid leaks
+             pmf.close();
+           }
+         }
+         pmf = null;
+         prop = null;
+       }
+ 
+       assert(!isActiveTransaction());
+       shutdown();
+       // Always re-create pm, as we don't know whether it was created by the
+       // most recent instance of the pmf.
+       pm = null;
+       directSql = null;
+       expressionProxy = null;
+       openTrasactionCalls = 0;
+       currentTransaction = null;
+       transactionStatus = TXN_STATUS.NO_STATE;
+ 
+       initialize(propsFromConf);
+ 
+       String partitionValidationRegex =
+           MetastoreConf.getVar(this.conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN);
+       if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) {
+         partitionValidationPattern = Pattern.compile(partitionValidationRegex);
+       } else {
+         partitionValidationPattern = null;
+       }
+ 
+       // Note, if metrics have not been initialized this will return null, which means we aren't
+       // using metrics.  Thus we should always check whether this is non-null before using.
+       MetricRegistry registry = Metrics.getRegistry();
+       if (registry != null) {
+         directSqlErrors = Metrics.getOrCreateCounter(MetricsConstants.DIRECTSQL_ERRORS);
+       }
+ 
+       this.batchSize = MetastoreConf.getIntVar(conf, ConfVars.RAWSTORE_PARTITION_BATCH_SIZE);
+ 
+       if (!isInitialized) {
+         throw new RuntimeException(
+         "Unable to create persistence manager. Check dss.log for details");
+       } else {
+         LOG.debug("Initialized ObjectStore");
+       }
+     } finally {
+       pmfPropLock.unlock();
+     }
+   }
+ 
+   private ClassLoader classLoader;
+   {
+     classLoader = Thread.currentThread().getContextClassLoader();
+     if (classLoader == null) {
+       classLoader = ObjectStore.class.getClassLoader();
+     }
+   }
+ 
+   @SuppressWarnings("nls")
+   private void initialize(Properties dsProps) {
+     int retryLimit = MetastoreConf.getIntVar(conf, ConfVars.HMS_HANDLER_ATTEMPTS);
+     long retryInterval = MetastoreConf.getTimeVar(conf,
+         ConfVars.HMS_HANDLER_INTERVAL, TimeUnit.MILLISECONDS);
+     int numTries = retryLimit;
+ 
+     while (numTries > 0){
+       try {
+         initializeHelper(dsProps);
+         return; // If we reach here, we succeed.
+       } catch (Exception e){
+         numTries--;
+         boolean retriable = isRetriableException(e);
+         if ((numTries > 0) && retriable){
+           LOG.info("Retriable exception while instantiating ObjectStore, retrying. " +
+               "{} tries left", numTries, e);
+           try {
+             Thread.sleep(retryInterval);
+           } catch (InterruptedException ie) {
+             // Restore the interrupted status, since we do not want to catch it.
+             LOG.debug("Interrupted while sleeping before retrying.", ie);
+             Thread.currentThread().interrupt();
+           }
+           // If we're here, we'll proceed down the next while loop iteration.
+         } else {
+           // we've reached our limit, throw the last one.
+           if (retriable){
+             LOG.warn("Exception retry limit reached, not retrying any longer.",
+               e);
+           } else {
+             LOG.debug("Non-retriable exception during ObjectStore initialize.", e);
+           }
+           throw e;
+         }
+       }
+     }
+   }
+ 
+   private static final Set<Class<? extends Throwable>> retriableExceptionClasses =
+       new HashSet<>(Arrays.asList(JDOCanRetryException.class));
+   /**
+    * Helper function for initialize to determine if we should retry an exception.
+    * We return true if the exception is of a known retriable type, or if one of its
+    * recursive .getCause() calls returns a known retriable exception type.
+    */
+   private boolean isRetriableException(Throwable e) {
+     if (e == null){
+       return false;
+     }
+     if (retriableExceptionClasses.contains(e.getClass())){
+       return true;
+     }
+     for (Class<? extends Throwable> c : retriableExceptionClasses){
+       if (c.isInstance(e)){
+         return true;
+       }
+     }
+ 
+     if (e.getCause() == null){
+       return false;
+     }
+     return isRetriableException(e.getCause());
+   }
+ 
+   /**
+    * Private helper that performs the initialization routine, so that we can retry if it fails.
+    * @param dsProps
+    */
+   private void initializeHelper(Properties dsProps) {
+     LOG.debug("ObjectStore, initialize called");
+     prop = dsProps;
+     pm = getPersistenceManager();
+     try {
+       String productName = MetaStoreDirectSql.getProductName(pm);
+       sqlGenerator = new SQLGenerator(DatabaseProduct.determineDatabaseProduct(productName), conf);
+     } catch (SQLException e) {
+       LOG.error("error trying to figure out the database product", e);
+       throw new RuntimeException(e);
+     }
+     isInitialized = pm != null;
+     if (isInitialized) {
+       dbType = determineDatabaseProduct();
+       expressionProxy = createExpressionProxy(conf);
+       if (MetastoreConf.getBoolVar(getConf(), ConfVars.TRY_DIRECT_SQL)) {
+         String schema = prop.getProperty("javax.jdo.mapping.Schema");
+         schema = org.apache.commons.lang.StringUtils.defaultIfBlank(schema, null);
+         directSql = new MetaStoreDirectSql(pm, conf, schema);
+       }
+     }
+     LOG.debug("RawStore: {}, with PersistenceManager: {}" +
+         " created in the thread with id: {}", this, pm, Thread.currentThread().getId());
+   }
+ 
+   private DatabaseProduct determineDatabaseProduct() {
+     try {
+       return DatabaseProduct.determineDatabaseProduct(getProductName(pm));
+     } catch (SQLException e) {
+       LOG.warn("Cannot determine database product; assuming OTHER", e);
+       return DatabaseProduct.OTHER;
+     }
+   }
+ 
+   private static String getProductName(PersistenceManager pm) {
+     JDOConnection jdoConn = pm.getDataStoreConnection();
+     try {
+       return ((Connection)jdoConn.getNativeConnection()).getMetaData().getDatabaseProductName();
+     } catch (Throwable t) {
+       LOG.warn("Error retrieving product name", t);
+       return null;
+     } finally {
+       jdoConn.close(); // We must release the connection before we call other pm methods.
+     }
+   }
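
For comparison, the same product-name lookup over a plain JDBC connection (without the JDOConnection wrapper) would look roughly like the sketch below; the Derby in-memory URL is a made-up example and assumes the Derby embedded driver is on the classpath:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;

    public class ProductNameSketch {
      public static void main(String[] args) throws SQLException {
        // Hypothetical JDBC URL, for illustration only.
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:metastore;create=true")) {
          // Same call chain as getProductName above, minus the JDO plumbing.
          System.out.println(conn.getMetaData().getDatabaseProductName()); // e.g. "Apache Derby"
        }
      }
    }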
+ 
+   /**
+    * Creates the proxy used to evaluate expressions. This is here to prevent circular
+    * dependency - ql -&gt; metastore client &lt;-&gt; metastore server -&gt; ql. If server and
+    * client are split, this can be removed.
+    * @param conf Configuration.
+    * @return The partition expression proxy.
+    */
+   private static PartitionExpressionProxy createExpressionProxy(Configuration conf) {
+     String className = MetastoreConf.getVar(conf, ConfVars.EXPRESSION_PROXY_CLASS);
+     try {
+       Class<? extends PartitionExpressionProxy> clazz =
+            JavaUtils.getClass(className, PartitionExpressionProxy.class);
+       return JavaUtils.newInstance(clazz, new Class<?>[0], new Object[0]);
+     } catch (MetaException e) {
+       LOG.error("Error loading PartitionExpressionProxy", e);
+       throw new RuntimeException("Error loading PartitionExpressionProxy: " + e.getMessage());
+     }
+   }
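
The proxy is loaded reflectively by class name through a no-argument constructor. A stripped-down sketch of that pattern, using plain java.lang.reflect instead of the JavaUtils helpers and a hypothetical interface/implementation pair, could read:

    public class ReflectiveProxySketch {
      // Hypothetical stand-ins for PartitionExpressionProxy and its configured implementation.
      public interface ExprProxy { String name(); }
      public static class DefaultExprProxy implements ExprProxy {
        public String name() { return "default"; }
      }

      static ExprProxy createProxy(String className) {
        try {
          Class<? extends ExprProxy> clazz = Class.forName(className).asSubclass(ExprProxy.class);
          return clazz.getDeclaredConstructor().newInstance();  // no-arg constructor, as above
        } catch (ReflectiveOperationException e) {
          throw new RuntimeException("Error loading proxy class " + className, e);
        }
      }

      public static void main(String[] args) {
        System.out.println(createProxy(DefaultExprProxy.class.getName()).name()); // prints "default"
      }
    }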
+ 
+   /**
+    * Configure the SSL properties of the connection from the provided config.
+    * @param conf configuration holding the SSL properties
+    */
+   private static void configureSSL(Configuration conf) {
+     // SSL support
+     String sslPropString = MetastoreConf.getVar(conf, ConfVars.DBACCESS_SSL_PROPS);
+     if (org.apache.commons.lang.StringUtils.isNotEmpty(sslPropString)) {
+       LOG.info("Metastore setting SSL properties of the connection to backed DB");
+       for (String sslProp : sslPropString.split(",")) {
+         String[] pair = sslProp.trim().split("=");
+         if (pair != null && pair.length == 2) {
+           System.setProperty(pair[0].trim(), pair[1].trim());
+         } else {
+           LOG.warn("Invalid metastore property value for {}", ConfVars.DBACCESS_SSL_PROPS);
+         }
+       }
+     }
+   }
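
DBACCESS_SSL_PROPS is expected to hold a comma-separated list of key=value pairs. A small self-contained sketch of that parsing, printing instead of calling System.setProperty and using made-up property values, is:

    public class SslPropsSketch {
      public static void main(String[] args) {
        // Illustrative value only; real entries would be javax.net.ssl.* style system properties.
        String sslPropString =
            "javax.net.ssl.trustStore=/tmp/truststore.jks, javax.net.ssl.trustStorePassword=secret";
        for (String sslProp : sslPropString.split(",")) {
          String[] pair = sslProp.trim().split("=");
          if (pair.length == 2) {
            System.out.println(pair[0].trim() + " -> " + pair[1].trim()); // would be System.setProperty(...)
          } else {
            System.out.println("Skipping malformed entry: " + sslProp);
          }
        }
      }
    }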
+ 
+   /**
+    * Properties specified in hive-default.xml override the properties specified
+    * in jpox.properties.
+    */
+   @SuppressWarnings("nls")
+   private static Properties getDataSourceProps(Configuration conf) {
+     Properties prop = new Properties();
+     correctAutoStartMechanism(conf);
+ 
+     // First, go through and set all our values for datanucleus and javax.jdo parameters.  This
+     // has to be a separate first step because we don't set the default values in the config object.
+     for (ConfVars var : MetastoreConf.dataNucleusAndJdoConfs) {
+       String confVal = MetastoreConf.getAsString(conf, var);
+       String varName = var.getVarname();
+       Object prevVal = prop.setProperty(varName, confVal);
+       if (MetastoreConf.isPrintable(varName)) {
+         LOG.debug("Overriding {} value {} from jpox.properties with {}",
+           varName, prevVal, confVal);
+       }
+     }
+ 
+     // Now, we need to look for any values that the user set that MetastoreConf doesn't know about.
+     // TODO Commenting this out for now, as it breaks because the conf values aren't getting properly
+     // interpolated in case of variables.  See HIVE-17788.
+     /*
+     for (Map.Entry<String, String> e : conf) {
+       if (e.getKey().startsWith("datanucleus.") || e.getKey().startsWith("javax.jdo.")) {
+         // We have to handle this differently depending on whether it is a value known to
+         // MetastoreConf or not.  If it is, we need to get the default value if a value isn't
+         // provided.  If not, we just set whatever the user has set.
+         Object prevVal = prop.setProperty(e.getKey(), e.getValue());
+         if (LOG.isDebugEnabled() && MetastoreConf.isPrintable(e.getKey())) {
+           LOG.debug("Overriding " + e.getKey() + " value " + prevVal
+               + " from  jpox.properties with " + e.getValue());
+         }
+       }
+     }
+     */
+ 
+     // Password may no longer be in the conf, use getPassword()
+     try {
+       String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD);
+       if (org.apache.commons.lang.StringUtils.isNotEmpty(passwd)) {
+         // We can get away with the use of varname here because varname == hiveName for PWD
+         prop.setProperty(ConfVars.PWD.getVarname(), passwd);
+       }
+     } catch (IOException err) {
+       throw new RuntimeException("Error getting metastore password: " + err.getMessage(), err);
+     }
+ 
+     if (LOG.isDebugEnabled()) {
+       for (Entry<Object, Object> e : prop.entrySet()) {
+         if (MetastoreConf.isPrintable(e.getKey().toString())) {
+           LOG.debug("{} = {}", e.getKey(), e.getValue());
+         }
+       }
+     }
+ 
+     return prop;
+   }
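
The commented-out block above (disabled pending HIVE-17788) is meant to pass through any user-set datanucleus.* or javax.jdo.* keys. A toy sketch of that filtering, with a plain Map standing in for the Configuration and invented property values, is:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.Properties;

    public class JdoPropsSketch {
      public static void main(String[] args) {
        // Stand-in for iterating the Configuration; keys and values are illustrative only.
        Map<String, String> conf = new LinkedHashMap<>();
        conf.put("datanucleus.connectionPoolingType", "HikariCP");
        conf.put("javax.jdo.option.ConnectionURL", "jdbc:derby:memory:ms;create=true");
        conf.put("hive.metastore.warehouse.dir", "/warehouse"); // not a JDO/DataNucleus key, skipped

        Properties prop = new Properties();
        for (Map.Entry<String, String> e : conf.entrySet()) {
          if (e.getKey().startsWith("datanucleus.") || e.getKey().startsWith("javax.jdo.")) {
            prop.setProperty(e.getKey(), e.getValue());
          }
        }
        prop.list(System.out); // only the two prefixed keys survive
      }
    }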
+ 
+   /**
+    * Update conf to set datanucleus.autoStartMechanismMode=ignored.
+    * This is necessary to be able to use an older version of Hive against
+    * an upgraded but compatible metastore schema created in the database by a
+    * newer version of Hive.
+    * @param conf configuration to update
+    */
+   private static void correctAutoStartMechanism(Configuration conf) {
+     final String autoStartKey = "datanucleus.autoStartMechanismMode";
+     final String autoStartIgnore = "ignored";
+     String currentAutoStartVal = conf.get(autoStartKey);
+     if (!autoStartIgnore.equalsIgnoreCase(currentAutoStartVal)) {
+       LOG.warn("{} is set to unsupported value {} . Setting it to value: {}", autoStartKey,
+         conf.get(autoStartKey), autoStartIgnore);
+     }
+     conf.set(autoStartKey, autoStartIgnore);
+   }
+ 
+   private static synchronized PersistenceManagerFactory getPMF() {
+     if (pmf == null) {
+ 
+       Configuration conf = MetastoreConf.newMetastoreConf();
+       DataSourceProvider dsp = DataSourceProviderFactory.hasProviderSpecificConfigurations(conf) ?
+               DataSourceProviderFactory.getDataSourceProvider(conf) : null;
+ 
+       if (dsp == null) {
+         pmf = JDOHelper.getPersistenceManagerFactory(prop);
+       } else {
+         try {
+           DataSource ds = dsp.create(conf);
+           Map<Object, Object> dsProperties = new HashMap<>();
+           //Any preexisting datanucleus property should be passed along
+           dsProperties.putAll(prop);
+           dsProperties.put(PropertyNames.PROPERTY_CONNECTION_FACTORY, ds);
+           dsProperties.put(PropertyNames.PROPERTY_CONNECTION_FACTORY2, ds);
+           dsProperties.put("javax.jdo.PersistenceManagerFactoryClass",
+               "org.datanucleus.api.jdo.JDOPersistenceManagerFactory");
+           pmf = JDOHelper.getPersistenceManagerFactory(dsProperties);
+         } catch (SQLException e) {
+           LOG.warn("Could not create PersistenceManagerFactory using " +
+               "connection pool properties, will fall back", e);
+           pmf = JDOHelper.getPersistenceManagerFactory(prop);
+         }
+       }
+       DataStoreCache dsc = pmf.getDataStoreCache();
+       if (dsc != null) {
+         String objTypes = MetastoreConf.getVar(conf, ConfVars.CACHE_PINOBJTYPES);
+         LOG.info("Setting MetaStore object pin classes with hive.metastore.cache.pinobjtypes=\"{}\"", objTypes);
+         if (org.apache.commons.lang.StringUtils.isNotEmpty(objTypes)) {
+           String[] typeTokens = objTypes.toLowerCase().split(",");
+           for (String type : typeTokens) {
+             type = type.trim();
+             if (PINCLASSMAP.containsKey(type)) {
+               dsc.pinAll(true, PINCLASSMAP.get(type));
+             } else {
+               LOG.warn("{} is not one of the pinnable object types: {}", type,
+                 org.apache.commons.lang.StringUtils.join(PINCLASSMAP.keySet(), " "));
+             }
+           }
+         }
+       } else {
+         LOG.warn("PersistenceManagerFactory returned null DataStoreCache object. Unable to initialize object pin types defined by hive.metastore.cache.pinobjtypes");
+       }
+     }
+     return pmf;
+   }
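
CACHE_PINOBJTYPES is a comma-separated, case-insensitive list of pinnable type names. The tokenising done above can be illustrated in isolation; the set below is only a partial stand-in for PINCLASSMAP.keySet() (the real map points at model classes such as MTable):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class PinTypesSketch {
      public static void main(String[] args) {
        // Partial stand-in for PINCLASSMAP.keySet().
        Set<String> pinnable = new HashSet<>(Arrays.asList("table", "database", "partition", "fieldschema"));
        String objTypes = "Table, Database, Bogus";
        for (String type : objTypes.toLowerCase().split(",")) {
          type = type.trim();
          if (pinnable.contains(type)) {
            System.out.println("pinning " + type);         // dsc.pinAll(true, PINCLASSMAP.get(type))
          } else {
            System.out.println(type + " is not pinnable"); // logged as a warning above
          }
        }
      }
    }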
+ 
+   @InterfaceAudience.LimitedPrivate({"HCATALOG"})
+   @InterfaceStability.Evolving
+   public PersistenceManager getPersistenceManager() {
+     return getPMF().getPersistenceManager();
+   }
+ 
+   @Override
+   public void shutdown() {
+     LOG.debug("RawStore: {}, with PersistenceManager: {} will be shutdown", this, pm);
+     if (pm != null) {
+       pm.close();
+       pm = null;
+     }
+   }
+ 
+   /**
+    * Opens a new transaction, or reuses the one already created. Every call of this
+    * function must have a corresponding commit or rollback call.
+    *
+    * @return an active transaction
+    */
+ 
+   @Override
+   public boolean openTransaction() {
+     openTrasactionCalls++;
+     if (openTrasactionCalls == 1) {
+       currentTransaction = pm.currentTransaction();
+       currentTransaction.begin();
+       transactionStatus = TXN_STATUS.OPEN;
+     } else {
+       // openTransactionCalls > 1 means this is an interior transaction
+       // We should already have a transaction created that is active.
+       if ((currentTransaction == null) || (!currentTransaction.isActive())){
+         throw new RuntimeException("openTransaction called in an interior"
+             + " transaction scope, but currentTransaction is not active.");
+       }
+     }
+ 
+     boolean result = currentTransaction.isActive();
+     debugLog("Open transaction: count = " + openTrasactionCalls + ", isActive = " + result);
+     return result;
+   }
+ 
+   /**
+    * If this is the commit matching the outermost open call, then an actual commit is
+    * performed on the underlying transaction.
+    *
+    * @return Always returns true
+    */
+   @Override
+   @SuppressWarnings("nls")
+   public boolean commitTransaction() {
+     if (TXN_STATUS.ROLLBACK == transactionStatus) {
+       debugLog("Commit transaction: rollback");
+       return false;
+     }
+     if (openTrasactionCalls <= 0) {
+       RuntimeException e = new RuntimeException("commitTransaction was called but openTransactionCalls = "
+           + openTrasactionCalls + ". This probably indicates that there are unbalanced " +
+           "calls to openTransaction/commitTransaction");
+       LOG.error("Unbalanced calls to open/commit Transaction", e);
+       throw e;
+     }
+     if (!currentTransaction.isActive()) {
+       RuntimeException e = new RuntimeException("commitTransaction was called but the current "
+           + "transaction is not active (openTransactionCalls = " + openTrasactionCalls
+           + "). This probably indicates that there are unbalanced calls to openTransaction/commitTransaction");
+       LOG.error("Unbalanced calls to open/commit Transaction", e);
+       throw e;
+     }
+     openTrasactionCalls--;
+     debugLog("Commit transaction: count = " + openTrasactionCalls + ", isactive "+ currentTransaction.isActive());
+ 
+     if ((openTrasactionCalls == 0) && currentTransaction.isActive()) {
+       transactionStatus = TXN_STATUS.COMMITED;
+       currentTransaction.commit();
+     }
+     return true;
+   }
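
openTransaction/commitTransaction are reference-counted: interior calls only adjust a counter, and the real JDO commit fires when the outermost scope commits. A toy simulation of that counter, with no JDO involved and all names invented, behaves like this:

    public class TxnNestingSketch {
      private int openCalls = 0;

      void open() {
        if (++openCalls == 1) {
          System.out.println("BEGIN real transaction");   // outermost open starts the real txn
        }
      }

      void commit() {
        if (openCalls <= 0) {
          throw new RuntimeException("unbalanced commit");
        }
        if (--openCalls == 0) {
          System.out.println("COMMIT real transaction");  // only the outermost commit hits the store
        }
      }

      public static void main(String[] args) {
        TxnNestingSketch t = new TxnNestingSketch();
        t.open();    // outer scope: real BEGIN
        t.open();    // interior scope: counter only
        t.commit();  // interior commit: nothing happens yet
        t.commit();  // outer commit: real COMMIT
      }
    }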
+ 
+   /**
+    * @return true if there is an active transaction. If the current transaction
+    *         is either committed or rolled back it returns false
+    */
+   @Override
+   public boolean isActiveTransaction() {
+     if (currentTransaction == null) {
+       return false;
+     }
+     return currentTransaction.isActive();
+   }
+ 
+   /**
+    * Rolls back the current transaction if it is active
+    */
+   @Override
+   public void rollbackTransaction() {
+     if (openTrasactionCalls < 1) {
+       debugLog("rolling back transaction: no open transactions: " + openTrasactionCalls);
+       return;
+     }
+     debugLog("Rollback transaction, isActive: " + currentTransaction.isActive());
+     try {
+       if (currentTransaction.isActive()
+           && transactionStatus != TXN_STATUS.ROLLBACK) {
+         currentTransaction.rollback();
+       }
+     } finally {
+       openTrasactionCalls = 0;
+       transactionStatus = TXN_STATUS.ROLLBACK;
+       // remove all detached objects from the cache, since the transaction is
+       // being rolled back they are no longer relevant, and this prevents them
+       // from reattaching in future transactions
+       pm.evictAll();
+     }
+   }
+ 
+   @Override
+   public void createCatalog(Catalog cat) throws MetaException {
+     LOG.debug("Creating catalog " + cat.getName());
+     boolean committed = false;
+     MCatalog mCat = catToMCat(cat);
+     try {
+       openTransaction();
+       pm.makePersistent(mCat);
+       committed = commitTransaction();
+     } finally {
+       if (!committed) {
+         rollbackTransaction();
+       }
+     }
+   }
+ 
+   @Override
+   public void alterCatalog(String catName, Catalog cat)
+       throws MetaException, InvalidOperationException {
+     if (!cat.getName().equals(catName)) {
+       throw new InvalidOperationException("You cannot change a catalog's name");
+     }
+     boolean committed = false;
+     try {
+       MCatalog mCat = getMCatalog(catName);
+       if (org.apache.commons.lang.StringUtils.isNotBlank(cat.getLocationUri())) {
+         mCat.setLocationUri(cat.getLocationUri());
+       }
+       if (org.apache.commons.lang.StringUtils.isNotBlank(cat.getDescription())) {
+         mCat.setDescription(cat.getDescription());
+       }
+       openTransaction();
+       pm.makePersistent(mCat);
+       committed = commitTransaction();
+     } finally {
+       if (!committed) {
+         rollbackTransaction();
+       }
+     }
+   }
+ 
+   @Override
+   public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     LOG.debug("Fetching catalog " + catalogName);
+     MCatalog mCat = getMCatalog(catalogName);
+     if (mCat == null) {
+       throw new NoSuchObjectException("No catalog " + catalogName);
+     }
+     return mCatToCat(mCat);
+   }
+ 
+   @Override
+   public List<String> getCatalogs() throws MetaException {
+     LOG.debug("Fetching all catalog names");
+     boolean commited = false;
+     List<String> catalogs = null;
+ 
+     String queryStr = "select name from org.apache.hadoop.hive.metastore.model.MCatalog";
+     Query query = null;
+ 
+     openTransaction();
+     try {
+       query = pm.newQuery(queryStr);
+       query.setResult("name");
+       catalogs = new ArrayList<>((Collection<String>) query.execute());
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     Collections.sort(catalogs);
+     return catalogs;
+   }
+ 
+   @Override
+   public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     LOG.debug("Dropping catalog " + catalogName);
+     boolean committed = false;
+     try {
+       openTransaction();
+       MCatalog mCat = getMCatalog(catalogName);
+       pm.retrieve(mCat);
+       if (mCat == null) {
+         throw new NoSuchObjectException("No catalog " + catalogName);
+       }
+       pm.deletePersistent(mCat);
+       committed = commitTransaction();
+     } finally {
+       if (!committed) {
+         rollbackTransaction();
+       }
+     }
+   }
+ 
+   private MCatalog getMCatalog(String catalogName) throws MetaException {
+     boolean committed = false;
+     Query query = null;
+     try {
+       openTransaction();
+       catalogName = normalizeIdentifier(catalogName);
+       query = pm.newQuery(MCatalog.class, "name == catname");
+       query.declareParameters("java.lang.String catname");
+       query.setUnique(true);
+       MCatalog mCat = (MCatalog)query.execute(catalogName);
+       pm.retrieve(mCat);
+       committed = commitTransaction();
+       return mCat;
+     } finally {
+       rollbackAndCleanup(committed, query);
+     }
+   }
+ 
+   private MCatalog catToMCat(Catalog cat) {
+     MCatalog mCat = new MCatalog();
+     mCat.setName(normalizeIdentifier(cat.getName()));
+     if (cat.isSetDescription()) {
+       mCat.setDescription(cat.getDescription());
+     }
+     mCat.setLocationUri(cat.getLocationUri());
+     return mCat;
+   }
+ 
+   private Catalog mCatToCat(MCatalog mCat) {
+     Catalog cat = new Catalog(mCat.getName(), mCat.getLocationUri());
+     if (mCat.getDescription() != null) {
+       cat.setDescription(mCat.getDescription());
+     }
+     return cat;
+   }
+ 
+   @Override
+   public void createDatabase(Database db) throws InvalidObjectException, MetaException {
+     boolean commited = false;
+     MDatabase mdb = new MDatabase();
+     assert db.getCatalogName() != null;
+     mdb.setCatalogName(normalizeIdentifier(db.getCatalogName()));
+     assert mdb.getCatalogName() != null;
+     mdb.setName(db.getName().toLowerCase());
+     mdb.setLocationUri(db.getLocationUri());
+     mdb.setDescription(db.getDescription());
+     mdb.setParameters(db.getParameters());
+     mdb.setOwnerName(db.getOwnerName());
+     PrincipalType ownerType = db.getOwnerType();
+     mdb.setOwnerType((null == ownerType ? PrincipalType.USER.name() : ownerType.name()));
+     try {
+       openTransaction();
+       pm.makePersistent(mdb);
+       commited = commitTransaction();
+     } finally {
+       if (!commited) {
+         rollbackTransaction();
+       }
+     }
+   }
+ 
+   @SuppressWarnings("nls")
+   private MDatabase getMDatabase(String catName, String name) throws NoSuchObjectException {
+     MDatabase mdb = null;
+     boolean commited = false;
+     Query query = null;
+     try {
+       openTransaction();
+       name = normalizeIdentifier(name);
+       catName = normalizeIdentifier(catName);
+       query = pm.newQuery(MDatabase.class, "name == dbname && catalogName == catname");
+       query.declareParameters("java.lang.String dbname, java.lang.String catname");
+       query.setUnique(true);
+       mdb = (MDatabase) query.execute(name, catName);
+       pm.retrieve(mdb);
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     if (mdb == null) {
+       throw new NoSuchObjectException("There is no database " + catName + "." + name);
+     }
+     return mdb;
+   }
+ 
+   @Override
+   public Database getDatabase(String catalogName, String name) throws NoSuchObjectException {
+     MetaException ex = null;
+     Database db = null;
+     try {
+       db = getDatabaseInternal(catalogName, name);
+     } catch (MetaException e) {
+       // The signature only allows NSOE, and NSOE is a flat exception, so we cannot set the
+       // MetaException as its cause. We should not lose the info we got here, but it is very
+       // likely that the MetaException is really just an NSOE message, so we log it and throw
+       // an NSOE carrying the message.
+       ex = e;
+     }
+     if (db == null) {
+       LOG.warn("Failed to get database {}.{}, returning NoSuchObjectException",
+           catalogName, name, ex);
+       throw new NoSuchObjectException(name + (ex == null ? "" : (": " + ex.getMessage())));
+     }
+     return db;
+   }
+ 
+   public Database getDatabaseInternal(String catalogName, String name)
+       throws MetaException, NoSuchObjectException {
+     return new GetDbHelper(catalogName, name, true, true) {
+       @Override
+       protected Database getSqlResult(GetHelper<Database> ctx) throws MetaException {
+         return directSql.getDatabase(catalogName, dbName);
+       }
+ 
+       @Override
+       protected Database getJdoResult(GetHelper<Database> ctx) throws MetaException, NoSuchObjectException {
+         return getJDODatabase(catalogName, dbName);
+       }
+     }.run(false);
+    }
+ 
+   public Database getJDODatabase(String catName, String name) throws NoSuchObjectException {
+     MDatabase mdb = null;
+     boolean commited = false;
+     try {
+       openTransaction();
+       mdb = getMDatabase(catName, name);
+       commited = commitTransaction();
+     } finally {
+       if (!commited) {
+         rollbackTransaction();
+       }
+     }
+     Database db = new Database();
+     db.setName(mdb.getName());
+     db.setDescription(mdb.getDescription());
+     db.setLocationUri(mdb.getLocationUri());
+     db.setParameters(convertMap(mdb.getParameters()));
+     db.setOwnerName(mdb.getOwnerName());
+     String type = org.apache.commons.lang.StringUtils.defaultIfBlank(mdb.getOwnerType(), null);
+     PrincipalType principalType = (type == null) ? null : PrincipalType.valueOf(type);
+     db.setOwnerType(principalType);
+     db.setCatalogName(catName);
+     return db;
+   }
+ 
+   /**
+    * Alter the database object in the metastore. Currently the parameters, owner,
+    * description and location URI of the database can be changed.
+    * @param catName the catalog name
+    * @param dbName the database name
+    * @param db the Hive Database object
+    * @throws MetaException
+    * @throws NoSuchObjectException
+    */
+   @Override
+   public boolean alterDatabase(String catName, String dbName, Database db)
+     throws MetaException, NoSuchObjectException {
+ 
+     MDatabase mdb = null;
+     boolean committed = false;
+     try {
+       mdb = getMDatabase(catName, dbName);
+       mdb.setParameters(db.getParameters());
+       mdb.setOwnerName(db.getOwnerName());
+       if (db.getOwnerType() != null) {
+         mdb.setOwnerType(db.getOwnerType().name());
+       }
+       if (org.apache.commons.lang.StringUtils.isNotBlank(db.getDescription())) {
+         mdb.setDescription(db.getDescription());
+       }
+       if (org.apache.commons.lang.StringUtils.isNotBlank(db.getLocationUri())) {
+         mdb.setLocationUri(db.getLocationUri());
+       }
+       openTransaction();
+       pm.makePersistent(mdb);
+       committed = commitTransaction();
+     } finally {
+       if (!committed) {
+         rollbackTransaction();
+         return false;
+       }
+     }
+     return true;
+   }
+ 
+   @Override
+   public boolean dropDatabase(String catName, String dbname)
+       throws NoSuchObjectException, MetaException {
+     boolean success = false;
+     LOG.info("Dropping database {}.{} along with all tables", catName, dbname);
+     dbname = normalizeIdentifier(dbname);
+     catName = normalizeIdentifier(catName);
+     QueryWrapper queryWrapper = new QueryWrapper();
+     try {
+       openTransaction();
+ 
+       // then drop the database
+       MDatabase db = getMDatabase(catName, dbname);
+       pm.retrieve(db);
+       if (db != null) {
+         List<MDBPrivilege> dbGrants = this.listDatabaseGrants(catName, dbname, null, queryWrapper);
+         if (CollectionUtils.isNotEmpty(dbGrants)) {
+           pm.deletePersistentAll(dbGrants);
+         }
+         pm.deletePersistent(db);
+       }
+       success = commitTransaction();
+     } finally {
+       rollbackAndCleanup(success, queryWrapper);
+     }
+     return success;
+   }
+ 
+   @Override
+   public List<String> getDatabases(String catName, String pattern) throws MetaException {
+     if (pattern == null || pattern.equals("*")) {
+       return getAllDatabases(catName);
+     }
+     boolean commited = false;
+     List<String> databases = null;
+     Query query = null;
+     try {
+       openTransaction();
+       // Take the pattern and split it on the | to get all the composing
+       // patterns
+       String[] subpatterns = pattern.trim().split("\\|");
+       StringBuilder filterBuilder = new StringBuilder();
+       List<String> parameterVals = new ArrayList<>(subpatterns.length);
+       appendSimpleCondition(filterBuilder, "catalogName", new String[] {catName}, parameterVals);
+       appendPatternCondition(filterBuilder, "name", subpatterns, parameterVals);
+       query = pm.newQuery(MDatabase.class, filterBuilder.toString());
+       query.setResult("name");
+       query.setOrdering("name ascending");
+       Collection<String> names = (Collection<String>) query.executeWithArray(parameterVals.toArray(new String[0]));
+       databases = new ArrayList<>(names);
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return databases;
+   }
+ 
+   @Override
+   public List<String> getAllDatabases(String catName) throws MetaException {
+     boolean commited = false;
+     List<String> databases = null;
+ 
+     Query query = null;
+     catName = normalizeIdentifier(catName);
+ 
+     openTransaction();
+     try {
+       query = pm.newQuery("select name from org.apache.hadoop.hive.metastore.model.MDatabase " +
+           "where catalogName == catname");
+       query.declareParameters("java.lang.String catname");
+       query.setResult("name");
+       databases = new ArrayList<>((Collection<String>) query.execute(catName));
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     Collections.sort(databases);
+     return databases;
+   }
+ 
+   private MType getMType(Type type) {
+     List<MFieldSchema> fields = new ArrayList<>();
+     if (type.getFields() != null) {
+       for (FieldSchema field : type.getFields()) {
+         fields.add(new MFieldSchema(field.getName(), field.getType(), field
+             .getComment()));
+       }
+     }
+     return new MType(type.getName(), type.getType1(), type.getType2(), fields);
+   }
+ 
+   private Type getType(MType mtype) {
+     List<FieldSchema> fields = new ArrayList<>();
+     if (mtype.getFields() != null) {
+       for (MFieldSchema field : mtype.getFields()) {
+         fields.add(new FieldSchema(field.getName(), field.getType(), field
+             .getComment()));
+       }
+     }
+     Type ret = new Type();
+     ret.setName(mtype.getName());
+     ret.setType1(mtype.getType1());
+     ret.setType2(mtype.getType2());
+     ret.setFields(fields);
+     return ret;
+   }
+ 
+   @Override
+   public boolean createType(Type type) {
+     boolean success = false;
+     MType mtype = getMType(type);
+     boolean commited = false;
+     try {
+       openTransaction();
+       pm.makePersistent(mtype);
+       commited = commitTransaction();
+       success = true;
+     } finally {
+       if (!commited) {
+         rollbackTransaction();
+       }
+     }
+     return success;
+   }
+ 
+   @Override
+   public Type getType(String typeName) {
+     Type type = null;
+     boolean commited = false;
+     Query query = null;
+     try {
+       openTransaction();
+       query = pm.newQuery(MType.class, "name == typeName");
+       query.declareParameters("java.lang.String typeName");
+       query.setUnique(true);
+       MType mtype = (MType) query.execute(typeName.trim());
+       pm.retrieve(mtype);
+       if (mtype != null) {
+         type = getType(mtype);
+       }
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return type;
+   }
+ 
+   @Override
+   public boolean dropType(String typeName) {
+     boolean success = false;
+     Query query = null;
+     try {
+       openTransaction();
+       query = pm.newQuery(MType.class, "name == typeName");
+       query.declareParameters("java.lang.String typeName");
+       query.setUnique(true);
+       MType type = (MType) query.execute(typeName.trim());
+       pm.retrieve(type);
+       if (type != null) {
+         pm.deletePersistent(type);
+       }
+       success = commitTransaction();
+     } catch (JDOObjectNotFoundException e) {
+       success = commitTransaction();
+       LOG.debug("type not found {}", typeName, e);
+     } finally {
+       rollbackAndCleanup(success, query);
+     }
+     return success;
+   }
+ 
+   @Override
+   public List<String> createTableWithConstraints(Table tbl,
+     List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+     List<SQLUniqueConstraint> uniqueConstraints, List<SQLNotNullConstraint> notNullConstraints,
+     List<SQLDefaultConstraint> defaultConstraints, List<SQLCheckConstraint> checkConstraints)
+     throws InvalidObjectException, MetaException {
+     boolean success = false;
+     try {
+       openTransaction();
+       createTable(tbl);
+       // Add constraints.
+       // We need not do a deep retrieval of the Table Column Descriptor while persisting the
+       // constraints since this transaction involving create table is not yet committed.
+       List<String> constraintNames = new ArrayList<>();
+       if (foreignKeys != null) {
+         constraintNames.addAll(addForeignKeys(foreignKeys, false, primaryKeys, uniqueConstraints));
+       }
+       if (primaryKeys != null) {
+         constraintNames.addAll(addPrimaryKeys(primaryKeys, false));
+       }
+       if (uniqueConstraints != null) {
+         constraintNames.addAll(addUniqueConstraints(uniqueConstraints, false));
+       }
+       if (notNullConstraints != null) {
+         constraintNames.addAll(addNotNullConstraints(notNullConstraints, false));
+       }
+       if (defaultConstraints != null) {
+         constraintNames.addAll(addDefaultConstraints(defaultConstraints, false));
+       }
+       if (checkConstraints != null) {
+         constraintNames.addAll(addCheckConstraints(checkConstraints, false));
+       }
+       success = commitTransaction();
+       return constraintNames;
+     } finally {
+       if (!success) {
+         rollbackTransaction();
+       }
+     }
+   }
+ 
+   @Override
+   public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+     boolean commited = false;
++    MTable mtbl = null;
++
+     try {
+       openTransaction();
+ 
 -      MTable mtbl = convertToMTable(tbl);
++      mtbl = convertToMTable(tbl);
++      if (TxnUtils.isTransactionalTable(tbl)) {
++        mtbl.setWriteId(tbl.getWriteId());
++      }
+       pm.makePersistent(mtbl);
+ 
+       if (tbl.getCreationMetadata() != null) {
+         MCreationMetadata mcm = convertToMCreationMetadata(tbl.getCreationMetadata());
+         pm.makePersistent(mcm);
+       }
+ 
+       PrincipalPrivilegeSet principalPrivs = tbl.getPrivileges();
+       List<Object> toPersistPrivObjs = new ArrayList<>();
+       if (principalPrivs != null) {
+         int now = (int)(System.currentTimeMillis()/1000);
+ 
+         Map<String, List<PrivilegeGrantInfo>> userPrivs = principalPrivs.getUserPrivileges();
+         putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, userPrivs, PrincipalType.USER, "SQL");
+ 
+         Map<String, List<PrivilegeGrantInfo>> groupPrivs = principalPrivs.getGroupPrivileges();
+         putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, groupPrivs, PrincipalType.GROUP, "SQL");
+ 
+         Map<String, List<PrivilegeGrantInfo>> rolePrivs = principalPrivs.getRolePrivileges();
+         putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, rolePrivs, PrincipalType.ROLE, "SQL");
+       }
+       pm.makePersistentAll(toPersistPrivObjs);
+       commited = commitTransaction();
+     } finally {
+       if (!commited) {
+         rollbackTransaction();
+       }
+     }
+   }
+ 
+   /**
+    * Convert PrivilegeGrantInfo from privMap to MTablePrivilege, and add all of
+    * them to the toPersistPrivObjs. These privilege objects will be persisted as
+    * part of createTable.
+    *
+    * @param mtbl
+    * @param toPersistPrivObjs
+    * @param now
+    * @param privMap
+    * @param type
+    */
+   private void putPersistentPrivObjects(MTable mtbl, List<Object> toPersistPrivObjs,
+       int now, Map<String, List<PrivilegeGrantInfo>> privMap, PrincipalType type, String authorizer) {
+     if (privMap != null) {
+       for (Map.Entry<String, List<PrivilegeGrantInfo>> entry : privMap
+           .entrySet()) {
+         String principalName = entry.getKey();
+         List<PrivilegeGrantInfo> privs = entry.getValue();
+         for (int i = 0; i < privs.size(); i++) {
+           PrivilegeGrantInfo priv = privs.get(i);
+           if (priv == null) {
+             continue;
+           }
+           MTablePrivilege mTblSec = new MTablePrivilege(
+               principalName, type.toString(), mtbl, priv.getPrivilege(),
+               now, priv.getGrantor(), priv.getGrantorType().toString(), priv
+                   .isGrantOption(), authorizer);
+           toPersistPrivObjs.add(mTblSec);
+         }
+       }
+     }
+   }
+ 
+   @Override
+   public boolean dropTable(String catName, String dbName, String tableName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
+     boolean materializedView = false;
+     boolean success = false;
+     try {
+       openTransaction();
+       MTable tbl = getMTable(catName, dbName, tableName);
+       pm.retrieve(tbl);
+       if (tbl != null) {
+         materializedView = TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType());
+         // first remove all the grants
+         List<MTablePrivilege> tabGrants = listAllTableGrants(catName, dbName, tableName);
+         if (CollectionUtils.isNotEmpty(tabGrants)) {
+           pm.deletePersistentAll(tabGrants);
+         }
+         List<MTableColumnPrivilege> tblColGrants = listTableAllColumnGrants(catName, dbName,
+             tableName);
+         if (CollectionUtils.isNotEmpty(tblColGrants)) {
+           pm.deletePersistentAll(tblColGrants);
+         }
+ 
+         List<MPartitionPrivilege> partGrants = this.listTableAllPartitionGrants(catName, dbName, tableName);
+         if (CollectionUtils.isNotEmpty(partGrants)) {
+           pm.deletePersistentAll(partGrants);
+         }
+ 
+         List<MPartitionColumnPrivilege> partColGrants = listTableAllPartitionColumnGrants(catName, dbName,
+             tableName);
+         if (CollectionUtils.isNotEmpty(partColGrants)) {
+           pm.deletePersistentAll(partColGrants);
+         }
+         // delete column statistics if present
+         try {
+           deleteTableColumnStatistics(catName, dbName, tableName, null);
+         } catch (NoSuchObjectException e) {
+           LOG.info("Found no table level column statistics associated with {} to delete",
+               TableName.getQualified(catName, dbName, tableName));
+         }
+ 
++        // TODO## remove? unused
++        Table table = convertToTable(tbl);
++
+         List<MConstraint> tabConstraints = listAllTableConstraintsWithOptionalConstraintName(
+                                            catName, dbName, tableName, null);
+         if (CollectionUtils.isNotEmpty(tabConstraints)) {
+           pm.deletePersistentAll(tabConstraints);
+         }
+ 
+         preDropStorageDescriptor(tbl.getSd());
+ 
+         if (materializedView) {
+           dropCreationMetadata(tbl.getDatabase().getCatalogName(),
+               tbl.getDatabase().getName(), tbl.getTableName());
+         }
+ 
+         // then remove the table
+         pm.deletePersistentAll(tbl);
+       }
+       success = commitTransaction();
+     } finally {
+       if (!success) {
+         rollbackTransaction();
+       }
+     }
+     return success;
+   }
+ 
+   private boolean dropCreationMetadata(String catName, String dbName, String tableName) throws MetaException,
+       NoSuchObjectException, InvalidObjectException, InvalidInputException {
+     boolean success = false;
+     dbName = normalizeIdentifier(dbName);
+     tableName = normalizeIdentifier(tableName);
+     try {
+       openTransaction();
+       MCreationMetadata mcm = getCreationMetadata(catName, dbName, tableName);
+       pm.retrieve(mcm);
+       if (mcm != null) {
+         pm.deletePersistentAll(mcm);
+       }
+       success = commitTransaction();
+     } finally {
+       if (!success) {
+         rollbackTransaction();
+       }
+     }
+     return success;
+   }
+ 
+   private List<MConstraint> listAllTableConstraintsWithOptionalConstraintName(
+       String catName, String dbName, String tableName, String constraintname) {
+     catName = normalizeIdentifier(catName);
+     dbName = normalizeIdentifier(dbName);
+     tableName = normalizeIdentifier(tableName);
+     constraintname = constraintname!=null?normalizeIdentifier(constraintname):null;
+     List<MConstraint> mConstraints = null;
+     List<String> constraintNames = new ArrayList<>();
+     Query query = null;
+ 
+     try {
+       query = pm.newQuery("select constraintName from org.apache.hadoop.hive.metastore.model.MConstraint  where "
+         + "((parentTable.tableName == ptblname && parentTable.database.name == pdbname && " +
+               "parentTable.database.catalogName == pcatname) || "
+         + "(childTable != null && childTable.tableName == ctblname &&" +
+               "childTable.database.name == cdbname && childTable.database.catalogName == ccatname)) " +
+           (constraintname != null ? " && constraintName == constraintname" : ""));
+       query.declareParameters("java.lang.String ptblname, java.lang.String pdbname,"
+           + "java.lang.String pcatname, java.lang.String ctblname, java.lang.String cdbname," +
+           "java.lang.String ccatname" +
+         (constraintname != null ? ", java.lang.String constraintname" : ""));
+       Collection<?> constraintNamesColl =
+         constraintname != null ?
+           ((Collection<?>) query.
+             executeWithArray(tableName, dbName, catName, tableName, dbName, catName, constraintname)):
+           ((Collection<?>) query.
+             executeWithArray(tableName, dbName, catName, tableName, dbName, catName));
+       for (Iterator<?> i = constraintNamesColl.iterator(); i.hasNext();) {
+         String currName = (String) i.next();
+         constraintNames.add(currName);
+       }
+       query = pm.newQuery(MConstraint.class);
+       query.setFilter("param.contains(constraintName)");
+       query.declareParameters("java.util.Collection param");
+       Collection<?> constraints = (Collection<?>)query.execute(constraintNames);
+       mConstraints = new ArrayList<>();
+       for (Iterator<?> i = constraints.iterator(); i.hasNext();) {
+         MConstraint currConstraint = (MConstraint) i.next();
+         mConstraints.add(currConstraint);
+       }
+     } finally {
+       if (query != null) {
+         query.closeAll();
+       }
+     }
+     return mConstraints;
+   }
+ 
++  private static String getFullyQualifiedTableName(String dbName, String tblName) {
++    return ((dbName == null || dbName.isEmpty()) ? "" : "\"" + dbName + "\".")
++        + "\"" + tblName + "\"";
++  }
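
A quick check of the intended output of the quoting helper, using a hypothetical standalone copy of the method:

    public class QualifiedNameSketch {
      // Standalone copy of getFullyQualifiedTableName above, for illustration only.
      static String fqn(String dbName, String tblName) {
        return ((dbName == null || dbName.isEmpty()) ? "" : "\"" + dbName + "\".")
            + "\"" + tblName + "\"";
      }

      public static void main(String[] args) {
        System.out.println(fqn("default", "t1")); // "default"."t1"
        System.out.println(fqn(null, "t1"));      // "t1"
      }
    }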
++
++  @Override
++  public Table getTable(String catName, String dbName, String tableName)
++      throws MetaException {
++    return getTable(catName, dbName, tableName, -1, null);
++  }
++
+   @Override
 -  public Table getTable(String catName, String dbName, String tableName) throws MetaException {
++  public Table getTable(String catName, String dbName, String tableName,
++                        long txnId, String writeIdList)
++      throws MetaException {
+     boolean commited = false;
+     Table tbl = null;
+     try {
+       openTransaction();
 -      tbl = convertToTable(getMTable(catName, dbName, tableName));
++      MTable mtable = getMTable(catName, dbName, tableName);
++      tbl = convertToTable(mtable);
+       // Retrieve creation metadata if needed
+       if (tbl != null && TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) {
+         tbl.setCreationMetadata(
 -            convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName)));
++                convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName)));
++      }
++
++      // If this is a transactional non-partitioned table, check whether the current
++      // version of the table statistics in the metastore complies with the client
++      // query's snapshot isolation.
++      // Note: a partitioned table has table stats and table snapshot in MPartition.
++      if (writeIdList != null) {
++        boolean isTxn = tbl != null && TxnUtils.isTransactionalTable(tbl);
++        if (isTxn && !areTxnStatsSupported) {
++          StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
++          LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters.");
++        } else if (isTxn && tbl.getPartitionKeysSize() == 0) {
++          if (isCurrentStatsValidForTheQuery(mtable, txnId, writeIdList, false)) {
++            tbl.setIsStatsCompliant(true);
++          } else {
++            tbl.setIsStatsCompliant(false);
++            // Do not persist the following state, since it is query-specific (not global).
++            StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
++            LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters.");
++          }
++        }
+       }
+       commited = commitTransaction();
+     } finally {
+       if (!commited) {
+         rollbackTransaction();
+       }
+     }
+     return tbl;
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern)
+       throws MetaException {
+     return getTables(catName, dbName, pattern, null);
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern, TableType tableType)
+       throws MetaException {
+     try {
+       // We only support pattern matching via jdo since pattern matching in Java
+       // might be different from the one used by the metastore backends
+       return getTablesInternal(catName, dbName, pattern, tableType,
+           (pattern == null || pattern.equals(".*")), true);
+     } catch (NoSuchObjectException e) {
+       throw new MetaException(ExceptionUtils.getStackTrace(e));
+     }
+   }
+ 
+   @Override
+   public List<TableName> getTableNamesWithStats() throws MetaException, NoSuchObjectException {
+     return new GetListHelper<TableName>(null, null, null, true, false) {
+       @Override
+       protected List<TableName> getSqlResult(
+           GetHelper<List<TableName>> ctx) throws MetaException {
+         return directSql.getTableNamesWithStats();
+       }
+ 
+       @Override
+       protected List<TableName> getJdoResult(
+           GetHelper<List<TableName>> ctx) throws MetaException {
+         throw new UnsupportedOperationException("UnsupportedOperationException"); // TODO: implement?
+       }
+     }.run(false);
+   }
+ 
+   @Override
+   public Map<String, List<String>> getPartitionColsWithStats(String catName, String dbName, String tableName)
+       throws MetaException, NoSuchObjectException {
+     return new GetHelper<Map<String, List<String>>>(catName, dbName, null, true, false) {
+       @Override
+       protected Map<String, List<String>> getSqlResult(
+           GetHelper<Map<String, List<String>>> ctx) throws MetaException {
+         try {
+           return directSql.getColAndPartNamesWithStats(catName, dbName, tableName);
+         } catch (Throwable ex) {
+           LOG.error("DirectSQL failed", ex);
+           throw new MetaException(ex.getMessage());
+         }
+       }
+ 
+       @Override
+       protected Map<String, List<String>> getJdoResult(
+           GetHelper<Map<String, List<String>>> ctx) throws MetaException {
+         throw new UnsupportedOperationException("UnsupportedOperationException"); // TODO: implement?
+       }
+ 
+       @Override
+       protected String describeResult() {
+         return results.size() + " partitions";
+       }
+     }.run(false);
+   }
+ 
+   @Override
+   public List<TableName> getAllTableNamesForStats() throws MetaException, NoSuchObjectException {
+     return new GetListHelper<TableName>(null, null, null, true, false) {
+       @Override
+       protected List<TableName> getSqlResult(
+           GetHelper<List<TableName>> ctx) throws MetaException {
+         return directSql.getAllTableNamesForStats();
+       }
+ 
+       @Override
+       protected List<TableName> getJdoResult(
+           GetHelper<List<TableName>> ctx) throws MetaException {
+         boolean commited = false;
+         Query query = null;
+         List<TableName> result = new ArrayList<>();
+         openTransaction();
+         try {
+           String paramStr = "", whereStr = "";
+           for (int i = 0; i < MetaStoreDirectSql.STATS_TABLE_TYPES.length; ++i) {
+             if (i != 0) {
+               paramStr += ", ";
+               whereStr += "||";
+             }
+             paramStr += "java.lang.String tt" + i;
+             whereStr += " tableType == tt" + i;
+           }
+           query = pm.newQuery(MTable.class, whereStr);
+           query.declareParameters(paramStr);
+           @SuppressWarnings("unchecked")
+           Collection<MTable> tbls = (Collection<MTable>) query.executeWithArray(
+               query, MetaStoreDirectSql.STATS_TABLE_TYPES);
+           pm.retrieveAll(tbls);
+           for (MTable tbl : tbls) {
+             result.add(new TableName(
+                 tbl.getDatabase().getCatalogName(), tbl.getDatabase().getName(), tbl.getTableName()));
+           }
+           commited = commitTransaction();
+         } finally {
+           rollbackAndCleanup(commited, query);
+         }
+         return result;
+       }
+     }.run(false);
+   }
+ 
+   protected List<String> getTablesInternal(String catName, String dbName, String pattern,
+                                            TableType tableType, boolean allowSql, boolean allowJdo)
+       throws MetaException, NoSuchObjectException {
+     final String db_name = normalizeIdentifier(dbName);
+     final String cat_name = normalizeIdentifier(catName);
+     return new GetListHelper<String>(cat_name, dbName, null, allowSql, allowJdo) {
+       @Override
+       protected List<String> getSqlResult(GetHelper<List<String>> ctx)
+               throws MetaException {
+         return directSql.getTables(cat_name, db_name, tableType);
+       }
+ 
+       @Override
+       protected List<String> getJdoResult(GetHelper<List<String>> ctx)
+               throws MetaException, NoSuchObjectException {
+         return getTablesInternalViaJdo(cat_name, db_name, pattern, tableType);
+       }
+     }.run(false);
+   }
+ 
+   private List<String> getTablesInternalViaJdo(String catName, String dbName, String pattern,
+                                                TableType tableType) throws MetaException {
+     boolean commited = false;
+     Query query = null;
+     List<String> tbls = null;
+     try {
+       openTransaction();
+       dbName = normalizeIdentifier(dbName);
+       // Take the pattern and split it on the | to get all the composing
+       // patterns
+       List<String> parameterVals = new ArrayList<>();
+       StringBuilder filterBuilder = new StringBuilder();
+       //adds database.name == dbName to the filter
+       appendSimpleCondition(filterBuilder, "database.name", new String[] {dbName}, parameterVals);
+       appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals);
+       if(pattern != null) {
+         appendPatternCondition(filterBuilder, "tableName", pattern, parameterVals);
+       }
+       if(tableType != null) {
+         appendPatternCondition(filterBuilder, "tableType", new String[] {tableType.toString()}, parameterVals);
+       }
+ 
+       query = pm.newQuery(MTable.class, filterBuilder.toString());
+       query.setResult("tableName");
+       query.setOrdering("tableName ascending");
+       Collection<String> names = (Collection<String>) query.executeWithArray(parameterVals.toArray(new String[0]));
+       tbls = new ArrayList<>(names);
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return tbls;
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     final String db_name = normalizeIdentifier(dbName);
+     catName = normalizeIdentifier(catName);
+     boolean commited = false;
+     Query<?> query = null;
+     List<String> tbls = null;
+     try {
+       openTransaction();
+       dbName = normalizeIdentifier(dbName);
+       query = pm.newQuery(MTable.class,
+           "database.name == db && database.catalogName == cat && tableType == tt && rewriteEnabled == re");
+       query.declareParameters(
+           "java.lang.String db, java.lang.String cat, java.lang.String tt, boolean re");
+       query.setResult("tableName");
+       Collection<String> names = (Collection<String>) query.executeWithArray(
+           db_name, catName, TableType.MATERIALIZED_VIEW.toString(), true);
+       tbls = new ArrayList<>(names);
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return tbls;
+   }
+ 
+   @Override
+   public int getDatabaseCount() throws MetaException {
+     return getObjectCount("name", MDatabase.class.getName());
+   }
+ 
+   @Override
+   public int getPartitionCount() throws MetaException {
+     return getObjectCount("partitionName", MPartition.class.getName());
+   }
+ 
+   @Override
+   public int getTableCount() throws MetaException {
+     return getObjectCount("tableName", MTable.class.getName());
+   }
+ 
+   private int getObjectCount(String fieldName, String objName) {
+     Long result = 0L;
+     boolean commited = false;
+     Query query = null;
+     try {
+       openTransaction();
+       String queryStr =
+         "select count(" + fieldName + ") from " + objName;
+       query = pm.newQuery(queryStr);
+       result = (Long) query.execute();
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return result.intValue();
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames,
+                                       List<String> tableTypes) throws MetaException {
+ 
+     boolean commited = false;
+     Query query = null;
+     List<TableMeta> metas = new ArrayList<>();
+     try {
+       openTransaction();
+       // Take the pattern and split it on the | to get all the composing
+       // patterns
+       StringBuilder filterBuilder = new StringBuilder();
+       List<String> parameterVals = new ArrayList<>();
+       appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals);
+       if (dbNames != null && !dbNames.equals("*")) {
+         appendPatternCondition(filterBuilder, "database.name", dbNames, parameterVals);
+       }
+       if (tableNames != null && !tableNames.equals("*")) {
+         appendPatternCondition(filterBuilder, "tableName", tableNames, parameterVals);
+       }
+       if (tableTypes != null && !tableTypes.isEmpty()) {
+         appendSimpleCondition(filterBuilder, "tableType", tableTypes.toArray(new String[0]), parameterVals);
+       }
+ 
+       if (LOG.isDebugEnabled()) {
+         LOG.debug("getTableMeta with filter " + filterBuilder.toString() + " params: " +
+             StringUtils.join(parameterVals, ", "));
+       }
+       query = pm.newQuery(MTable.class, filterBuilder.toString());
+       Collection<MTable> tables = (Collection<MTable>) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()]));
+       for (MTable table : tables) {
+         TableMeta metaData = new TableMeta(
+             table.getDatabase().getName(), table.getTableName(), table.getTableType());
+         metaData.setComments(table.getParameters().get("comment"));
+         metas.add(metaData);
+       }
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return metas;
+   }
+ 
+   private StringBuilder appendPatternCondition(StringBuilder filterBuilder, String fieldName,
+       String[] elements, List<String> parameterVals) {
+     return appendCondition(filterBuilder, fieldName, elements, true, parameterVals);
+   }
+ 
+   private StringBuilder appendPatternCondition(StringBuilder builder,
+       String fieldName, String elements, List<String> parameters) {
+     elements = normalizeIdentifier(elements);
+     return appendCondition(builder, fieldName, elements.split("\\|"), true, parameters);
+   }
+ 
+   private StringBuilder appendSimpleCondition(StringBuilder builder,
+       String fieldName, String[] elements, List<String> parameters) {
+     return appendCondition(builder, fieldName, elements, false, parameters);
+   }
+ 
+   private StringBuilder appendCondition(StringBuilder builder,
+       String fieldName, String[] elements, boolean pattern, List<String> parameters) {
+     if (builder.length() > 0) {
+       builder.append(" && ");
+     }
+     builder.append(" (");
+     int length = builder.length();
+     for (String element : elements) {
+       if (pattern) {
+         element = "(?i)" + element.replaceAll("\\*", ".*");
+       }
+       parameters.add(element);
+       if (builder.length() > length) {
+         builder.append(" || ");
+       }
+       builder.append(fieldName);
+       if (pattern) {
+         builder.append(".matches(").append(JDO_PARAM).append(parameters.size()).append(")");
+       } else {
+         builder.append(" == ").append(JDO_PARAM).append(parameters.size());
+       }
+     }
+     builder.append(" )");
+     return builder;
+   }
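
appendCondition rewrites a '|' separated, '*' wildcarded pattern list into a JDOQL filter with numbered parameters. The sketch below replicates just the pattern branch, assuming JDO_PARAM is a parameter-name prefix along the lines of ":param":

    import java.util.ArrayList;
    import java.util.List;

    public class FilterBuilderSketch {
      private static final String JDO_PARAM = ":param"; // assumption about the class constant

      static String patternFilter(String fieldName, String rawPattern, List<String> parameters) {
        StringBuilder builder = new StringBuilder(" (");
        int length = builder.length();
        for (String element : rawPattern.split("\\|")) {
          element = "(?i)" + element.replaceAll("\\*", ".*"); // case-insensitive, '*' becomes '.*'
          parameters.add(element);
          if (builder.length() > length) {
            builder.append(" || ");
          }
          builder.append(fieldName).append(".matches(").append(JDO_PARAM)
              .append(parameters.size()).append(")");
        }
        return builder.append(" )").toString();
      }

      public static void main(String[] args) {
        List<String> params = new ArrayList<>();
        System.out.println(patternFilter("tableName", "web_*|store_sales", params));
        // prints:  (tableName.matches(:param1) || tableName.matches(:param2) )
        System.out.println(params); // [(?i)web_.*, (?i)store_sales]
      }
    }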
+ 
+   @Override
+   public List<String> getAllTables(String catName, String dbName) throws MetaException {
+     return getTables(catName, dbName, ".*");
+   }
+ 
+   class AttachedMTableInfo {
+     MTable mtbl;
+     MColumnDescriptor mcd;
+ 
+     public AttachedMTableInfo() {}
+ 
+     public AttachedMTableInfo(MTable mtbl, MColumnDescriptor mcd) {
+       this.mtbl = mtbl;
+       this.mcd = mcd;
+     }
+   }
+ 
+   private AttachedMTableInfo getMTable(String catName, String db, String table,
+                                        boolean retrieveCD) {
+     AttachedMTableInfo nmtbl = new AttachedMTableInfo();
+     MTable mtbl = null;
+     boolean commited = false;
+     Query query = null;
+     try {
+       openTransaction();
+       catName = normalizeIdentifier(catName);
+       db = normalizeIdentifier(db);
+       table = normalizeIdentifier(table);
+       query = pm.newQuery(MTable.class,
+           "tableName == table && database.name == db && database.catalogName == catname");
+       query.declareParameters(
+           "java.lang.String table, java.lang.String db, java.lang.String catname");
+       query.setUnique(true);
+       LOG.debug("Executing getMTable for " +
+           TableName.getQualified(catName, db, table));
+       mtbl = (MTable) query.execute(table, db, catName);
+       pm.retrieve(mtbl);
+       // Retrieving CD can be expensive and unnecessary, so do it only when required.
+       if (mtbl != null && retrieveCD) {
+         pm.retrieve(mtbl.getSd());
+         pm.retrieveAll(mtbl.getSd().getCD());
+         nmtbl.mcd = mtbl.getSd().getCD();
+       }
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     nmtbl.mtbl = mtbl;
+     return nmtbl;
+   }
+ 
+   private MCreationMetadata getCreationMetadata(String catName, String dbName, String tblName) {
+     boolean commited = false;
+     MCreationMetadata mcm = null;
+     Query query = null;
+     try {
+       openTransaction();
+       query = pm.newQuery(
+           MCreationMetadata.class, "tblName == table && dbName == db && catalogName == cat");
+       query.declareParameters("java.lang.String table, java.lang.String db, java.lang.String cat");
+       query.setUnique(true);
+       mcm = (MCreationMetadata) query.execute(tblName, dbName, catName);
+       pm.retrieve(mcm);
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return mcm;
+   }
+ 
+   private MTable getMTable(String catName, String db, String table) {
+     AttachedMTableInfo nmtbl = getMTable(catName, db, table, false);
+     return nmtbl.mtbl;
+   }
+ 
+   @Override
+   public List<Table> getTableObjectsByName(String catName, String db, List<String> tbl_names)
+       throws MetaException, UnknownDBException {
+     List<Table> tables = new ArrayList<>();
+     boolean committed = false;
+     Query dbExistsQuery = null;
+     Query query = null;
+     try {
+       openTransaction();
+       db = normalizeIdentifier(db);
+       catName = normalizeIdentifier(catName);
+ 
+       List<String> lowered_tbl_names = new ArrayList<>(tbl_names.size());
+       for (String t : tbl_names) {
+         lowered_tbl_names.add(normalizeIdentifier(t));
+       }
+       query = pm.newQuery(MTable.class);
+       query.setFilter("database.name == db && database.catalogName == cat && tbl_names.contains(tableName)");
+       query.declareParameters("java.lang.String db, java.lang.String cat, java.util.Collection tbl_names");
+       Collection mtables = (Collection) query.execute(db, catName, lowered_tbl_names);
+       if (mtables == null || mtables.isEmpty()) {
+         // Need to differentiate between an unmatched pattern and a non-existent database
+         dbExistsQuery = pm.newQuery(MDatabase.class, "name == db && catalogName == cat");
+         dbExistsQuery.declareParameters("java.lang.String db, java.lang.String cat");
+         dbExistsQuery.setUnique(true);
+         dbExistsQuery.setResult("name");
+         String dbNameIfExists = (String) dbExistsQuery.execute(db, catName);
+         if (org.apache.commons.lang.StringUtils.isEmpty(dbNameIfExists)) {
+           throw new UnknownDBException("Could not find database " +
+               DatabaseName.getQualified(catName, db));
+         }
+       } else {
+         for (Iterator iter = mtables.iterator(); iter.hasNext(); ) {
+           Table tbl = convertToTable((MTable) iter.next());
+           // Retrieve creation metadata if needed
+           if (TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) {
+             tbl.setCreationMetadata(
+                 convertToCreationMetadata(
+                     getCreationMetadata(tbl.getCatName(), tbl.getDbName(), tbl.getTableName())));
+           }
+           tables.add(tbl);
+         }
+       }
+       committed = commitTransaction();
+     } finally {
+       rollbackAndCleanup(committed, query);
+       if (dbExistsQuery != null) {
+         dbExistsQuery.closeAll();
+       }
+     }
+     return tables;
+   }
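
The branch above is what lets callers tell an empty match apart from a missing database: a second existence query runs only when the table lookup returns nothing, and UnknownDBException is raised only when the database itself is absent. A small caller-side sketch of that contract follows; "hive" is the default catalog name, the database and table names are made up, and the store argument stands for any RawStore implementation such as this ObjectStore.

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.metastore.api.UnknownDBException;

    public class BatchTableLookupSketch {
      /**
       * Demonstrates the contract only: an empty result means the database exists but none of
       * the requested names matched, while UnknownDBException means the database itself is
       * missing. The catalog/database/table names are illustrative.
       */
      static void describeLookup(RawStore store) throws MetaException {
        try {
          List<Table> tables =
              store.getTableObjectsByName("hive", "sales_db", Arrays.asList("t1", "t2"));
          if (tables.isEmpty()) {
            System.out.println("Database exists, but none of the requested tables were found.");
          } else {
            for (Table t : tables) {
              System.out.println("Found table: " + t.getTableName());
            }
          }
        } catch (UnknownDBException e) {
          System.out.println("Database hive.sales_db does not exist: " + e.getMessage());
        }
      }
    }
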
+ 
+   /** Makes shallow copy of a list to avoid DataNucleus mucking with our objects. */
+   private <T> List<T> convertList(List<T> dnList) {
+     return (dnList == null) ? null : Lists.newArrayList(dnList);
+   }
+ 
+   /** Makes shallow copy of a map to avoid DataNucleus mucking with our objects. */
+   private Map<String, String> convertMap(Map<String, String> dnMap) {
+     return MetaStoreUtils.trimMapNulls(dnMap,
+         MetastoreConf.getBoolVar(getConf(), ConfVars.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS));
+   }
+ 
+   private Table convertToTable(MTable mtbl) throws MetaException {
+     if (mtbl == null) {
+       return null;
+     }
+     String tableType = mtbl.getTableType();
+     if (tableType == null) {
+       // for backwards compatibility with old metastore persistence
+       if (mtbl.getViewOriginalText() != null) {
+         tableType = TableType.VIRTUAL_VIEW.toString();
+       } else if (Boolean.parseBoolean(mtbl.getParameters().get("EXTERNAL"))) {
+         tableType = TableType.EXTERNAL_TABLE.toString();
+       } else {
+         tableType = TableType.MANAGED_TABLE.toString();
+       }
+     }
+     final Table t = new Table(mtbl.getTableName(), mtbl.getDatabase().getName(), mtbl
+         .getOwner(), mtbl.getCreateTime(), mtbl.getLastAccessTime(), mtbl
+         .getRetention(), convertToStorageDescriptor(mtbl.getSd()),
+         convertToFieldSchemas(mtbl.getPartitionKeys()), convertMap(mtbl.getParameters()),
+         mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), tableType);
+ 
+     if (Strings.isNullOrEmpty(mtbl.getOwnerType())) {
+       // Before ownerType existed in older Hive schemas, USER was the default owner type.
+       // Default to USER here to keep backward compatibility.
+       t.setOwnerType(PrincipalType.USER);
+     } else {
+       t.setOwnerType(PrincipalType.valueOf(mtbl.getOwnerType()));
+     }
+ 
+     t.setRewriteEnabled(mtbl.isRewriteEnabled());
+     t.setCatName(mtbl.getDatabase().getCatalogName());
++    t.setWriteId(mtbl.getWriteId());
+     return t;
+   }
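
The null-tableType branch above encodes a small backward-compatibility rule for rows persisted before the table type was stored: non-null view text means VIRTUAL_VIEW, an EXTERNAL=TRUE parameter means EXTERNAL_TABLE, and everything else is MANAGED_TABLE. The following standalone distillation of that rule is illustrative only; LegacyTableTypeSketch and inferLegacyTableType are hypothetical names, not Hive APIs.

    import java.util.Collections;
    import java.util.Map;

    import org.apache.hadoop.hive.metastore.TableType;

    public class LegacyTableTypeSketch {

      static String inferLegacyTableType(String viewOriginalText, Map<String, String> params) {
        if (viewOriginalText != null) {
          return TableType.VIRTUAL_VIEW.toString();      // stored view text implies a view
        }
        if (Boolean.parseBoolean(params.get("EXTERNAL"))) {
          return TableType.EXTERNAL_TABLE.toString();    // EXTERNAL=TRUE table parameter
        }
        return TableType.MANAGED_TABLE.toString();       // everything else is managed
      }

      public static void main(String[] args) {
        System.out.println(inferLegacyTableType("SELECT 1", Collections.emptyMap()));               // VIRTUAL_VIEW
        System.out.println(inferLegacyTableType(null, Collections.singletonMap("EXTERNAL", "TRUE"))); // EXTERNAL_TABLE
        System.out.println(inferLegacyTableType(null, Collections.emptyMap()));                     // MANAGED_TABLE
      }
    }
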
+ 
+   private MTable convertToMTable(Table tbl) throws InvalidObjectException,
+       MetaException {
++    // NOTE: we don't set writeId in this method. Write ID is only set after validating the
++    //       existing write ID against the caller's valid list.
+     if (tbl == null) {
+       return null;
+     }
+     MDatabase mdb = null;
+     String catName = tbl.isSetCatName() ? tbl.getCatName() : getDefaultCatalog(conf);
+     try {
+       mdb = getMDatabase(catName, tbl.getDbName());
+     } catch (NoSuchObjectException e) {
+       LOG.error("Could not convert to MTable", e);
+       throw new InvalidObjectException("Database " +
+           DatabaseName.getQualified(catName, tbl.getDbName()) + " doesn't exist.");
+     }
+ 
+     // If the table has property EXTERNAL set, update table type
+     // accordingly
+     String tableType = tbl.getTableType();
+     boolean isExternal = Boolean.parseBoolean(tbl.getParameters().get("EXTERNAL"));
+     if (TableType.MANAGED_TABLE.toString().equals(tableType)) {
+       if (isExternal) {
+         tableType = TableType.EXTERNAL_TABLE.toString();
+       }
+     }
+     if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) {
+       if (!isExternal) {
+         tableType = TableType.MANAGED_TABLE.toString();
+       }
+     }
+ 
+     PrincipalType ownerPrincipalType = tbl.getOwnerType();
+     String ownerType = (ownerPrincipalType == null) ? PrincipalType.USER.name() : ownerPrincipalType.name();
+ 
+     // A new table is always created with a new column descriptor
 -    return new MTable(normalizeIdentifier(tbl.getTableName()), mdb,
++    MTable mtable = new MTable(normalizeIdentifier(tbl.getTableName()), mdb,
+         convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), ownerType, tbl
+         .getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(),
+         convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(),
+         tbl.getViewOriginalText(), tbl.getViewExpandedText(), tbl.isRewriteEnabled(),
+         tableType);
++    return mtable;
+   }
+ 
+   private List<MFieldSchema> convertToMFieldSchemas(List<FieldSchema> keys) {
+     List<MFieldSchema> mkeys = null;
+     if (keys != null) {
+       mkeys = new ArrayList<>(keys.size());
+       for (FieldSchema part : keys) {
+         mkeys.add(new MFieldSchema(part.getName().toLowerCase(),
+             part.getType(), part.getComment()));
+       }
+     }
+     return mkeys;
+   }
+ 
+   private List<FieldSchema> convertToFieldSchemas(List<MFieldSchema> mkeys) {
+     List<FieldSchema> keys = null;
+     if (mkeys != null) {
+       keys = new ArrayList<>(mkeys.size());
+       for (MFieldSchema part : mkeys) {
+         keys.add(new FieldSchema(part.getName(), part.getType(), part
+             .getComment()));
+       }
+     }
+     return keys;
+   }
+ 
+   private List<MOrder> convertToMOrders(List<Order> keys) {
+     List<MOrder> mkeys = null;
+     if (keys != null) {
+       mkeys = new ArrayList<>(keys.size());
+       for (Order part : keys) {
+         mkeys.add(new MOrder(normalizeIdentifier(part.getCol()), part.getOrder()));
+       }
+     }
+     return mkeys;
+   }
+ 
+   private List<Order> convertToOrders(List<MOrder> mkeys) {
+     List<Order> keys = null;
+     if (mkeys != null) {
+       keys = new ArrayList<>(mkeys.size());
+       for (MOrder part : mkeys) {
+         keys.add(new Order(part.getCol(), part.getOrder()));
+       }
+     }
+     return keys;
+   }
+ 
+   private SerDeInfo convertToSerDeInfo(MSerDeInfo ms) throws MetaException {
+     if (ms == null) {
+       throw new MetaException("Invalid SerDeInfo object");
+     }
+     SerDeInfo serde =
+         new SerDeInfo(ms.getName(), ms.getSerializationLib(), convertMap(ms.getParameters()));
+     if (ms.getDescription() != null) {
+       serde.setDescription(ms.getDescription());
+     }
+     if (ms.getSerializerClass() != null) {
+       serde.setSerializerClass(ms.getSerializerClass());
+     }
+     if (ms.getDeserializerClass() != null) {
+       serde.setDeserializerClass(ms.getDeserializerClass());
+     }
+     if (ms.getSerdeType() > 0) {
+       serde.setSerdeType(SerdeType.findByValue(ms.getSerdeType()));
+     }
+     return serde;
+   }
+ 
+   private MSerDeInfo convertToMSerDeInfo(SerDeInfo ms) throws MetaException {
+     if (ms == null) {
+       throw new MetaException("Invalid SerDeInfo object");
+     }
+     return new MSerDeInfo(ms.getName(), ms.getSerializationLib(), ms.getParameters(),
+         ms.getDescription(), ms.getSerializerClass(), ms.getDeserializerClass(),
+         ms.getSerdeType() == null ? 0 : ms.

<TRUNCATED>

[16/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/parquet_map_type_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/parquet_map_type_vectorization.q.out b/ql/src/test/results/clientpositive/llap/parquet_map_type_vectorization.q.out
index 6f65061..b278ecc 100644
--- a/ql/src/test/results/clientpositive/llap/parquet_map_type_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/llap/parquet_map_type_vectorization.q.out
@@ -230,31 +230,40 @@ STAGE PLANS:
                           projectedOutputColumnNums: [8, 9, 10]
                           selectExpressions: VectorUDFMapIndexStringScalar(col 1:map<string,string>, key: k1) -> 8:string, VectorUDFMapIndexLongScalar(col 2:map<int,int>, key: 123) -> 9:int, VectorUDFMapIndexDoubleScalar(col 3:map<double,double>, key: 123.123) -> 10:double
                       Statistics: Num rows: 511 Data size: 995378 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: sum(_col1), sum(_col2)
-                        Group By Vectorization:
-                            aggregators: VectorUDAFSumLong(col 9:int) -> bigint, VectorUDAFSumDouble(col 10:double) -> double
-                            className: VectorGroupByOperator
-                            groupByMode: HASH
-                            keyExpressions: col 8:string
-                            native: false
-                            vectorProcessingMode: HASH
-                            projectedOutputColumnNums: [0, 1]
+                      Top N Key Operator
+                        sort order: +
                         keys: _col0 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1, _col2
                         Statistics: Num rows: 511 Data size: 995378 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: string)
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkStringOperator
-                              native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        top n: 10
+                        Top N Key Vectorization:
+                            className: VectorTopNKeyOperator
+                            keyExpressions: col 8:string
+                            native: true
+                        Group By Operator
+                          aggregations: sum(_col1), sum(_col2)
+                          Group By Vectorization:
+                              aggregators: VectorUDAFSumLong(col 9:int) -> bigint, VectorUDAFSumDouble(col 10:double) -> double
+                              className: VectorGroupByOperator
+                              groupByMode: HASH
+                              keyExpressions: col 8:string
+                              native: false
+                              vectorProcessingMode: HASH
+                              projectedOutputColumnNums: [0, 1]
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
                           Statistics: Num rows: 511 Data size: 995378 Basic stats: COMPLETE Column stats: NONE
-                          TopN Hash Memory Usage: 0.1
-                          value expressions: _col1 (type: bigint), _col2 (type: double)
+                          Reduce Output Operator
+                            key expressions: _col0 (type: string)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: string)
+                            Reduce Sink Vectorization:
+                                className: VectorReduceSinkStringOperator
+                                native: true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            Statistics: Num rows: 511 Data size: 995378 Basic stats: COMPLETE Column stats: NONE
+                            TopN Hash Memory Usage: 0.1
+                            value expressions: _col1 (type: bigint), _col2 (type: double)
             Execution mode: vectorized, llap
             LLAP IO: all inputs (cache only)
             Map Vectorization:

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/parquet_struct_type_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/parquet_struct_type_vectorization.q.out b/ql/src/test/results/clientpositive/llap/parquet_struct_type_vectorization.q.out
index affb27e..fec8093 100644
--- a/ql/src/test/results/clientpositive/llap/parquet_struct_type_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/llap/parquet_struct_type_vectorization.q.out
@@ -238,31 +238,40 @@ STAGE PLANS:
                           projectedOutputColumnNums: [4]
                           selectExpressions: VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 0:int) -> 4:int
                       Statistics: Num rows: 341 Data size: 76542 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: sum(_col0)
-                        Group By Vectorization:
-                            aggregators: VectorUDAFSumLong(col 4:int) -> bigint
-                            className: VectorGroupByOperator
-                            groupByMode: HASH
-                            keyExpressions: col 4:int
-                            native: false
-                            vectorProcessingMode: HASH
-                            projectedOutputColumnNums: [0]
+                      Top N Key Operator
+                        sort order: +
                         keys: _col0 (type: int)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 341 Data size: 76542 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: int)
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkLongOperator
-                              native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        top n: 10
+                        Top N Key Vectorization:
+                            className: VectorTopNKeyOperator
+                            keyExpressions: col 4:int
+                            native: true
+                        Group By Operator
+                          aggregations: sum(_col0)
+                          Group By Vectorization:
+                              aggregators: VectorUDAFSumLong(col 4:int) -> bigint
+                              className: VectorGroupByOperator
+                              groupByMode: HASH
+                              keyExpressions: col 4:int
+                              native: false
+                              vectorProcessingMode: HASH
+                              projectedOutputColumnNums: [0]
+                          keys: _col0 (type: int)
+                          mode: hash
+                          outputColumnNames: _col0, _col1
                           Statistics: Num rows: 341 Data size: 76542 Basic stats: COMPLETE Column stats: NONE
-                          TopN Hash Memory Usage: 0.1
-                          value expressions: _col1 (type: bigint)
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Reduce Sink Vectorization:
+                                className: VectorReduceSinkLongOperator
+                                native: true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            Statistics: Num rows: 341 Data size: 76542 Basic stats: COMPLETE Column stats: NONE
+                            TopN Hash Memory Usage: 0.1
+                            value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs (cache only)
             Map Vectorization:

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/topnkey.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/topnkey.q.out b/ql/src/test/results/clientpositive/llap/topnkey.q.out
new file mode 100644
index 0000000..c1d8874
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/topnkey.q.out
@@ -0,0 +1,318 @@
+PREHOOK: query: EXPLAIN
+SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), UDFToInteger(substr(value, 5)) (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Top N Key Operator
+                      sort order: +
+                      keys: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 5
+                      Group By Operator
+                        aggregations: sum(_col1)
+                        keys: _col0 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
+                  value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 5
+                  Statistics: Num rows: 5 Data size: 475 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 5 Data size: 475 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	0
+10	10
+100	200
+103	206
+104	208
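+
The "Top N Key Operator ... top n: 5" entry in the plan above sits between the Select and the map-side Group By: it forwards a row only while the row's key can still be among the first n keys in the requested sort order, so the downstream group-by and reduce sink see fewer rows, and the final Limit still guarantees an exact result. A conceptual sketch of that filtering idea, not Hive's implementation (TopNKeySketch and canForward are made-up names), might look like this:

    import java.util.TreeSet;

    // Handles a single ascending string key only. Forwarding too many rows is harmless,
    // because the downstream TopN/Limit still produces the exact result; a row is dropped
    // only when its key provably cannot be among the first n distinct keys seen so far.
    public class TopNKeySketch {
      private final int n;
      // Distinct keys retained so far, in ascending order; never grows beyond n entries.
      private final TreeSet<String> topKeys = new TreeSet<>();

      public TopNKeySketch(int n) {
        this.n = n;
      }

      /** Returns true if a row with this key should be forwarded to the downstream operator. */
      public boolean canForward(String key) {
        if (topKeys.contains(key)) {
          return true;                      // key is already one of the retained top keys
        }
        if (topKeys.size() < n) {
          topKeys.add(key);                 // still collecting the first n distinct keys
          return true;
        }
        if (key.compareTo(topKeys.last()) < 0) {
          topKeys.pollLast();               // evict the current largest retained key
          topKeys.add(key);
          return true;
        }
        return false;                       // larger than all n retained keys: safe to drop
      }
    }
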
+PREHOOK: query: EXPLAIN
+SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Top N Key Operator
+                      sort order: +
+                      keys: key (type: string)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 5
+                      Group By Operator
+                        keys: key (type: string)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 5
+                  Statistics: Num rows: 5 Data size: 435 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 5 Data size: 435 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0
+10
+100
+103
+104
+PREHOOK: query: explain vectorization detail
+SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization detail
+SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: false
+  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src1
+                  filterExpr: key is not null (type: boolean)
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src2
+                  filterExpr: key is not null (type: boolean)
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col2
+                Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: string), _col2 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col1 (type: string)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 5
+                  Statistics: Num rows: 5 Data size: 890 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 5 Data size: 890 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
index f801856..8c74a92 100644
--- a/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
@@ -141,31 +141,40 @@ STAGE PLANS:
                         native: true
                         projectedOutputColumnNums: [2]
                     Statistics: Num rows: 1049 Data size: 4196 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: sum(50), count(), sum(50.0D), count(50.0D), sum(50), count(50)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumLong(ConstantVectorExpression(val 50) -> 12:int) -> bigint, VectorUDAFCountStar(*) -> bigint, VectorUDAFSumDouble(ConstantVectorExpression(val 50.0) -> 13:double) -> double, VectorUDAFCount(ConstantVectorExpression(val 50.0) -> 14:double) -> bigint, VectorUDAFSumDecimal(ConstantVectorExpression(val 50) -> 15:decimal(10,0)) -> decimal(20,0), VectorUDAFCount(ConstantVectorExpression(val 50) -> 16:decimal(10,0)) -> bigint
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 2:int
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: [0, 1, 2, 3, 4, 5]
+                    Top N Key Operator
+                      sort order: +
                       keys: _col0 (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                      Statistics: Num rows: 257 Data size: 40092 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      Statistics: Num rows: 1049 Data size: 4196 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 10
+                      Top N Key Vectorization:
+                          className: VectorTopNKeyOperator
+                          keyExpressions: col 2:int
+                          native: true
+                      Group By Operator
+                        aggregations: sum(50), count(), sum(50.0D), count(50.0D), sum(50), count(50)
+                        Group By Vectorization:
+                            aggregators: VectorUDAFSumLong(ConstantVectorExpression(val 50) -> 12:int) -> bigint, VectorUDAFCountStar(*) -> bigint, VectorUDAFSumDouble(ConstantVectorExpression(val 50.0) -> 13:double) -> double, VectorUDAFCount(ConstantVectorExpression(val 50.0) -> 14:double) -> bigint, VectorUDAFSumDecimal(ConstantVectorExpression(val 50) -> 15:decimal(10,0)) -> decimal(20,0), VectorUDAFCount(ConstantVectorExpression(val 50) -> 16:decimal(10,0)) -> bigint
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 2:int
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: [0, 1, 2, 3, 4, 5]
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
                         Statistics: Num rows: 257 Data size: 40092 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col1 (type: bigint), _col2 (type: bigint), _col3 (type: double), _col4 (type: bigint), _col5 (type: decimal(12,0)), _col6 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkLongOperator
+                              native: true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          Statistics: Num rows: 257 Data size: 40092 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint), _col2 (type: bigint), _col3 (type: double), _col4 (type: bigint), _col5 (type: decimal(12,0)), _col6 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_char_2.q.out b/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
index 73e8060..b58de03 100644
--- a/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
@@ -102,31 +102,40 @@ STAGE PLANS:
                         projectedOutputColumnNums: [1, 3]
                         selectExpressions: CastStringToLong(col 0:char(10)) -> 3:int
                     Statistics: Num rows: 501 Data size: 89178 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: sum(_col1), count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumLong(col 3:int) -> bigint, VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 1:char(20)
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: [0, 1]
+                    Top N Key Operator
+                      sort order: +
                       keys: _col0 (type: char(20))
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: char(20))
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: char(20))
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkStringOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      Statistics: Num rows: 501 Data size: 89178 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 5
+                      Top N Key Vectorization:
+                          className: VectorTopNKeyOperator
+                          keyExpressions: col 1:char(20)
+                          native: true
+                      Group By Operator
+                        aggregations: sum(_col1), count()
+                        Group By Vectorization:
+                            aggregators: VectorUDAFSumLong(col 3:int) -> bigint, VectorUDAFCountStar(*) -> bigint
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 1:char(20)
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: [0, 1]
+                        keys: _col0 (type: char(20))
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2
                         Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col1 (type: bigint), _col2 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: char(20))
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: char(20))
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkStringOperator
+                              native: true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint), _col2 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -298,31 +307,40 @@ STAGE PLANS:
                         projectedOutputColumnNums: [1, 3]
                         selectExpressions: CastStringToLong(col 0:char(10)) -> 3:int
                     Statistics: Num rows: 501 Data size: 89178 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: sum(_col1), count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumLong(col 3:int) -> bigint, VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 1:char(20)
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: [0, 1]
+                    Top N Key Operator
+                      sort order: -
                       keys: _col0 (type: char(20))
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: char(20))
-                        sort order: -
-                        Map-reduce partition columns: _col0 (type: char(20))
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkStringOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      Statistics: Num rows: 501 Data size: 89178 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 5
+                      Top N Key Vectorization:
+                          className: VectorTopNKeyOperator
+                          keyExpressions: col 1:char(20)
+                          native: true
+                      Group By Operator
+                        aggregations: sum(_col1), count()
+                        Group By Vectorization:
+                            aggregators: VectorUDAFSumLong(col 3:int) -> bigint, VectorUDAFCountStar(*) -> bigint
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 1:char(20)
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: [0, 1]
+                        keys: _col0 (type: char(20))
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2
                         Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col1 (type: bigint), _col2 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: char(20))
+                          sort order: -
+                          Map-reduce partition columns: _col0 (type: char(20))
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkStringOperator
+                              native: true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint), _col2 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_limit.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_limit.q.out
index bddde5f..1f49804 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_limit.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_limit.q.out
@@ -68,33 +68,42 @@ STAGE PLANS:
                         native: true
                         projectedOutputColumnNums: [0, 1]
                     Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 4:bigint
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: [0]
+                    Top N Key Operator
+                      sort order: +++
                       keys: a (type: string), b (type: string), 0L (type: bigint)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 24 Data size: 8832 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                        sort order: +++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkMultiKeyOperator
-                            keyColumnNums: [0, 1, 2]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            valueColumnNums: [3]
+                      Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
+                      top n: 10
+                      Top N Key Vectorization:
+                          className: VectorTopNKeyOperator
+                          keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 4:bigint
+                          native: true
+                      Group By Operator
+                        aggregations: count()
+                        Group By Vectorization:
+                            aggregators: VectorUDAFCountStar(*) -> bigint
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 5:bigint
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: [0]
+                        keys: a (type: string), b (type: string), 0L (type: bigint)
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2, _col3
                         Statistics: Num rows: 24 Data size: 8832 Basic stats: COMPLETE Column stats: NONE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col3 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
+                          sort order: +++
+                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkMultiKeyOperator
+                              keyColumnNums: [0, 1, 2]
+                              native: true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              valueColumnNums: [3]
+                          Statistics: Num rows: 24 Data size: 8832 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col3 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -111,7 +120,7 @@ STAGE PLANS:
                     includeColumns: [0, 1]
                     dataColumns: a:string, b:string, c:string
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [bigint]
+                    scratchColumnTypeNames: [bigint, bigint]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -269,33 +278,42 @@ STAGE PLANS:
                         native: true
                         projectedOutputColumnNums: [0, 1]
                     Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 4:bigint
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: [0]
+                    Top N Key Operator
+                      sort order: +++
                       keys: a (type: string), b (type: string), 0L (type: bigint)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 24 Data size: 8832 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                        sort order: +++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkMultiKeyOperator
-                            keyColumnNums: [0, 1, 2]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            valueColumnNums: [3]
+                      Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
+                      top n: 10
+                      Top N Key Vectorization:
+                          className: VectorTopNKeyOperator
+                          keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 4:bigint
+                          native: true
+                      Group By Operator
+                        aggregations: count()
+                        Group By Vectorization:
+                            aggregators: VectorUDAFCountStar(*) -> bigint
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 5:bigint
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: [0]
+                        keys: a (type: string), b (type: string), 0L (type: bigint)
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2, _col3
                         Statistics: Num rows: 24 Data size: 8832 Basic stats: COMPLETE Column stats: NONE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col3 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
+                          sort order: +++
+                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkMultiKeyOperator
+                              keyColumnNums: [0, 1, 2]
+                              native: true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              valueColumnNums: [3]
+                          Statistics: Num rows: 24 Data size: 8832 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col3 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -312,7 +330,7 @@ STAGE PLANS:
                     includeColumns: [0, 1]
                     dataColumns: a:string, b:string, c:string
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [bigint]
+                    scratchColumnTypeNames: [bigint, bigint]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -470,33 +488,42 @@ STAGE PLANS:
                         native: true
                         projectedOutputColumnNums: [0, 1]
                     Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 4:bigint
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: [0]
+                    Top N Key Operator
+                      sort order: +++
                       keys: a (type: string), b (type: string), 0L (type: bigint)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                        sort order: +++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkMultiKeyOperator
-                            keyColumnNums: [0, 1, 2]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            valueColumnNums: [3]
+                      Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
+                      top n: 10
+                      Top N Key Vectorization:
+                          className: VectorTopNKeyOperator
+                          keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 4:bigint
+                          native: true
+                      Group By Operator
+                        aggregations: count()
+                        Group By Vectorization:
+                            aggregators: VectorUDAFCountStar(*) -> bigint
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 5:bigint
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: [0]
+                        keys: a (type: string), b (type: string), 0L (type: bigint)
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2, _col3
                         Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col3 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
+                          sort order: +++
+                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkMultiKeyOperator
+                              keyColumnNums: [0, 1, 2]
+                              native: true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              valueColumnNums: [3]
+                          Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col3 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -513,7 +540,7 @@ STAGE PLANS:
                     includeColumns: [0, 1]
                     dataColumns: a:string, b:string, c:string
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [bigint]
+                    scratchColumnTypeNames: [bigint, bigint]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -671,30 +698,39 @@ STAGE PLANS:
                         native: true
                         projectedOutputColumnNums: [0, 1, 2]
                     Statistics: Num rows: 6 Data size: 3312 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 0:string, col 1:string, col 2:string, ConstantVectorExpression(val 0) -> 4:bigint
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: []
+                    Top N Key Operator
+                      sort order: ++++
                       keys: a (type: string), b (type: string), c (type: string), 0L (type: bigint)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 18 Data size: 9936 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: bigint)
-                        sort order: ++++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: bigint)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkMultiKeyOperator
-                            keyColumnNums: [0, 1, 2, 3]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            valueColumnNums: []
+                      Statistics: Num rows: 6 Data size: 3312 Basic stats: COMPLETE Column stats: NONE
+                      top n: 10
+                      Top N Key Vectorization:
+                          className: VectorTopNKeyOperator
+                          keyExpressions: col 0:string, col 1:string, col 2:string, ConstantVectorExpression(val 0) -> 4:bigint
+                          native: true
+                      Group By Operator
+                        Group By Vectorization:
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 0:string, col 1:string, col 2:string, ConstantVectorExpression(val 0) -> 5:bigint
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: []
+                        keys: a (type: string), b (type: string), c (type: string), 0L (type: bigint)
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2, _col3
                         Statistics: Num rows: 18 Data size: 9936 Basic stats: COMPLETE Column stats: NONE
-                        TopN Hash Memory Usage: 0.1
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: bigint)
+                          sort order: ++++
+                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: bigint)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkMultiKeyOperator
+                              keyColumnNums: [0, 1, 2, 3]
+                              native: true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              valueColumnNums: []
+                          Statistics: Num rows: 18 Data size: 9936 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -711,7 +747,7 @@ STAGE PLANS:
                     includeColumns: [0, 1, 2]
                     dataColumns: a:string, b:string, c:string
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [bigint]
+                    scratchColumnTypeNames: [bigint, bigint]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -866,30 +902,39 @@ STAGE PLANS:
                         native: true
                         projectedOutputColumnNums: [0]
                     Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 0:string
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: []
+                    Top N Key Operator
+                      sort order: +
                       keys: a (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
                       Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkStringOperator
-                            keyColumnNums: [0]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            valueColumnNums: []
+                      top n: 10
+                      Top N Key Vectorization:
+                          className: VectorTopNKeyOperator
+                          keyExpressions: col 0:string
+                          native: true
+                      Group By Operator
+                        Group By Vectorization:
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 0:string
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: []
+                        keys: a (type: string)
+                        mode: hash
+                        outputColumnNames: _col0
                         Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
-                        TopN Hash Memory Usage: 0.1
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkStringOperator
+                              keyColumnNums: [0]
+                              native: true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              valueColumnNums: []
+                          Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -1048,33 +1093,42 @@ STAGE PLANS:
                         projectedOutputColumnNums: [6]
                         selectExpressions: DoubleColAddDoubleColumn(col 4:double, col 5:double)(children: CastStringToDouble(col 0:string) -> 4:double, CastStringToDouble(col 1:string) -> 5:double) -> 6:double
                     Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 6:double
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: [0]
+                    Top N Key Operator
+                      sort order: +
                       keys: _col0 (type: double)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: double)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: double)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkMultiKeyOperator
-                            keyColumnNums: [0]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            valueColumnNums: [1]
+                      top n: 10
+                      Top N Key Vectorization:
+                          className: VectorTopNKeyOperator
+                          keyExpressions: col 6:double
+                          native: true
+                      Group By Operator
+                        aggregations: count()
+                        Group By Vectorization:
+                            aggregators: VectorUDAFCountStar(*) -> bigint
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 6:double
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: [0]
+                        keys: _col0 (type: double)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col1 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: double)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: double)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkMultiKeyOperator
+                              keyColumnNums: [0]
+                              native: true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              valueColumnNums: [1]
+                          Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
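
The hunks above show the Top N Key Operator from HIVE-17896 being planned directly ahead of the hash-mode Group By whenever the grouped query ends in an ORDER BY on the grouping keys with a LIMIT; the limit value surfaces as "top n: 10" (20, 50 and 100 appear in the later files), and the extra bigint in scratchColumnTypeNames lines up with the constant 0L key now being evaluated into two scratch columns (4 for the TopNKey keys, 5 for the Group By keys). As a rough sketch of the idea only, not the Hive implementation (the VectorTopNKeyOperator added by this commit works on vectorized row batches), the filter below keeps the n smallest keys seen so far and forwards a row only while its key can still land in that set, assuming an ascending sort as in the "sort order: +++" shown above:

    import java.util.Comparator;
    import java.util.TreeMap;

    /**
     * Minimal sketch of the top-n-key idea: keep the n smallest keys seen so
     * far and forward a row only if its key can still belong to that set.
     * Duplicates of an already-admitted key are forwarded too, since the
     * grouping key is what is ranked, not the individual row.
     */
    public class TopNKeyFilterSketch<K> {
      private final int topN;
      private final Comparator<? super K> cmp;
      // Admitted keys kept in sort order so the current n-th key is lastKey().
      private final TreeMap<K, Boolean> admitted;

      public TopNKeyFilterSketch(int topN, Comparator<? super K> cmp) {
        this.topN = topN;
        this.cmp = cmp;
        this.admitted = new TreeMap<>(cmp);
      }

      /** Returns true if the row carrying this key should be passed on. */
      public boolean canForward(K key) {
        if (admitted.containsKey(key)) {
          return true;                       // key already within the current top n
        }
        if (admitted.size() < topN) {
          admitted.put(key, Boolean.TRUE);   // still room: admit the key
          return true;
        }
        K largest = admitted.lastKey();
        if (cmp.compare(key, largest) < 0) { // key beats the current n-th key
          admitted.remove(largest);
          admitted.put(key, Boolean.TRUE);
          return true;
        }
        return false;                        // key can never reach the top n
      }

      public static void main(String[] args) {
        TopNKeyFilterSketch<Integer> f =
            new TopNKeyFilterSketch<>(2, Comparator.naturalOrder());
        int[] keys = {5, 3, 9, 3, 1, 7};
        for (int k : keys) {
          System.out.println(k + " -> " + f.canForward(k));
        }
        // prints: 5 -> true, 3 -> true, 9 -> false, 3 -> true, 1 -> true, 7 -> false
      }
    }

The downstream hash Group By then only ever sees rows for groups that can still appear in the final limited result, which is why the Reduce Output Operator below it is otherwise unchanged.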

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out
index 1235bda..bdcc286 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out
@@ -266,28 +266,37 @@ STAGE PLANS:
                         native: true
                         projectedOutputColumnNums: [9]
                     Statistics: Num rows: 1000 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 9:int
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: []
+                    Top N Key Operator
+                      sort order: +
                       keys: ss_ticket_number (type: int)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 85 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      Statistics: Num rows: 1000 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 20
+                      Top N Key Vectorization:
+                          className: VectorTopNKeyOperator
+                          keyExpressions: col 9:int
+                          native: true
+                      Group By Operator
+                        Group By Vectorization:
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 9:int
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: []
+                        keys: ss_ticket_number (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
                         Statistics: Num rows: 85 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkLongOperator
+                              native: true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          Statistics: Num rows: 85 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/vector_mr_diff_schema_alias.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_mr_diff_schema_alias.q.out b/ql/src/test/results/clientpositive/llap/vector_mr_diff_schema_alias.q.out
index fee5a5f..e81d7df 100644
--- a/ql/src/test/results/clientpositive/llap/vector_mr_diff_schema_alias.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_mr_diff_schema_alias.q.out
@@ -365,19 +365,24 @@ STAGE PLANS:
                   1 _col0 (type: int)
                 outputColumnNames: _col4
                 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
+                Top N Key Operator
+                  sort order: +
                   keys: _col4 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
+                  top n: 100
+                  Group By Operator
+                    aggregations: count()
+                    keys: _col4 (type: string)
+                    mode: hash
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
-                    TopN Hash Memory Usage: 0.1
-                    value expressions: _col1 (type: bigint)
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
+                      value expressions: _col1 (type: bigint)
         Reducer 4 
             Execution mode: vectorized, llap
             Reduce Vectorization:

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out
index 983c71d..f65712a 100644
--- a/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out
@@ -60,31 +60,40 @@ STAGE PLANS:
                         predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 2:decimal(20,10)), SelectColumnIsNotNull(col 3:decimal(23,14)))
                     predicate: (cdecimal1 is not null and cdecimal2 is not null) (type: boolean)
                     Statistics: Num rows: 5492 Data size: 1231540 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: min(cdecimal1)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFMinDecimal(col 2:decimal(20,10)) -> decimal(20,10)
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 0:int, col 1:double, col 2:decimal(20,10), col 3:decimal(23,14)
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: [0]
+                    Top N Key Operator
+                      sort order: ++++
                       keys: cint (type: int), cdouble (type: double), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
                       Statistics: Num rows: 5492 Data size: 1231540 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
-                        sort order: ++++
-                        Map-reduce partition columns: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkMultiKeyOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      top n: 50
+                      Top N Key Vectorization:
+                          className: VectorTopNKeyOperator
+                          keyExpressions: col 0:int, col 1:double, col 2:decimal(20,10), col 3:decimal(23,14)
+                          native: true
+                      Group By Operator
+                        aggregations: min(cdecimal1)
+                        Group By Vectorization:
+                            aggregators: VectorUDAFMinDecimal(col 2:decimal(20,10)) -> decimal(20,10)
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 0:int, col 1:double, col 2:decimal(20,10), col 3:decimal(23,14)
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: [0]
+                        keys: cint (type: int), cdouble (type: double), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2, _col3, _col4
                         Statistics: Num rows: 5492 Data size: 1231540 Basic stats: COMPLETE Column stats: NONE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col4 (type: decimal(20,10))
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
+                          sort order: ++++
+                          Map-reduce partition columns: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkMultiKeyOperator
+                              native: true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          Statistics: Num rows: 5492 Data size: 1231540 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col4 (type: decimal(20,10))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out b/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
index 38d9172..c6b3dcc 100644
--- a/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
@@ -352,28 +352,37 @@ STAGE PLANS:
                         projectedOutputColumnNums: [20]
                         selectExpressions: StringGroupConcatColCol(col 18:string, col 19:string)(children: StringGroupColConcatStringScalar(col 19:string, val -)(children: StringScalarConcatStringGroupCol(val Quarter , col 18:string)(children: CastLongToString(col 14:int)(children: CastDoubleToLong(col 16:double)(children: DoubleColAddDoubleScalar(col 17:double, val 1.0)(children: DoubleColDivideDoubleScalar(col 16:double, val 3.0)(children: CastLongToDouble(col 15:int)(children: LongColSubtractLongScalar(col 14:int, val 1)(children: VectorUDFMonthDate(col 12, field MONTH) -> 14:int) -> 15:int) -> 16:double) -> 17:double) -> 16:double) -> 14:int) -> 18:string) -> 19:string) -> 18:string, CastLongToString(col 14:int)(children: VectorUDFYearDate(col 12, field YEAR) -> 14:int) -> 19:string) -> 20:string
                     Statistics: Num rows: 2000 Data size: 106456 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 20:string
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: []
+                    Top N Key Operator
+                      sort order: +
                       keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
                       Statistics: Num rows: 2000 Data size: 106456 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkStringOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      top n: 50
+                      Top N Key Vectorization:
+                          className: VectorTopNKeyOperator
+                          keyExpressions: col 20:string
+                          native: true
+                      Group By Operator
+                        Group By Vectorization:
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 20:string
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: []
+                        keys: _col0 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0
                         Statistics: Num rows: 2000 Data size: 106456 Basic stats: COMPLETE Column stats: NONE
-                        TopN Hash Memory Usage: 0.1
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkStringOperator
+                              native: true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          Statistics: Num rows: 2000 Data size: 106456 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
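
The remaining files follow the same rewrite, including the row-mode case in vector_mr_diff_schema_alias.q.out where "Top N Key Operator ... top n: 100" appears without a vectorization block. For reproducing one of these plans against a running HiveServer2, a hypothetical check over JDBC could look like the sketch below; the URL, credentials, table name and the EXPLAIN VECTORIZATION DETAIL form are assumptions (the actual .q queries are not part of this excerpt), and only the "Top N Key Operator" marker string is taken from the plan text above.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    /**
     * Hypothetical smoke test: run an EXPLAIN over JDBC and look for the new
     * "Top N Key Operator" marker in the plan text. Endpoint, credentials and
     * the table name in the query are placeholders.
     */
    public class TopNKeyPlanCheck {
      public static void main(String[] args) throws Exception {
        // Only needed if the Hive JDBC driver is not auto-registered from its jar.
        Class.forName("org.apache.hive.jdbc.HiveDriver");
        String url = "jdbc:hive2://localhost:10000/default";
        String explain = "EXPLAIN VECTORIZATION DETAIL "
            + "SELECT a, b, count(*) FROM some_table GROUP BY a, b ORDER BY a, b LIMIT 10";
        boolean found = false;
        try (Connection conn = DriverManager.getConnection(url, "hive", "");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(explain)) {
          while (rs.next()) {
            String line = rs.getString(1);
            if (line != null && line.contains("Top N Key Operator")) {
              found = true;
            }
          }
        }
        System.out.println(found ? "plan contains Top N Key Operator"
                                 : "plan does NOT contain Top N Key Operator");
      }
    }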


[54/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/651e7950
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/651e7950
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/651e7950

Branch: refs/heads/master-txnstats
Commit: 651e7950977dd4e63da42648c38b03c3bf097e7f
Parents: f0a2fff 851c8ab
Author: sergey <se...@apache.org>
Authored: Thu Jul 19 14:44:10 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Thu Jul 19 14:44:10 2018 -0700

----------------------------------------------------------------------
 .gitignore                                      |     1 +
 RELEASE_NOTES.txt                               |     8 +
 .../org/apache/hive/beeline/BeeLineOpts.java    |    11 +-
 .../apache/hive/beeline/cli/TestHiveCli.java    |     2 -
 .../org/apache/hadoop/hive/conf/HiveConf.java   |     2 +
 .../HiveHBaseTableSnapshotInputFormat.java      |     4 +-
 hcatalog/core/pom.xml                           |     7 +
 hcatalog/webhcat/java-client/pom.xml            |     7 +
 hcatalog/webhcat/svr/pom.xml                    |     7 +
 itests/hcatalog-unit/pom.xml                    |     6 +
 itests/hive-blobstore/pom.xml                   |    13 +
 .../insert_overwrite_directory.q.out            |     2 +
 .../write_final_output_blobstore.q.out          |     8 +
 itests/hive-minikdc/pom.xml                     |    13 +
 itests/hive-unit-hadoop2/pom.xml                |     6 +
 itests/hive-unit/pom.xml                        |     8 +-
 itests/qtest-accumulo/pom.xml                   |    13 +
 itests/qtest-spark/pom.xml                      |    13 +
 itests/qtest/pom.xml                            |    13 +
 .../test/resources/testconfiguration.properties |     7 +-
 itests/util/pom.xml                             |     6 +
 llap-server/pom.xml                             |     7 +
 metastore/pom.xml                               |     5 +
 packaging/src/main/assembly/bin.xml             |     2 +-
 packaging/src/main/assembly/src.xml             |     1 +
 ql/pom.xml                                      |     8 +
 .../hadoop/hive/ql/plan/api/OperatorType.java   |     5 +-
 ...eColumnArithmeticIntervalYearMonthColumn.txt |     3 +-
 ...YearMonthColumnArithmeticTimestampColumn.txt |     4 +-
 .../java/org/apache/hadoop/hive/ql/Context.java |    10 +
 .../java/org/apache/hadoop/hive/ql/Driver.java  |     5 +-
 .../org/apache/hadoop/hive/ql/QueryPlan.java    |     9 +
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |    13 +-
 .../hadoop/hive/ql/exec/KeyWrapperFactory.java  |     2 +-
 .../hadoop/hive/ql/exec/OperatorFactory.java    |     4 +
 .../hadoop/hive/ql/exec/TableScanOperator.java  |     7 +
 .../hadoop/hive/ql/exec/TopNKeyOperator.java    |   214 +
 .../hadoop/hive/ql/exec/repl/ReplLoadTask.java  |     6 +-
 .../IncrementalLoadTasksBuilder.java            |    10 +-
 .../hive/ql/exec/vector/VectorAssignRow.java    |    20 +-
 .../ql/exec/vector/VectorTopNKeyOperator.java   |   304 +
 .../apache/hadoop/hive/ql/hooks/ATSHook.java    |     3 +-
 .../hive/ql/hooks/HiveProtoLoggingHook.java     |     3 +-
 .../metadata/HiveMaterializedViewsRegistry.java |    11 +-
 .../hive/ql/optimizer/TopNKeyProcessor.java     |   109 +
 .../ql/optimizer/calcite/RelOptHiveTable.java   |    73 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  |    37 +
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |    86 +-
 .../hive/ql/parse/ExplainSemanticAnalyzer.java  |     3 +-
 .../hadoop/hive/ql/parse/TezCompiler.java       |    27 +
 .../apache/hadoop/hive/ql/plan/ExplainWork.java |    14 +-
 .../apache/hadoop/hive/ql/plan/TopNKeyDesc.java |   139 +
 .../hadoop/hive/ql/plan/VectorTopNKeyDesc.java  |    39 +
 .../hadoop/hive/ql/txn/compactor/Worker.java    |     1 +
 .../hive/ql/udf/generic/GenericUDAFCount.java   |     6 +-
 .../hadoop/hive/ql/exec/TestExplainTask.java    |     2 +-
 .../ql/exec/vector/VectorRandomRowSource.java   |    35 +-
 .../vector/aggregation/AggregationBase.java     |    22 +-
 .../aggregation/TestVectorAggregation.java      |   133 +-
 .../expressions/TestVectorArithmetic.java       |   143 +-
 .../expressions/TestVectorCastStatement.java    |     2 +
 .../expressions/TestVectorDateAddSub.java       |     4 +
 .../vector/expressions/TestVectorDateDiff.java  |     4 +
 .../expressions/TestVectorIfStatement.java      |     2 +
 .../vector/expressions/TestVectorNegative.java  |     2 +
 .../expressions/TestVectorStringConcat.java     |     2 +
 .../expressions/TestVectorStringUnary.java      |     2 +
 .../vector/expressions/TestVectorSubStr.java    |     2 +
 .../expressions/TestVectorTimestampExtract.java |     3 +
 .../parse/TestUpdateDeleteSemanticAnalyzer.java |     2 +-
 ql/src/test/queries/clientpositive/bucket7.q    |    12 +
 ql/src/test/queries/clientpositive/topnkey.q    |    31 +
 .../queries/clientpositive/vector_topnkey.q     |    30 +
 .../results/clientpositive/acid_nullscan.q.out  |     3 +
 .../alter_partition_coltype.q.out               |    18 +
 .../results/clientpositive/ambiguitycheck.q.out |     3 +
 .../analyze_table_null_partition.q.out          |     2 +
 .../clientpositive/autoColumnStats_1.q.out      |     2 +
 .../clientpositive/autoColumnStats_2.q.out      |     2 +
 .../auto_join_reordering_values.q.out           |    17 +
 .../test/results/clientpositive/bucket1.q.out   |     2 +
 .../test/results/clientpositive/bucket2.q.out   |     2 +
 .../test/results/clientpositive/bucket3.q.out   |     2 +
 .../test/results/clientpositive/bucket7.q.out   |    56 +
 .../clientpositive/bucket_map_join_spark1.q.out |    14 +
 .../clientpositive/bucket_map_join_spark2.q.out |    14 +
 .../clientpositive/bucket_map_join_spark3.q.out |    14 +
 .../clientpositive/bucket_map_join_spark4.q.out |    20 +
 .../test/results/clientpositive/combine2.q.out  |     3 +
 .../test/results/clientpositive/comments.q.out  |     7 +
 .../constantPropagateForSubQuery.q.out          |     6 +
 .../dynamic_partition_skip_default.q.out        |     9 +
 .../encryption_join_unencrypted_tbl.q.out       |     7 +
 ...on_join_with_different_encryption_keys.q.out |     7 +
 .../erasurecoding/erasure_explain.q.out         |     4 +
 .../extrapolate_part_stats_date.q.out           |     2 +
 .../extrapolate_part_stats_full.q.out           |     8 +
 .../extrapolate_part_stats_partial.q.out        |    12 +
 .../results/clientpositive/filter_aggr.q.out    |     3 +
 .../clientpositive/filter_join_breaktask.q.out  |    10 +
 .../results/clientpositive/filter_union.q.out   |     7 +
 .../clientpositive/groupby_sort_1_23.q.out      |    71 +
 .../results/clientpositive/groupby_sort_6.q.out |    12 +
 .../clientpositive/groupby_sort_skew_1_23.q.out |    71 +
 .../test/results/clientpositive/input23.q.out   |     8 +
 ql/src/test/results/clientpositive/input4.q.out |     2 +-
 .../test/results/clientpositive/input42.q.out   |     9 +
 .../results/clientpositive/input_part1.q.out    |     3 +
 .../results/clientpositive/input_part9.q.out    |     3 +
 ql/src/test/results/clientpositive/join17.q.out |     7 +
 ql/src/test/results/clientpositive/join26.q.out |    10 +
 ql/src/test/results/clientpositive/join32.q.out |    10 +
 ql/src/test/results/clientpositive/join33.q.out |    10 +
 ql/src/test/results/clientpositive/join34.q.out |    11 +
 ql/src/test/results/clientpositive/join35.q.out |    13 +
 ql/src/test/results/clientpositive/join9.q.out  |     7 +
 .../clientpositive/join_filters_overlap.q.out   |    50 +
 .../clientpositive/list_bucket_dml_1.q.out      |     6 +
 .../clientpositive/list_bucket_dml_11.q.out     |     5 +
 .../clientpositive/list_bucket_dml_12.q.out     |     8 +
 .../clientpositive/list_bucket_dml_13.q.out     |     5 +
 .../clientpositive/list_bucket_dml_14.q.out     |     5 +
 .../clientpositive/list_bucket_dml_2.q.out      |     6 +
 .../clientpositive/list_bucket_dml_3.q.out      |     6 +
 .../clientpositive/list_bucket_dml_4.q.out      |     9 +
 .../clientpositive/list_bucket_dml_5.q.out      |     6 +
 .../clientpositive/list_bucket_dml_6.q.out      |     9 +
 .../clientpositive/list_bucket_dml_7.q.out      |     9 +
 .../clientpositive/list_bucket_dml_8.q.out      |     6 +
 .../clientpositive/list_bucket_dml_9.q.out      |     9 +
 .../list_bucket_query_multiskew_1.q.out         |    12 +
 .../list_bucket_query_multiskew_2.q.out         |     9 +
 .../list_bucket_query_multiskew_3.q.out         |    12 +
 .../list_bucket_query_oneskew_1.q.out           |     9 +
 .../list_bucket_query_oneskew_2.q.out           |    14 +
 .../list_bucket_query_oneskew_3.q.out           |     3 +
 .../llap/acid_bucket_pruning.q.out              |     3 +
 .../clientpositive/llap/autoColumnStats_1.q.out |     2 +
 .../clientpositive/llap/autoColumnStats_2.q.out |     2 +
 .../llap/auto_sortmerge_join_1.q.out            |    21 +
 .../llap/auto_sortmerge_join_11.q.out           |    14 +
 .../llap/auto_sortmerge_join_12.q.out           |    12 +
 .../llap/auto_sortmerge_join_2.q.out            |    14 +
 .../llap/auto_sortmerge_join_3.q.out            |    21 +
 .../llap/auto_sortmerge_join_4.q.out            |    21 +
 .../llap/auto_sortmerge_join_5.q.out            |    21 +
 .../llap/auto_sortmerge_join_7.q.out            |    21 +
 .../llap/auto_sortmerge_join_8.q.out            |    21 +
 .../results/clientpositive/llap/bucket2.q.out   |     2 +
 .../results/clientpositive/llap/bucket3.q.out   |     2 +
 .../results/clientpositive/llap/bucket4.q.out   |     2 +
 .../clientpositive/llap/bucket_groupby.q.out    |   274 +-
 .../clientpositive/llap/bucket_many.q.out       |     2 +
 .../llap/bucket_num_reducers.q.out              |     2 +
 .../llap/bucket_num_reducers2.q.out             |     2 +
 .../clientpositive/llap/bucketmapjoin1.q.out    |    28 +
 .../clientpositive/llap/bucketmapjoin2.q.out    |    21 +
 .../clientpositive/llap/bucketmapjoin3.q.out    |    14 +
 .../clientpositive/llap/bucketmapjoin4.q.out    |    14 +
 .../clientpositive/llap/bucketpruning1.q.out    |    54 +
 .../clientpositive/llap/check_constraint.q.out  |    27 +-
 .../llap/current_date_timestamp.q.out           |     2 +
 .../llap/disable_merge_for_bucketing.q.out      |     2 +
 .../llap/dynamic_semijoin_reduction.q.out       |     7 +
 .../llap/dynamic_semijoin_user_level.q.out      |     7 +
 .../clientpositive/llap/explainuser_1.q.out     |    28 +-
 .../clientpositive/llap/explainuser_2.q.out     |   406 +-
 .../extrapolate_part_stats_partial_ndv.q.out    |     6 +
 .../llap/filter_join_breaktask.q.out            |    10 +
 .../clientpositive/llap/filter_union.q.out      |     7 +
 .../clientpositive/llap/join32_lessSize.q.out   |    44 +
 .../clientpositive/llap/limit_pushdown.q.out    |   135 +-
 .../clientpositive/llap/limit_pushdown3.q.out   |    89 +-
 .../llap/list_bucket_dml_10.q.out               |     2 +
 .../llap/llap_decimal64_reader.q.out            |    46 +-
 .../clientpositive/llap/mapjoin_mapjoin.q.out   |    10 +
 .../clientpositive/llap/metadataonly1.q.out     |    27 +
 .../clientpositive/llap/offset_limit.q.out      |    27 +-
 .../llap/offset_limit_ppd_optimizer.q.out       |    85 +-
 .../llap/orc_struct_type_vectorization.q.out    |    53 +-
 .../parquet_complex_types_vectorization.q.out   |   159 +-
 .../llap/parquet_map_type_vectorization.q.out   |    53 +-
 .../parquet_struct_type_vectorization.q.out     |    53 +-
 .../clientpositive/llap/partition_pruning.q.out |     9 +
 .../clientpositive/llap/ppd_union_view.q.out    |    24 +
 .../clientpositive/llap/smb_mapjoin_15.q.out    |    36 +
 .../results/clientpositive/llap/stats11.q.out   |    14 +
 .../llap/tez_fixed_bucket_pruning.q.out         |    32 +
 .../llap/tez_join_result_complex.q.out          |    14 +
 .../results/clientpositive/llap/topnkey.q.out   |   318 +
 .../clientpositive/llap/unionDistinct_1.q.out   |    74 +
 .../clientpositive/llap/union_stats.q.out       |    93 +-
 .../llap/vector_cast_constant.q.out             |    55 +-
 .../clientpositive/llap/vector_char_2.q.out     |   110 +-
 .../vector_groupby_grouping_sets_limit.q.out    |   346 +-
 .../llap/vector_groupby_reduce.q.out            |    49 +-
 .../llap/vector_mr_diff_schema_alias.q.out      |    25 +-
 .../llap/vector_reduce_groupby_decimal.q.out    |    53 +-
 .../llap/vector_string_concat.q.out             |    47 +-
 .../clientpositive/llap/vector_topnkey.q.out    |   592 +
 .../clientpositive/llap/vectorization_0.q.out   |     9 +
 .../llap/vectorization_limit.q.out              |    63 +-
 .../clientpositive/louter_join_ppr.q.out        |    28 +
 ql/src/test/results/clientpositive/macro.q.out  |     9 +
 .../clientpositive/mapjoin_mapjoin.q.out        |    10 +
 ql/src/test/results/clientpositive/merge3.q.out |     5 +
 .../offset_limit_global_optimizer.q.out         |    20 +
 .../results/clientpositive/outer_join_ppr.q.out |    14 +
 .../parquet_vectorization_0.q.out               |     9 +
 ql/src/test/results/clientpositive/pcr.q.out    |    82 +
 .../clientpositive/perf/tez/query10.q.out       |   346 +-
 .../clientpositive/perf/tez/query14.q.out       |  2198 +--
 .../clientpositive/perf/tez/query15.q.out       |   138 +-
 .../clientpositive/perf/tez/query17.q.out       |   372 +-
 .../clientpositive/perf/tez/query25.q.out       |   366 +-
 .../clientpositive/perf/tez/query26.q.out       |   226 +-
 .../clientpositive/perf/tez/query27.q.out       |   230 +-
 .../clientpositive/perf/tez/query29.q.out       |   374 +-
 .../clientpositive/perf/tez/query35.q.out       |   346 +-
 .../clientpositive/perf/tez/query37.q.out       |   142 +-
 .../clientpositive/perf/tez/query40.q.out       |   206 +-
 .../clientpositive/perf/tez/query43.q.out       |   128 +-
 .../clientpositive/perf/tez/query45.q.out       |   272 +-
 .../clientpositive/perf/tez/query49.q.out       |   478 +-
 .../clientpositive/perf/tez/query5.q.out        |   542 +-
 .../clientpositive/perf/tez/query50.q.out       |   250 +-
 .../clientpositive/perf/tez/query60.q.out       |   546 +-
 .../clientpositive/perf/tez/query66.q.out       |   452 +-
 .../clientpositive/perf/tez/query69.q.out       |   364 +-
 .../clientpositive/perf/tez/query7.q.out        |   226 +-
 .../clientpositive/perf/tez/query76.q.out       |   356 +-
 .../clientpositive/perf/tez/query77.q.out       |   562 +-
 .../clientpositive/perf/tez/query8.q.out        |   276 +-
 .../clientpositive/perf/tez/query80.q.out       |   756 +-
 .../clientpositive/perf/tez/query82.q.out       |   142 +-
 .../clientpositive/perf/tez/query99.q.out       |   230 +-
 .../test/results/clientpositive/plan_json.q.out |     2 +-
 .../results/clientpositive/pointlookup2.q.out   |    56 +
 .../results/clientpositive/pointlookup3.q.out   |    53 +
 .../results/clientpositive/pointlookup4.q.out   |     4 +
 .../clientpositive/ppd_join_filter.q.out        |    36 +
 ql/src/test/results/clientpositive/ppd_vc.q.out |    11 +
 .../clientpositive/ppr_allchildsarenull.q.out   |     6 +
 .../test/results/clientpositive/push_or.q.out   |     4 +
 .../clientpositive/rand_partitionpruner1.q.out  |     3 +
 .../clientpositive/rand_partitionpruner2.q.out  |     3 +
 .../clientpositive/rand_partitionpruner3.q.out  |     6 +
 .../clientpositive/router_join_ppr.q.out        |    28 +
 .../clientpositive/serde_user_properties.q.out  |     8 +
 .../spark/auto_join_reordering_values.q.out     |    17 +
 .../spark/auto_sortmerge_join_1.q.out           |    21 +
 .../spark/auto_sortmerge_join_12.q.out          |    12 +
 .../spark/auto_sortmerge_join_2.q.out           |    14 +
 .../spark/auto_sortmerge_join_3.q.out           |    21 +
 .../spark/auto_sortmerge_join_4.q.out           |    21 +
 .../spark/auto_sortmerge_join_5.q.out           |    21 +
 .../spark/auto_sortmerge_join_7.q.out           |    21 +
 .../spark/auto_sortmerge_join_8.q.out           |    21 +
 .../results/clientpositive/spark/bucket2.q.out  |     2 +
 .../results/clientpositive/spark/bucket3.q.out  |     2 +
 .../results/clientpositive/spark/bucket4.q.out  |     2 +
 .../clientpositive/spark/bucket4.q.out_spark    |     2 +
 .../results/clientpositive/spark/bucket7.q.out  |    56 +
 .../spark/bucket_map_join_spark1.q.out          |    14 +
 .../spark/bucket_map_join_spark2.q.out          |    14 +
 .../spark/bucket_map_join_spark3.q.out          |    14 +
 .../spark/bucket_map_join_spark4.q.out          |    20 +
 .../clientpositive/spark/bucketmapjoin1.q.out   |    28 +
 .../clientpositive/spark/bucketmapjoin2.q.out   |    21 +
 .../clientpositive/spark/bucketmapjoin3.q.out   |    14 +
 .../clientpositive/spark/bucketmapjoin4.q.out   |    14 +
 .../spark/disable_merge_for_bucketing.q.out     |     2 +
 .../disable_merge_for_bucketing.q.out_spark     |     2 +
 .../spark/filter_join_breaktask.q.out           |    10 +
 .../spark/groupby_sort_1_23.q.out               |    71 +
 .../spark/groupby_sort_skew_1_23.q.out          |    71 +
 .../results/clientpositive/spark/join17.q.out   |     7 +
 .../results/clientpositive/spark/join26.q.out   |    10 +
 .../results/clientpositive/spark/join32.q.out   |    10 +
 .../clientpositive/spark/join32_lessSize.q.out  |    44 +
 .../results/clientpositive/spark/join33.q.out   |    10 +
 .../results/clientpositive/spark/join34.q.out   |    11 +
 .../results/clientpositive/spark/join35.q.out   |    13 +
 .../results/clientpositive/spark/join9.q.out    |     7 +
 .../spark/join_filters_overlap.q.out            |    50 +
 .../spark/list_bucket_dml_10.q.out              |     2 +
 .../spark/list_bucket_dml_2.q.out               |     6 +
 .../clientpositive/spark/louter_join_ppr.q.out  |    28 +
 .../clientpositive/spark/mapjoin_mapjoin.q.out  |    10 +
 .../clientpositive/spark/outer_join_ppr.q.out   |    14 +
 .../spark/parquet_vectorization_0.q.out         |     9 +
 .../test/results/clientpositive/spark/pcr.q.out |    82 +
 .../clientpositive/spark/ppd_join_filter.q.out  |    36 +
 .../clientpositive/spark/router_join_ppr.q.out  |    28 +
 .../clientpositive/spark/smb_mapjoin_15.q.out   |    36 +
 .../spark/spark_union_merge.q.out               |    14 +
 .../results/clientpositive/spark/stats0.q.out   |     4 +
 .../results/clientpositive/spark/union22.q.out  |    11 +
 .../results/clientpositive/spark/union24.q.out  |    47 +
 .../clientpositive/spark/vectorization_0.q.out  |     9 +
 ql/src/test/results/clientpositive/stats0.q.out |     4 +
 .../results/clientpositive/tez/topnkey.q.out    |   162 +
 .../clientpositive/tez/vector_topnkey.q.out     |   162 +
 .../test/results/clientpositive/topnkey.q.out   |   301 +
 .../truncate_column_list_bucket.q.out           |     6 +
 .../results/clientpositive/udf_reflect2.q.out   |     3 +
 .../test/results/clientpositive/union22.q.out   |    11 +
 .../test/results/clientpositive/union24.q.out   |    47 +
 .../clientpositive/vector_outer_join3.q.out     |     6 +-
 .../clientpositive/vector_outer_join4.q.out     |     6 +-
 .../clientpositive/vector_outer_join6.q.out     |     4 +-
 .../results/clientpositive/vector_topnkey.q.out |   480 +
 .../objectinspector/ObjectInspectorUtils.java   |    19 +
 service/pom.xml                                 |     7 +
 standalone-metastore/metastore-common/pom.xml   |   128 -
 .../metastore-common/src/assembly/bin.xml       |    28 -
 .../hadoop/hive/common/StatsSetupConst.java     |   336 -
 .../common/ndv/NumDistinctValueEstimator.java   |    51 -
 .../ndv/NumDistinctValueEstimatorFactory.java   |    75 -
 .../hadoop/hive/common/ndv/fm/FMSketch.java     |   359 -
 .../hive/common/ndv/fm/FMSketchUtils.java       |   132 -
 .../hive/common/ndv/hll/HLLConstants.java       |   933 --
 .../hive/common/ndv/hll/HLLDenseRegister.java   |   202 -
 .../hadoop/hive/common/ndv/hll/HLLRegister.java |    50 -
 .../hive/common/ndv/hll/HLLSparseRegister.java  |   261 -
 .../hadoop/hive/common/ndv/hll/HyperLogLog.java |   664 -
 .../hive/common/ndv/hll/HyperLogLogUtils.java   |   409 -
 .../hive/metastore/AcidEventListener.java       |   146 -
 .../hive/metastore/AggregateStatsCache.java     |   571 -
 .../hadoop/hive/metastore/AlterHandler.java     |   204 -
 .../apache/hadoop/hive/metastore/Batchable.java |    86 -
 .../hadoop/hive/metastore/ColumnType.java       |   301 -
 .../hadoop/hive/metastore/DatabaseProduct.java  |    75 -
 .../apache/hadoop/hive/metastore/Deadline.java  |   172 -
 .../hive/metastore/DeadlineException.java       |    29 -
 .../DefaultPartitionExpressionProxy.java        |    57 -
 .../metastore/DefaultStorageSchemaReader.java   |    38 -
 .../hive/metastore/FileMetadataHandler.java     |   109 -
 .../hive/metastore/FileMetadataManager.java     |   119 -
 .../hive/metastore/HMSMetricsListener.java      |    90 -
 .../hadoop/hive/metastore/HiveAlterHandler.java |   961 --
 .../hadoop/hive/metastore/HiveMetaStore.java    |  9602 -------------
 .../hive/metastore/HiveMetaStoreClient.java     |  3597 -----
 .../hive/metastore/HiveMetaStoreFsImpl.java     |    55 -
 .../hive/metastore/IExtrapolatePartStatus.java  |    85 -
 .../hadoop/hive/metastore/IHMSHandler.java      |   109 -
 .../hadoop/hive/metastore/IMetaStoreClient.java |  3757 -----
 .../hive/metastore/IMetaStoreSchemaInfo.java    |   115 -
 .../metastore/LinearExtrapolatePartStatus.java  |   106 -
 .../hive/metastore/LockComponentBuilder.java    |   121 -
 .../hive/metastore/LockRequestBuilder.java      |   168 -
 .../MaterializationsRebuildLockCleanerTask.java |    81 -
 .../MaterializationsRebuildLockHandler.java     |   216 -
 .../hive/metastore/MetaStoreDirectSql.java      |  2837 ----
 .../metastore/MetaStoreEndFunctionContext.java  |    59 -
 .../metastore/MetaStoreEndFunctionListener.java |    58 -
 .../hive/metastore/MetaStoreEventListener.java  |   306 -
 .../MetaStoreEventListenerConstants.java        |    41 -
 .../hadoop/hive/metastore/MetaStoreFS.java      |    43 -
 .../hadoop/hive/metastore/MetaStoreInit.java    |   109 -
 .../hive/metastore/MetaStoreInitContext.java    |    27 -
 .../hive/metastore/MetaStoreInitListener.java   |    49 -
 .../metastore/MetaStoreListenerNotifier.java    |   375 -
 .../metastore/MetaStorePreEventListener.java    |    57 -
 .../hive/metastore/MetaStoreSchemaInfo.java     |   246 -
 .../metastore/MetaStoreSchemaInfoFactory.java   |    64 -
 .../hadoop/hive/metastore/MetaStoreThread.java  |    58 -
 .../hadoop/hive/metastore/MetadataStore.java    |    52 -
 .../hive/metastore/MetastoreTaskThread.java     |    38 -
 .../hadoop/hive/metastore/ObjectStore.java      | 12509 -----------------
 .../hive/metastore/PartFilterExprUtil.java      |   165 -
 .../metastore/PartitionExpressionProxy.java     |    73 -
 .../apache/hadoop/hive/metastore/RawStore.java  |  1719 ---
 .../hadoop/hive/metastore/RawStoreProxy.java    |   114 -
 .../hive/metastore/ReplChangeManager.java       |   501 -
 .../hive/metastore/RetryingHMSHandler.java      |   232 -
 .../hive/metastore/RetryingMetaStoreClient.java |   341 -
 .../hive/metastore/RuntimeStatsCleanerTask.java |    66 -
 .../metastore/SessionPropertiesListener.java    |    46 -
 .../hive/metastore/StatObjectConverter.java     |   892 --
 .../hive/metastore/TServerSocketKeepAlive.java  |    47 -
 .../hive/metastore/TSetIpAddressProcessor.java  |    62 -
 .../hive/metastore/TUGIBasedProcessor.java      |   183 -
 .../hadoop/hive/metastore/TableIterable.java    |   115 -
 .../hadoop/hive/metastore/ThreadPool.java       |    63 -
 .../TransactionalMetaStoreEventListener.java    |    39 -
 .../TransactionalValidationListener.java        |   487 -
 .../apache/hadoop/hive/metastore/Warehouse.java |   756 -
 .../hive/metastore/api/utils/DecimalUtils.java  |    49 -
 .../hive/metastore/cache/ByteArrayWrapper.java  |    45 -
 .../hadoop/hive/metastore/cache/CacheUtils.java |   136 -
 .../hive/metastore/cache/CachedStore.java       |  2532 ----
 .../hive/metastore/cache/SharedCache.java       |  1650 ---
 .../client/builder/CatalogBuilder.java          |    62 -
 .../client/builder/ConstraintBuilder.java       |   115 -
 .../client/builder/DatabaseBuilder.java         |   122 -
 .../client/builder/FunctionBuilder.java         |   143 -
 .../GrantRevokePrivilegeRequestBuilder.java     |    63 -
 .../builder/HiveObjectPrivilegeBuilder.java     |    69 -
 .../client/builder/HiveObjectRefBuilder.java    |    69 -
 .../client/builder/ISchemaBuilder.java          |   102 -
 .../client/builder/PartitionBuilder.java        |   119 -
 .../builder/PrivilegeGrantInfoBuilder.java      |    84 -
 .../metastore/client/builder/RoleBuilder.java   |    55 -
 .../builder/SQLCheckConstraintBuilder.java      |    51 -
 .../builder/SQLDefaultConstraintBuilder.java    |    51 -
 .../client/builder/SQLForeignKeyBuilder.java    |   103 -
 .../builder/SQLNotNullConstraintBuilder.java    |    52 -
 .../client/builder/SQLPrimaryKeyBuilder.java    |    52 -
 .../builder/SQLUniqueConstraintBuilder.java     |    46 -
 .../client/builder/SchemaVersionBuilder.java    |   114 -
 .../client/builder/SerdeAndColsBuilder.java     |   124 -
 .../builder/StorageDescriptorBuilder.java       |   163 -
 .../metastore/client/builder/TableBuilder.java  |   224 -
 .../aggr/BinaryColumnStatsAggregator.java       |    61 -
 .../aggr/BooleanColumnStatsAggregator.java      |    62 -
 .../columnstats/aggr/ColumnStatsAggregator.java |    35 -
 .../aggr/ColumnStatsAggregatorFactory.java      |   113 -
 .../aggr/DateColumnStatsAggregator.java         |   360 -
 .../aggr/DecimalColumnStatsAggregator.java      |   375 -
 .../aggr/DoubleColumnStatsAggregator.java       |   348 -
 .../aggr/IExtrapolatePartStatus.java            |    47 -
 .../aggr/LongColumnStatsAggregator.java         |   348 -
 .../aggr/StringColumnStatsAggregator.java       |   304 -
 .../cache/DateColumnStatsDataInspector.java     |   124 -
 .../cache/DecimalColumnStatsDataInspector.java  |   124 -
 .../cache/DoubleColumnStatsDataInspector.java   |   124 -
 .../cache/LongColumnStatsDataInspector.java     |   124 -
 .../cache/StringColumnStatsDataInspector.java   |   125 -
 .../merge/BinaryColumnStatsMerger.java          |    35 -
 .../merge/BooleanColumnStatsMerger.java         |    35 -
 .../columnstats/merge/ColumnStatsMerger.java    |    31 -
 .../merge/ColumnStatsMergerFactory.java         |   120 -
 .../merge/DateColumnStatsMerger.java            |    59 -
 .../merge/DecimalColumnStatsMerger.java         |    85 -
 .../merge/DoubleColumnStatsMerger.java          |    54 -
 .../merge/LongColumnStatsMerger.java            |    54 -
 .../merge/StringColumnStatsMerger.java          |    54 -
 .../metastore/conf/ConfTemplatePrinter.java     |   150 -
 .../hive/metastore/conf/MetastoreConf.java      |  1688 ---
 .../hive/metastore/conf/TimeValidator.java      |    67 -
 .../datasource/BoneCPDataSourceProvider.java    |    87 -
 .../datasource/DataSourceProvider.java          |    79 -
 .../datasource/DataSourceProviderFactory.java   |    66 -
 .../datasource/DbCPDataSourceProvider.java      |   117 -
 .../datasource/HikariCPDataSourceProvider.java  |    89 -
 .../hive/metastore/datasource/package-info.java |    23 -
 .../hive/metastore/events/AbortTxnEvent.java    |    51 -
 .../hive/metastore/events/AcidWriteEvent.java   |    91 -
 .../metastore/events/AddForeignKeyEvent.java    |    41 -
 .../events/AddNotNullConstraintEvent.java       |    42 -
 .../metastore/events/AddPartitionEvent.java     |    84 -
 .../metastore/events/AddPrimaryKeyEvent.java    |    42 -
 .../metastore/events/AddSchemaVersionEvent.java |    40 -
 .../events/AddUniqueConstraintEvent.java        |    42 -
 .../metastore/events/AllocWriteIdEvent.java     |    57 -
 .../metastore/events/AlterCatalogEvent.java     |    44 -
 .../metastore/events/AlterDatabaseEvent.java    |    56 -
 .../metastore/events/AlterISchemaEvent.java     |    45 -
 .../metastore/events/AlterPartitionEvent.java   |    75 -
 .../events/AlterSchemaVersionEvent.java         |    46 -
 .../hive/metastore/events/AlterTableEvent.java  |    63 -
 .../hive/metastore/events/CommitTxnEvent.java   |    51 -
 .../metastore/events/ConfigChangeEvent.java     |    52 -
 .../metastore/events/CreateCatalogEvent.java    |    39 -
 .../metastore/events/CreateDatabaseEvent.java   |    43 -
 .../metastore/events/CreateFunctionEvent.java   |    43 -
 .../metastore/events/CreateISchemaEvent.java    |    39 -
 .../hive/metastore/events/CreateTableEvent.java |    43 -
 .../hive/metastore/events/DropCatalogEvent.java |    39 -
 .../metastore/events/DropConstraintEvent.java   |    57 -
 .../metastore/events/DropDatabaseEvent.java     |    43 -
 .../metastore/events/DropFunctionEvent.java     |    43 -
 .../hive/metastore/events/DropISchemaEvent.java |    39 -
 .../metastore/events/DropPartitionEvent.java    |    70 -
 .../events/DropSchemaVersionEvent.java          |    40 -
 .../hive/metastore/events/DropTableEvent.java   |    54 -
 .../hive/metastore/events/EventCleanerTask.java |    66 -
 .../hive/metastore/events/InsertEvent.java      |   132 -
 .../hive/metastore/events/ListenerEvent.java    |   187 -
 .../events/LoadPartitionDoneEvent.java          |    57 -
 .../hive/metastore/events/OpenTxnEvent.java     |    51 -
 .../metastore/events/PreAddPartitionEvent.java  |    79 -
 .../events/PreAddSchemaVersionEvent.java        |    39 -
 .../metastore/events/PreAlterCatalogEvent.java  |    40 -
 .../metastore/events/PreAlterDatabaseEvent.java |    47 -
 .../metastore/events/PreAlterISchemaEvent.java  |    44 -
 .../events/PreAlterPartitionEvent.java          |    65 -
 .../events/PreAlterSchemaVersionEvent.java      |    45 -
 .../metastore/events/PreAlterTableEvent.java    |    53 -
 .../events/PreAuthorizationCallEvent.java       |    33 -
 .../metastore/events/PreCreateCatalogEvent.java |    39 -
 .../events/PreCreateDatabaseEvent.java          |    43 -
 .../metastore/events/PreCreateISchemaEvent.java |    39 -
 .../metastore/events/PreCreateTableEvent.java   |    43 -
 .../metastore/events/PreDropCatalogEvent.java   |    39 -
 .../metastore/events/PreDropDatabaseEvent.java  |    43 -
 .../metastore/events/PreDropISchemaEvent.java   |    39 -
 .../metastore/events/PreDropPartitionEvent.java |    67 -
 .../events/PreDropSchemaVersionEvent.java       |    39 -
 .../metastore/events/PreDropTableEvent.java     |    55 -
 .../hive/metastore/events/PreEventContext.java  |    82 -
 .../events/PreLoadPartitionDoneEvent.java       |    64 -
 .../metastore/events/PreReadCatalogEvent.java   |    39 -
 .../metastore/events/PreReadDatabaseEvent.java  |    46 -
 .../metastore/events/PreReadISchemaEvent.java   |    39 -
 .../metastore/events/PreReadTableEvent.java     |    47 -
 .../events/PreReadhSchemaVersionEvent.java      |    36 -
 .../metastore/hooks/JDOConnectionURLHook.java   |    52 -
 .../metastore/messaging/AbortTxnMessage.java    |    36 -
 .../metastore/messaging/AcidWriteMessage.java   |    50 -
 .../messaging/AddForeignKeyMessage.java         |    36 -
 .../messaging/AddNotNullConstraintMessage.java  |    36 -
 .../messaging/AddPartitionMessage.java          |    68 -
 .../messaging/AddPrimaryKeyMessage.java         |    35 -
 .../messaging/AddUniqueConstraintMessage.java   |    36 -
 .../messaging/AllocWriteIdMessage.java          |    36 -
 .../messaging/AlterCatalogMessage.java          |    29 -
 .../messaging/AlterDatabaseMessage.java         |    36 -
 .../messaging/AlterPartitionMessage.java        |    69 -
 .../metastore/messaging/AlterTableMessage.java  |    58 -
 .../metastore/messaging/CommitTxnMessage.java   |    59 -
 .../messaging/CreateCatalogMessage.java         |    25 -
 .../messaging/CreateDatabaseMessage.java        |    31 -
 .../messaging/CreateFunctionMessage.java        |    46 -
 .../metastore/messaging/CreateTableMessage.java |    53 -
 .../metastore/messaging/DropCatalogMessage.java |    25 -
 .../messaging/DropConstraintMessage.java        |    29 -
 .../messaging/DropDatabaseMessage.java          |    27 -
 .../messaging/DropFunctionMessage.java          |    38 -
 .../messaging/DropPartitionMessage.java         |    49 -
 .../metastore/messaging/DropTableMessage.java   |    46 -
 .../hive/metastore/messaging/EventMessage.java  |   127 -
 .../hive/metastore/messaging/EventUtils.java    |   202 -
 .../hive/metastore/messaging/InsertMessage.java |    75 -
 .../messaging/MessageDeserializer.java          |   200 -
 .../metastore/messaging/MessageFactory.java     |   341 -
 .../metastore/messaging/OpenTxnMessage.java     |    38 -
 .../metastore/messaging/PartitionFiles.java     |    53 -
 .../messaging/event/filters/AndFilter.java      |    39 -
 .../messaging/event/filters/BasicFilter.java    |    33 -
 .../event/filters/DatabaseAndTableFilter.java   |    65 -
 .../event/filters/EventBoundaryFilter.java      |    34 -
 .../event/filters/MessageFormatFilter.java      |    36 -
 .../messaging/json/JSONAbortTxnMessage.java     |    88 -
 .../messaging/json/JSONAcidWriteMessage.java    |   150 -
 .../json/JSONAddForeignKeyMessage.java          |   102 -
 .../json/JSONAddNotNullConstraintMessage.java   |    97 -
 .../messaging/json/JSONAddPartitionMessage.java |   175 -
 .../json/JSONAddPrimaryKeyMessage.java          |   102 -
 .../json/JSONAddUniqueConstraintMessage.java    |    99 -
 .../messaging/json/JSONAllocWriteIdMessage.java |   113 -
 .../messaging/json/JSONAlterCatalogMessage.java |    90 -
 .../json/JSONAlterDatabaseMessage.java          |    97 -
 .../json/JSONAlterPartitionMessage.java         |   153 -
 .../messaging/json/JSONAlterTableMessage.java   |   128 -
 .../messaging/json/JSONCommitTxnMessage.java    |   183 -
 .../json/JSONCreateCatalogMessage.java          |    80 -
 .../json/JSONCreateDatabaseMessage.java         |    85 -
 .../json/JSONCreateFunctionMessage.java         |    87 -
 .../messaging/json/JSONCreateTableMessage.java  |   134 -
 .../messaging/json/JSONDropCatalogMessage.java  |    67 -
 .../json/JSONDropConstraintMessage.java         |    91 -
 .../messaging/json/JSONDropDatabaseMessage.java |    72 -
 .../messaging/json/JSONDropFunctionMessage.java |    79 -
 .../json/JSONDropPartitionMessage.java          |   135 -
 .../messaging/json/JSONDropTableMessage.java    |   121 -
 .../messaging/json/JSONInsertMessage.java       |   148 -
 .../messaging/json/JSONMessageDeserializer.java |   273 -
 .../messaging/json/JSONMessageFactory.java      |   402 -
 .../messaging/json/JSONOpenTxnMessage.java      |   106 -
 .../hive/metastore/metrics/JsonReporter.java    |   223 -
 .../hive/metastore/metrics/JvmPauseMonitor.java |   222 -
 .../hadoop/hive/metastore/metrics/Metrics.java  |   244 -
 .../metastore/metrics/MetricsConstants.java     |    46 -
 .../hive/metastore/metrics/PerfLogger.java      |   194 -
 .../hadoop/hive/metastore/model/MCatalog.java   |    58 -
 .../hive/metastore/model/MColumnDescriptor.java |    51 -
 .../hive/metastore/model/MConstraint.java       |   214 -
 .../hive/metastore/model/MCreationMetadata.java |    97 -
 .../hive/metastore/model/MDBPrivilege.java      |   142 -
 .../hadoop/hive/metastore/model/MDatabase.java  |   157 -
 .../hive/metastore/model/MDelegationToken.java  |    45 -
 .../hive/metastore/model/MFieldSchema.java      |    80 -
 .../hadoop/hive/metastore/model/MFunction.java  |   119 -
 .../hive/metastore/model/MGlobalPrivilege.java  |   130 -
 .../hadoop/hive/metastore/model/MISchema.java   |   107 -
 .../hadoop/hive/metastore/model/MIndex.java     |   200 -
 .../hadoop/hive/metastore/model/MMasterKey.java |    55 -
 .../metastore/model/MMetastoreDBProperties.java |    56 -
 .../hive/metastore/model/MNotificationLog.java  |   108 -
 .../metastore/model/MNotificationNextId.java    |    42 -
 .../hadoop/hive/metastore/model/MOrder.java     |    62 -
 .../hadoop/hive/metastore/model/MPartition.java |   162 -
 .../model/MPartitionColumnPrivilege.java        |   171 -
 .../model/MPartitionColumnStatistics.java       |   281 -
 .../hive/metastore/model/MPartitionEvent.java   |    97 -
 .../metastore/model/MPartitionPrivilege.java    |   149 -
 .../hive/metastore/model/MPrincipalDesc.java    |    59 -
 .../hive/metastore/model/MResourceUri.java      |    49 -
 .../hadoop/hive/metastore/model/MRole.java      |    80 -
 .../hadoop/hive/metastore/model/MRoleMap.java   |   120 -
 .../hive/metastore/model/MRuntimeStat.java      |    59 -
 .../hive/metastore/model/MSchemaVersion.java    |   127 -
 .../hadoop/hive/metastore/model/MSerDeInfo.java |   127 -
 .../metastore/model/MStorageDescriptor.java     |   277 -
 .../hive/metastore/model/MStringList.java       |    62 -
 .../hadoop/hive/metastore/model/MTable.java     |   283 -
 .../metastore/model/MTableColumnPrivilege.java  |   170 -
 .../metastore/model/MTableColumnStatistics.java |   272 -
 .../hive/metastore/model/MTablePrivilege.java   |   149 -
 .../model/MTxnWriteNotificationLog.java         |   123 -
 .../hadoop/hive/metastore/model/MType.java      |   105 -
 .../hive/metastore/model/MVersionTable.java     |    57 -
 .../hadoop/hive/metastore/model/MWMMapping.java |    83 -
 .../hadoop/hive/metastore/model/MWMPool.java    |    89 -
 .../hive/metastore/model/MWMResourcePlan.java   |   105 -
 .../hadoop/hive/metastore/model/MWMTrigger.java |    89 -
 .../hive/metastore/parser/ExpressionTree.java   |   606 -
 .../hadoop/hive/metastore/parser/Filter.g       |   486 -
 .../hive/metastore/parser/package-info.java     |    23 -
 .../spec/CompositePartitionSpecProxy.java       |   258 -
 .../spec/PartitionListComposingSpecProxy.java   |   209 -
 .../partition/spec/PartitionSpecProxy.java      |   220 -
 .../spec/PartitionSpecWithSharedSDProxy.java    |   192 -
 .../hive/metastore/security/DBTokenStore.java   |   180 -
 .../security/DelegationTokenIdentifier.java     |    52 -
 .../security/DelegationTokenSecretManager.java  |   134 -
 .../security/DelegationTokenSelector.java       |    33 -
 .../security/DelegationTokenStore.java          |   116 -
 .../metastore/security/DelegationTokenTool.java |   252 -
 .../security/HadoopThriftAuthBridge.java        |   700 -
 .../security/HadoopThriftAuthBridge23.java      |   114 -
 .../metastore/security/MemoryTokenStore.java    |   118 -
 .../MetastoreDelegationTokenManager.java        |   180 -
 .../security/TUGIContainingTransport.java       |    96 -
 .../TokenStoreDelegationTokenSecretManager.java |   334 -
 .../metastore/security/ZooKeeperTokenStore.java |   474 -
 .../hive/metastore/tools/HiveMetaTool.java      |   490 -
 .../hive/metastore/tools/HiveSchemaHelper.java  |   673 -
 .../metastore/tools/MetastoreSchemaTool.java    |   460 -
 .../hive/metastore/tools/SQLGenerator.java      |   187 -
 .../metastore/tools/SchemaToolCommandLine.java  |   308 -
 .../hive/metastore/tools/SchemaToolTask.java    |    32 -
 .../tools/SchemaToolTaskAlterCatalog.java       |    90 -
 .../tools/SchemaToolTaskCreateCatalog.java      |   132 -
 .../tools/SchemaToolTaskCreateUser.java         |   115 -
 .../metastore/tools/SchemaToolTaskInfo.java     |    43 -
 .../metastore/tools/SchemaToolTaskInit.java     |    73 -
 .../tools/SchemaToolTaskMoveDatabase.java       |    96 -
 .../tools/SchemaToolTaskMoveTable.java          |   142 -
 .../metastore/tools/SchemaToolTaskUpgrade.java  |   116 -
 .../metastore/tools/SchemaToolTaskValidate.java |   630 -
 .../hadoop/hive/metastore/tools/SmokeTest.java  |   102 -
 .../txn/AcidCompactionHistoryService.java       |    71 -
 .../metastore/txn/AcidHouseKeeperService.java   |    71 -
 .../txn/AcidOpenTxnsCounterService.java         |    72 -
 .../hive/metastore/txn/AcidWriteSetService.java |    69 -
 .../hive/metastore/txn/CompactionInfo.java      |   170 -
 .../metastore/txn/CompactionTxnHandler.java     |  1158 --
 .../hadoop/hive/metastore/txn/TxnDbUtil.java    |   621 -
 .../hadoop/hive/metastore/txn/TxnHandler.java   |  5094 -------
 .../hadoop/hive/metastore/txn/TxnStore.java     |   509 -
 .../hadoop/hive/metastore/txn/TxnUtils.java     |   481 -
 .../hive/metastore/utils/CommonCliOptions.java  |   160 -
 .../hadoop/hive/metastore/utils/FileUtils.java  |   537 -
 .../hadoop/hive/metastore/utils/HdfsUtils.java  |   395 -
 .../metastore/utils/HiveStrictManagedUtils.java |   100 -
 .../hadoop/hive/metastore/utils/LogUtils.java   |   140 -
 .../hive/metastore/utils/MetaStoreUtils.java    |  1840 ---
 .../metastore/utils/MetastoreVersionInfo.java   |   133 -
 .../hive/metastore/utils/SecurityUtils.java     |   313 -
 .../hive/metastore/utils/StringableMap.java     |    80 -
 .../src/main/resources/package.jdo              |  1426 --
 .../metastore-common/src/main/scripts/base      |   231 -
 .../src/main/scripts/ext/metastore.sh           |    41 -
 .../src/main/scripts/ext/schemaTool.sh          |    33 -
 .../src/main/scripts/ext/smokeTest.sh           |    33 -
 .../src/main/scripts/metastore-config.sh        |    69 -
 .../src/main/scripts/schematool                 |    21 -
 .../src/main/scripts/start-metastore            |    22 -
 .../main/sql/derby/hive-schema-1.2.0.derby.sql  |   405 -
 .../main/sql/derby/hive-schema-3.0.0.derby.sql  |   692 -
 .../main/sql/derby/hive-schema-3.1.0.derby.sql  |   720 -
 .../main/sql/derby/hive-schema-4.0.0.derby.sql  |   720 -
 .../sql/derby/upgrade-1.2.0-to-2.0.0.derby.sql  |    62 -
 .../sql/derby/upgrade-2.0.0-to-2.1.0.derby.sql  |    22 -
 .../sql/derby/upgrade-2.1.0-to-2.2.0.derby.sql  |    59 -
 .../sql/derby/upgrade-2.2.0-to-2.3.0.derby.sql  |     5 -
 .../sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql  |   283 -
 .../sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql  |    68 -
 .../sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql  |     8 -
 .../src/main/sql/derby/upgrade.order.derby      |    18 -
 .../src/main/sql/mssql/create-user.mssql.sql    |     5 -
 .../main/sql/mssql/hive-schema-1.2.0.mssql.sql  |   947 --
 .../main/sql/mssql/hive-schema-3.0.0.mssql.sql  |  1246 --
 .../main/sql/mssql/hive-schema-3.1.0.mssql.sql  |  1284 --
 .../main/sql/mssql/hive-schema-4.0.0.mssql.sql  |  1284 --
 .../sql/mssql/upgrade-1.2.0-to-2.0.0.mssql.sql  |    73 -
 .../sql/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql  |    39 -
 .../sql/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql  |    43 -
 .../sql/mssql/upgrade-2.2.0-to-2.3.0.mssql.sql  |     7 -
 .../sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql  |   352 -
 .../sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql  |    70 -
 .../sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql  |    10 -
 .../src/main/sql/mssql/upgrade.order.mssql      |    12 -
 .../src/main/sql/mysql/create-user.mysql.sql    |     8 -
 .../main/sql/mysql/hive-schema-1.2.0.mysql.sql  |   910 --
 .../main/sql/mysql/hive-schema-3.0.0.mysql.sql  |  1183 --
 .../main/sql/mysql/hive-schema-3.1.0.mysql.sql  |  1218 --
 .../main/sql/mysql/hive-schema-4.0.0.mysql.sql  |  1220 --
 .../sql/mysql/upgrade-1.2.0-to-2.0.0.mysql.sql  |    75 -
 .../sql/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql  |    42 -
 .../sql/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql  |    43 -
 .../sql/mysql/upgrade-2.2.0-to-2.3.0.mysql.sql  |     8 -
 .../sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql  |   326 -
 .../sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql  |    71 -
 .../sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql  |    10 -
 .../src/main/sql/mysql/upgrade.order.mysql      |    18 -
 .../src/main/sql/oracle/create-user.oracle.sql  |     3 -
 .../sql/oracle/hive-schema-1.2.0.oracle.sql     |   856 --
 .../sql/oracle/hive-schema-3.0.0.oracle.sql     |  1140 --
 .../sql/oracle/hive-schema-3.1.0.oracle.sql     |  1175 --
 .../sql/oracle/hive-schema-4.0.0.oracle.sql     |  1177 --
 .../oracle/upgrade-1.2.0-to-2.0.0.oracle.sql    |    83 -
 .../oracle/upgrade-2.0.0-to-2.1.0.oracle.sql    |    39 -
 .../oracle/upgrade-2.1.0-to-2.2.0.oracle.sql    |    58 -
 .../oracle/upgrade-2.2.0-to-2.3.0.oracle.sql    |     7 -
 .../oracle/upgrade-2.3.0-to-3.0.0.oracle.sql    |   343 -
 .../oracle/upgrade-3.0.0-to-3.1.0.oracle.sql    |    70 -
 .../oracle/upgrade-3.1.0-to-4.0.0.oracle.sql    |     9 -
 .../src/main/sql/oracle/upgrade.order.oracle    |    14 -
 .../main/sql/postgres/create-user.postgres.sql  |     2 -
 .../sql/postgres/hive-schema-1.2.0.postgres.sql |  1562 --
 .../sql/postgres/hive-schema-3.0.0.postgres.sql |  1827 ---
 .../sql/postgres/hive-schema-3.1.0.postgres.sql |  1866 ---
 .../sql/postgres/hive-schema-4.0.0.postgres.sql |  1868 ---
 .../upgrade-1.2.0-to-2.0.0.postgres.sql         |    73 -
 .../upgrade-2.0.0-to-2.1.0.postgres.sql         |    40 -
 .../upgrade-2.1.0-to-2.2.0.postgres.sql         |    39 -
 .../upgrade-2.2.0-to-2.3.0.postgres.sql         |     8 -
 .../upgrade-2.3.0-to-3.0.0.postgres.sql         |   360 -
 .../upgrade-3.0.0-to-3.1.0.postgres.sql         |    72 -
 .../upgrade-3.1.0-to-4.0.0.postgres.sql         |    10 -
 .../main/sql/postgres/upgrade.order.postgres    |    18 -
 .../hadoop/hive/common/TestStatsSetupConst.java |   114 -
 .../ndv/fm/TestFMSketchSerialization.java       |   101 -
 .../hive/common/ndv/hll/TestHLLNoBias.java      |   117 -
 .../common/ndv/hll/TestHLLSerialization.java    |   270 -
 .../hive/common/ndv/hll/TestHyperLogLog.java    |   338 -
 .../common/ndv/hll/TestHyperLogLogDense.java    |    85 -
 .../common/ndv/hll/TestHyperLogLogMerge.java    |   147 -
 .../common/ndv/hll/TestHyperLogLogSparse.java   |    84 -
 .../common/ndv/hll/TestSparseEncodeHash.java    |    59 -
 .../metastore/AlternateFailurePreListener.java  |    62 -
 .../metastore/DummyEndFunctionListener.java     |    47 -
 .../metastore/DummyJdoConnectionUrlHook.java    |    45 -
 .../hadoop/hive/metastore/DummyListener.java    |   126 -
 .../metastore/DummyMetaStoreInitListener.java   |    39 -
 .../hadoop/hive/metastore/DummyPreListener.java |    49 -
 .../DummyRawStoreControlledCommit.java          |  1268 --
 .../DummyRawStoreForJdoConnection.java          |  1249 --
 .../apache/hadoop/hive/metastore/FakeDerby.java |   404 -
 .../HiveMetaStoreClientPreCatalog.java          |  3546 -----
 .../InjectableBehaviourObjectStore.java         |   218 -
 .../hive/metastore/IpAddressListener.java       |   102 -
 .../hive/metastore/MetaStoreTestUtils.java      |   291 -
 .../MockPartitionExpressionForMetastore.java    |    58 -
 .../hive/metastore/NonCatCallsWithCatalog.java  |  1158 --
 .../hadoop/hive/metastore/TestAdminUser.java    |    49 -
 .../hive/metastore/TestAggregateStatsCache.java |   272 -
 .../metastore/TestCatalogNonDefaultClient.java  |    74 -
 .../metastore/TestCatalogNonDefaultSvr.java     |    68 -
 .../hive/metastore/TestCatalogOldClient.java    |    44 -
 .../hadoop/hive/metastore/TestDeadline.java     |   130 -
 .../metastore/TestEmbeddedHiveMetaStore.java    |    51 -
 .../hadoop/hive/metastore/TestFilterHooks.java  |   254 -
 .../hive/metastore/TestHiveAlterHandler.java    |   121 -
 .../hive/metastore/TestHiveMetaStore.java       |  3102 ----
 .../metastore/TestHiveMetaStoreGetMetaConf.java |   115 -
 .../TestHiveMetaStorePartitionSpecs.java        |   383 -
 .../TestHiveMetaStoreSchemaMethods.java         |  1248 --
 .../metastore/TestHiveMetaStoreTimeout.java     |   142 -
 .../hive/metastore/TestHiveMetaStoreTxns.java   |   267 -
 ...TestHiveMetaStoreWithEnvironmentContext.java |   191 -
 .../hive/metastore/TestHiveMetastoreCli.java    |    68 -
 .../hive/metastore/TestLockRequestBuilder.java  |   587 -
 .../hive/metastore/TestMarkPartition.java       |   118 -
 .../hive/metastore/TestMarkPartitionRemote.java |    34 -
 .../TestMetaStoreConnectionUrlHook.java         |    49 -
 .../TestMetaStoreEndFunctionListener.java       |   146 -
 .../metastore/TestMetaStoreEventListener.java   |   472 -
 .../TestMetaStoreEventListenerOnlyOnCommit.java |   121 -
 .../TestMetaStoreEventListenerWithOldConf.java  |   129 -
 .../metastore/TestMetaStoreInitListener.java    |    56 -
 .../metastore/TestMetaStoreListenersError.java  |    97 -
 .../metastore/TestMetaStoreSchemaFactory.java   |    72 -
 .../hive/metastore/TestMetaStoreSchemaInfo.java |    55 -
 .../hadoop/hive/metastore/TestObjectStore.java  |   904 --
 .../metastore/TestObjectStoreInitRetry.java     |   135 -
 .../metastore/TestObjectStoreSchemaMethods.java |   602 -
 .../hadoop/hive/metastore/TestOldSchema.java    |   233 -
 .../TestPartitionNameWhitelistValidation.java   |   125 -
 .../hive/metastore/TestRawStoreProxy.java       |    67 -
 .../hive/metastore/TestRemoteHiveMetaStore.java |    64 -
 .../TestRemoteHiveMetaStoreIpAddress.java       |    66 -
 .../TestRemoteUGIHiveMetaStoreIpAddress.java    |    31 -
 .../TestRetriesInRetryingHMSHandler.java        |   111 -
 .../hive/metastore/TestRetryingHMSHandler.java  |    82 -
 .../metastore/TestSetUGIOnBothClientServer.java |    34 -
 .../hive/metastore/TestSetUGIOnOnlyClient.java  |    35 -
 .../hive/metastore/TestSetUGIOnOnlyServer.java  |    35 -
 .../apache/hadoop/hive/metastore/TestStats.java |   732 -
 .../hive/metastore/TestTableIterable.java       |    76 -
 .../hive/metastore/VerifyingObjectStore.java    |   219 -
 .../annotation/MetastoreCheckinTest.java        |    25 -
 .../metastore/annotation/MetastoreTest.java     |    24 -
 .../metastore/annotation/MetastoreUnitTest.java |    25 -
 .../hive/metastore/cache/TestCachedStore.java   |  1075 --
 .../metastore/cache/TestCatalogCaching.java     |   142 -
 .../metastore/client/MetaStoreClientTest.java   |    95 -
 .../client/MetaStoreFactoryForTests.java        |   112 -
 .../metastore/client/TestAddPartitions.java     |  1736 ---
 .../client/TestAddPartitionsFromPartSpec.java   |  1267 --
 .../metastore/client/TestAlterPartitions.java   |  1154 --
 .../metastore/client/TestAppendPartitions.java  |   600 -
 .../hive/metastore/client/TestCatalogs.java     |   267 -
 .../metastore/client/TestCheckConstraint.java   |   363 -
 .../hive/metastore/client/TestDatabases.java    |   634 -
 .../metastore/client/TestDefaultConstraint.java |   363 -
 .../metastore/client/TestDropPartitions.java    |   659 -
 .../client/TestExchangePartitions.java          |  1337 --
 .../hive/metastore/client/TestForeignKey.java   |   538 -
 .../hive/metastore/client/TestFunctions.java    |   765 -
 .../metastore/client/TestGetPartitions.java     |   608 -
 .../hive/metastore/client/TestGetTableMeta.java |   330 -
 .../metastore/client/TestListPartitions.java    |  1522 --
 .../metastore/client/TestNotNullConstraint.java |   355 -
 .../hive/metastore/client/TestPrimaryKey.java   |   468 -
 .../hive/metastore/client/TestRuntimeStats.java |   154 -
 .../TestTablesCreateDropAlterTruncate.java      |  1400 --
 .../metastore/client/TestTablesGetExists.java   |   514 -
 .../hive/metastore/client/TestTablesList.java   |   320 -
 .../metastore/client/TestUniqueConstraint.java  |   356 -
 .../hive/metastore/client/package-info.java     |    22 -
 .../merge/DecimalColumnStatsMergerTest.java     |   235 -
 .../hive/metastore/conf/TestMetastoreConf.java  |   433 -
 .../TestDataSourceProviderFactory.java          |   248 -
 .../hive/metastore/dbinstall/DbInstallBase.java |   265 -
 .../hive/metastore/dbinstall/ITestMysql.java    |    82 -
 .../hive/metastore/dbinstall/ITestOracle.java   |    83 -
 .../hive/metastore/dbinstall/ITestPostgres.java |    82 -
 .../metastore/dbinstall/ITestSqlServer.java     |    84 -
 .../json/TestJSONMessageDeserializer.java       |   115 -
 .../hive/metastore/metrics/TestMetrics.java     |   164 -
 .../minihms/AbstractMetaStoreService.java       |   173 -
 .../minihms/ClusterMetaStoreForTests.java       |    32 -
 .../minihms/EmbeddedMetaStoreForTests.java      |    33 -
 .../hadoop/hive/metastore/minihms/MiniHMS.java  |    76 -
 .../minihms/RemoteMetaStoreForTests.java        |    43 -
 .../hive/metastore/minihms/package-info.java    |    23 -
 .../tools/TestMetastoreSchemaTool.java          |    70 -
 .../tools/TestSchemaToolForMetastore.java       |   534 -
 .../metastore/txn/TestTxnHandlerNegative.java   |    58 -
 .../hadoop/hive/metastore/txn/TestTxnUtils.java |   239 -
 .../hive/metastore/utils/TestHdfsUtils.java     |   348 -
 .../metastore/utils/TestMetaStoreUtils.java     |   291 -
 standalone-metastore/metastore-server/pom.xml   |   684 +
 .../metastore-server/src/assembly/bin.xml       |   134 +
 .../metastore-server/src/assembly/src.xml       |    53 +
 .../hadoop/hive/common/StatsSetupConst.java     |   336 +
 .../common/ndv/NumDistinctValueEstimator.java   |    51 +
 .../ndv/NumDistinctValueEstimatorFactory.java   |    75 +
 .../hadoop/hive/common/ndv/fm/FMSketch.java     |   359 +
 .../hive/common/ndv/fm/FMSketchUtils.java       |   132 +
 .../hive/common/ndv/hll/HLLConstants.java       |   933 ++
 .../hive/common/ndv/hll/HLLDenseRegister.java   |   202 +
 .../hadoop/hive/common/ndv/hll/HLLRegister.java |    50 +
 .../hive/common/ndv/hll/HLLSparseRegister.java  |   261 +
 .../hadoop/hive/common/ndv/hll/HyperLogLog.java |   664 +
 .../hive/common/ndv/hll/HyperLogLogUtils.java   |   409 +
 .../hive/metastore/AcidEventListener.java       |   146 +
 .../hive/metastore/AggregateStatsCache.java     |   571 +
 .../hadoop/hive/metastore/AlterHandler.java     |   204 +
 .../apache/hadoop/hive/metastore/Batchable.java |    86 +
 .../hadoop/hive/metastore/ColumnType.java       |   301 +
 .../hadoop/hive/metastore/DatabaseProduct.java  |    75 +
 .../apache/hadoop/hive/metastore/Deadline.java  |   172 +
 .../hive/metastore/DeadlineException.java       |    29 +
 .../DefaultPartitionExpressionProxy.java        |    57 +
 .../metastore/DefaultStorageSchemaReader.java   |    38 +
 .../hive/metastore/FileMetadataHandler.java     |   109 +
 .../hive/metastore/FileMetadataManager.java     |   119 +
 .../hive/metastore/HMSMetricsListener.java      |    90 +
 .../hadoop/hive/metastore/HiveAlterHandler.java |   961 ++
 .../hadoop/hive/metastore/HiveMetaStore.java    |  9602 +++++++++++++
 .../hive/metastore/HiveMetaStoreClient.java     |  3597 +++++
 .../hive/metastore/HiveMetaStoreFsImpl.java     |    55 +
 .../hive/metastore/IExtrapolatePartStatus.java  |    85 +
 .../hadoop/hive/metastore/IHMSHandler.java      |   109 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |  3757 +++++
 .../hive/metastore/IMetaStoreSchemaInfo.java    |   115 +
 .../metastore/LinearExtrapolatePartStatus.java  |   106 +
 .../hive/metastore/LockComponentBuilder.java    |   121 +
 .../hive/metastore/LockRequestBuilder.java      |   168 +
 .../MaterializationsRebuildLockCleanerTask.java |    81 +
 .../MaterializationsRebuildLockHandler.java     |   216 +
 .../hive/metastore/MetaStoreDirectSql.java      |  2837 ++++
 .../metastore/MetaStoreEndFunctionContext.java  |    59 +
 .../metastore/MetaStoreEndFunctionListener.java |    58 +
 .../hive/metastore/MetaStoreEventListener.java  |   306 +
 .../MetaStoreEventListenerConstants.java        |    41 +
 .../hadoop/hive/metastore/MetaStoreFS.java      |    43 +
 .../hadoop/hive/metastore/MetaStoreInit.java    |   109 +
 .../hive/metastore/MetaStoreInitContext.java    |    27 +
 .../hive/metastore/MetaStoreInitListener.java   |    49 +
 .../metastore/MetaStoreListenerNotifier.java    |   375 +
 .../metastore/MetaStorePreEventListener.java    |    57 +
 .../hive/metastore/MetaStoreSchemaInfo.java     |   246 +
 .../metastore/MetaStoreSchemaInfoFactory.java   |    64 +
 .../hadoop/hive/metastore/MetaStoreThread.java  |    58 +
 .../hadoop/hive/metastore/MetadataStore.java    |    52 +
 .../hive/metastore/MetastoreTaskThread.java     |    38 +
 .../hadoop/hive/metastore/ObjectStore.java      | 12509 +++++++++++++++++
 .../hive/metastore/PartFilterExprUtil.java      |   165 +
 .../metastore/PartitionExpressionProxy.java     |    73 +
 .../apache/hadoop/hive/metastore/RawStore.java  |  1719 +++
 .../hadoop/hive/metastore/RawStoreProxy.java    |   114 +
 .../hive/metastore/ReplChangeManager.java       |   501 +
 .../hive/metastore/RetryingHMSHandler.java      |   232 +
 .../hive/metastore/RetryingMetaStoreClient.java |   341 +
 .../hive/metastore/RuntimeStatsCleanerTask.java |    66 +
 .../metastore/SessionPropertiesListener.java    |    46 +
 .../hive/metastore/StatObjectConverter.java     |   892 ++
 .../hive/metastore/TServerSocketKeepAlive.java  |    47 +
 .../hive/metastore/TSetIpAddressProcessor.java  |    62 +
 .../hive/metastore/TUGIBasedProcessor.java      |   183 +
 .../hadoop/hive/metastore/TableIterable.java    |   115 +
 .../hadoop/hive/metastore/ThreadPool.java       |    63 +
 .../TransactionalMetaStoreEventListener.java    |    39 +
 .../TransactionalValidationListener.java        |   487 +
 .../apache/hadoop/hive/metastore/Warehouse.java |   756 +
 .../hive/metastore/api/utils/DecimalUtils.java  |    49 +
 .../hive/metastore/cache/ByteArrayWrapper.java  |    45 +
 .../hadoop/hive/metastore/cache/CacheUtils.java |   136 +
 .../hive/metastore/cache/CachedStore.java       |  2532 ++++
 .../hive/metastore/cache/SharedCache.java       |  1650 +++
 .../client/builder/CatalogBuilder.java          |    62 +
 .../client/builder/ConstraintBuilder.java       |   115 +
 .../client/builder/DatabaseBuilder.java         |   122 +
 .../client/builder/FunctionBuilder.java         |   143 +
 .../GrantRevokePrivilegeRequestBuilder.java     |    63 +
 .../builder/HiveObjectPrivilegeBuilder.java     |    69 +
 .../client/builder/HiveObjectRefBuilder.java    |    69 +
 .../client/builder/ISchemaBuilder.java          |   102 +
 .../client/builder/PartitionBuilder.java        |   119 +
 .../builder/PrivilegeGrantInfoBuilder.java      |    84 +
 .../metastore/client/builder/RoleBuilder.java   |    55 +
 .../builder/SQLCheckConstraintBuilder.java      |    51 +
 .../builder/SQLDefaultConstraintBuilder.java    |    51 +
 .../client/builder/SQLForeignKeyBuilder.java    |   103 +
 .../builder/SQLNotNullConstraintBuilder.java    |    52 +
 .../client/builder/SQLPrimaryKeyBuilder.java    |    52 +
 .../builder/SQLUniqueConstraintBuilder.java     |    46 +
 .../client/builder/SchemaVersionBuilder.java    |   114 +
 .../client/builder/SerdeAndColsBuilder.java     |   124 +
 .../builder/StorageDescriptorBuilder.java       |   163 +
 .../metastore/client/builder/TableBuilder.java  |   224 +
 .../aggr/BinaryColumnStatsAggregator.java       |    61 +
 .../aggr/BooleanColumnStatsAggregator.java      |    62 +
 .../columnstats/aggr/ColumnStatsAggregator.java |    35 +
 .../aggr/ColumnStatsAggregatorFactory.java      |   113 +
 .../aggr/DateColumnStatsAggregator.java         |   360 +
 .../aggr/DecimalColumnStatsAggregator.java      |   375 +
 .../aggr/DoubleColumnStatsAggregator.java       |   348 +
 .../aggr/IExtrapolatePartStatus.java            |    47 +
 .../aggr/LongColumnStatsAggregator.java         |   348 +
 .../aggr/StringColumnStatsAggregator.java       |   304 +
 .../cache/DateColumnStatsDataInspector.java     |   124 +
 .../cache/DecimalColumnStatsDataInspector.java  |   124 +
 .../cache/DoubleColumnStatsDataInspector.java   |   124 +
 .../cache/LongColumnStatsDataInspector.java     |   124 +
 .../cache/StringColumnStatsDataInspector.java   |   125 +
 .../merge/BinaryColumnStatsMerger.java          |    35 +
 .../merge/BooleanColumnStatsMerger.java         |    35 +
 .../columnstats/merge/ColumnStatsMerger.java    |    31 +
 .../merge/ColumnStatsMergerFactory.java         |   120 +
 .../merge/DateColumnStatsMerger.java            |    59 +
 .../merge/DecimalColumnStatsMerger.java         |    85 +
 .../merge/DoubleColumnStatsMerger.java          |    54 +
 .../merge/LongColumnStatsMerger.java            |    54 +
 .../merge/StringColumnStatsMerger.java          |    54 +
 .../metastore/conf/ConfTemplatePrinter.java     |   150 +
 .../hive/metastore/conf/MetastoreConf.java      |  1688 +++
 .../hive/metastore/conf/TimeValidator.java      |    67 +
 .../datasource/BoneCPDataSourceProvider.java    |    87 +
 .../datasource/DataSourceProvider.java          |    79 +
 .../datasource/DataSourceProviderFactory.java   |    66 +
 .../datasource/DbCPDataSourceProvider.java      |   117 +
 .../datasource/HikariCPDataSourceProvider.java  |    89 +
 .../hive/metastore/datasource/package-info.java |    23 +
 .../hive/metastore/events/AbortTxnEvent.java    |    51 +
 .../hive/metastore/events/AcidWriteEvent.java   |    91 +
 .../metastore/events/AddForeignKeyEvent.java    |    41 +
 .../events/AddNotNullConstraintEvent.java       |    42 +
 .../metastore/events/AddPartitionEvent.java     |    84 +
 .../metastore/events/AddPrimaryKeyEvent.java    |    42 +
 .../metastore/events/AddSchemaVersionEvent.java |    40 +
 .../events/AddUniqueConstraintEvent.java        |    42 +
 .../metastore/events/AllocWriteIdEvent.java     |    57 +
 .../metastore/events/AlterCatalogEvent.java     |    44 +
 .../metastore/events/AlterDatabaseEvent.java    |    56 +
 .../metastore/events/AlterISchemaEvent.java     |    45 +
 .../metastore/events/AlterPartitionEvent.java   |    75 +
 .../events/AlterSchemaVersionEvent.java         |    46 +
 .../hive/metastore/events/AlterTableEvent.java  |    63 +
 .../hive/metastore/events/CommitTxnEvent.java   |    51 +
 .../metastore/events/ConfigChangeEvent.java     |    52 +
 .../metastore/events/CreateCatalogEvent.java    |    39 +
 .../metastore/events/CreateDatabaseEvent.java   |    43 +
 .../metastore/events/CreateFunctionEvent.java   |    43 +
 .../metastore/events/CreateISchemaEvent.java    |    39 +
 .../hive/metastore/events/CreateTableEvent.java |    43 +
 .../hive/metastore/events/DropCatalogEvent.java |    39 +
 .../metastore/events/DropConstraintEvent.java   |    57 +
 .../metastore/events/DropDatabaseEvent.java     |    43 +
 .../metastore/events/DropFunctionEvent.java     |    43 +
 .../hive/metastore/events/DropISchemaEvent.java |    39 +
 .../metastore/events/DropPartitionEvent.java    |    70 +
 .../events/DropSchemaVersionEvent.java          |    40 +
 .../hive/metastore/events/DropTableEvent.java   |    54 +
 .../hive/metastore/events/EventCleanerTask.java |    66 +
 .../hive/metastore/events/InsertEvent.java      |   132 +
 .../hive/metastore/events/ListenerEvent.java    |   187 +
 .../events/LoadPartitionDoneEvent.java          |    57 +
 .../hive/metastore/events/OpenTxnEvent.java     |    51 +
 .../metastore/events/PreAddPartitionEvent.java  |    79 +
 .../events/PreAddSchemaVersionEvent.java        |    39 +
 .../metastore/events/PreAlterCatalogEvent.java  |    40 +
 .../metastore/events/PreAlterDatabaseEvent.java |    47 +
 .../metastore/events/PreAlterISchemaEvent.java  |    44 +
 .../events/PreAlterPartitionEvent.java          |    65 +
 .../events/PreAlterSchemaVersionEvent.java      |    45 +
 .../metastore/events/PreAlterTableEvent.java    |    53 +
 .../events/PreAuthorizationCallEvent.java       |    33 +
 .../metastore/events/PreCreateCatalogEvent.java |    39 +
 .../events/PreCreateDatabaseEvent.java          |    43 +
 .../metastore/events/PreCreateISchemaEvent.java |    39 +
 .../metastore/events/PreCreateTableEvent.java   |    43 +
 .../metastore/events/PreDropCatalogEvent.java   |    39 +
 .../metastore/events/PreDropDatabaseEvent.java  |    43 +
 .../metastore/events/PreDropISchemaEvent.java   |    39 +
 .../metastore/events/PreDropPartitionEvent.java |    67 +
 .../events/PreDropSchemaVersionEvent.java       |    39 +
 .../metastore/events/PreDropTableEvent.java     |    55 +
 .../hive/metastore/events/PreEventContext.java  |    82 +
 .../events/PreLoadPartitionDoneEvent.java       |    64 +
 .../metastore/events/PreReadCatalogEvent.java   |    39 +
 .../metastore/events/PreReadDatabaseEvent.java  |    46 +
 .../metastore/events/PreReadISchemaEvent.java   |    39 +
 .../metastore/events/PreReadTableEvent.java     |    47 +
 .../events/PreReadhSchemaVersionEvent.java      |    36 +
 .../metastore/hooks/JDOConnectionURLHook.java   |    52 +
 .../metastore/messaging/AbortTxnMessage.java    |    36 +
 .../metastore/messaging/AcidWriteMessage.java   |    50 +
 .../messaging/AddForeignKeyMessage.java         |    36 +
 .../messaging/AddNotNullConstraintMessage.java  |    36 +
 .../messaging/AddPartitionMessage.java          |    68 +
 .../messaging/AddPrimaryKeyMessage.java         |    35 +
 .../messaging/AddUniqueConstraintMessage.java   |    36 +
 .../messaging/AllocWriteIdMessage.java          |    36 +
 .../messaging/AlterCatalogMessage.java          |    29 +
 .../messaging/AlterDatabaseMessage.java         |    36 +
 .../messaging/AlterPartitionMessage.java        |    69 +
 .../metastore/messaging/AlterTableMessage.java  |    58 +
 .../metastore/messaging/CommitTxnMessage.java   |    59 +
 .../messaging/CreateCatalogMessage.java         |    25 +
 .../messaging/CreateDatabaseMessage.java        |    31 +
 .../messaging/CreateFunctionMessage.java        |    46 +
 .../metastore/messaging/CreateTableMessage.java |    53 +
 .../metastore/messaging/DropCatalogMessage.java |    25 +
 .../messaging/DropConstraintMessage.java        |    29 +
 .../messaging/DropDatabaseMessage.java          |    27 +
 .../messaging/DropFunctionMessage.java          |    38 +
 .../messaging/DropPartitionMessage.java         |    49 +
 .../metastore/messaging/DropTableMessage.java   |    46 +
 .../hive/metastore/messaging/EventMessage.java  |   127 +
 .../hive/metastore/messaging/EventUtils.java    |   202 +
 .../hive/metastore/messaging/InsertMessage.java |    75 +
 .../messaging/MessageDeserializer.java          |   200 +
 .../metastore/messaging/MessageFactory.java     |   341 +
 .../metastore/messaging/OpenTxnMessage.java     |    38 +
 .../metastore/messaging/PartitionFiles.java     |    53 +
 .../messaging/event/filters/AndFilter.java      |    39 +
 .../messaging/event/filters/BasicFilter.java    |    33 +
 .../event/filters/DatabaseAndTableFilter.java   |    65 +
 .../event/filters/EventBoundaryFilter.java      |    34 +
 .../event/filters/MessageFormatFilter.java      |    36 +
 .../messaging/json/JSONAbortTxnMessage.java     |    88 +
 .../messaging/json/JSONAcidWriteMessage.java    |   150 +
 .../json/JSONAddForeignKeyMessage.java          |   102 +
 .../json/JSONAddNotNullConstraintMessage.java   |    97 +
 .../messaging/json/JSONAddPartitionMessage.java |   175 +
 .../json/JSONAddPrimaryKeyMessage.java          |   102 +
 .../json/JSONAddUniqueConstraintMessage.java    |    99 +
 .../messaging/json/JSONAllocWriteIdMessage.java |   113 +
 .../messaging/json/JSONAlterCatalogMessage.java |    90 +
 .../json/JSONAlterDatabaseMessage.java          |    97 +
 .../json/JSONAlterPartitionMessage.java         |   153 +
 .../messaging/json/JSONAlterTableMessage.java   |   128 +
 .../messaging/json/JSONCommitTxnMessage.java    |   183 +
 .../json/JSONCreateCatalogMessage.java          |    80 +
 .../json/JSONCreateDatabaseMessage.java         |    85 +
 .../json/JSONCreateFunctionMessage.java         |    87 +
 .../messaging/json/JSONCreateTableMessage.java  |   134 +
 .../messaging/json/JSONDropCatalogMessage.java  |    67 +
 .../json/JSONDropConstraintMessage.java         |    91 +
 .../messaging/json/JSONDropDatabaseMessage.java |    72 +
 .../messaging/json/JSONDropFunctionMessage.java |    79 +
 .../json/JSONDropPartitionMessage.java          |   135 +
 .../messaging/json/JSONDropTableMessage.java    |   121 +
 .../messaging/json/JSONInsertMessage.java       |   148 +
 .../messaging/json/JSONMessageDeserializer.java |   273 +
 .../messaging/json/JSONMessageFactory.java      |   402 +
 .../messaging/json/JSONOpenTxnMessage.java      |   106 +
 .../hive/metastore/metrics/JsonReporter.java    |   223 +
 .../hive/metastore/metrics/JvmPauseMonitor.java |   222 +
 .../hadoop/hive/metastore/metrics/Metrics.java  |   244 +
 .../metastore/metrics/MetricsConstants.java     |    46 +
 .../hive/metastore/metrics/PerfLogger.java      |   194 +
 .../hadoop/hive/metastore/model/MCatalog.java   |    58 +
 .../hive/metastore/model/MColumnDescriptor.java |    51 +
 .../hive/metastore/model/MConstraint.java       |   214 +
 .../hive/metastore/model/MCreationMetadata.java |    97 +
 .../hive/metastore/model/MDBPrivilege.java      |   142 +
 .../hadoop/hive/metastore/model/MDatabase.java  |   157 +
 .../hive/metastore/model/MDelegationToken.java  |    45 +
 .../hive/metastore/model/MFieldSchema.java      |    80 +
 .../hadoop/hive/metastore/model/MFunction.java  |   119 +
 .../hive/metastore/model/MGlobalPrivilege.java  |   130 +
 .../hadoop/hive/metastore/model/MISchema.java   |   107 +
 .../hadoop/hive/metastore/model/MIndex.java     |   200 +
 .../hadoop/hive/metastore/model/MMasterKey.java |    55 +
 .../metastore/model/MMetastoreDBProperties.java |    56 +
 .../hive/metastore/model/MNotificationLog.java  |   108 +
 .../metastore/model/MNotificationNextId.java    |    42 +
 .../hadoop/hive/metastore/model/MOrder.java     |    62 +
 .../hadoop/hive/metastore/model/MPartition.java |   162 +
 .../model/MPartitionColumnPrivilege.java        |   171 +
 .../model/MPartitionColumnStatistics.java       |   281 +
 .../hive/metastore/model/MPartitionEvent.java   |    97 +
 .../metastore/model/MPartitionPrivilege.java    |   149 +
 .../hive/metastore/model/MPrincipalDesc.java    |    59 +
 .../hive/metastore/model/MResourceUri.java      |    49 +
 .../hadoop/hive/metastore/model/MRole.java      |    80 +
 .../hadoop/hive/metastore/model/MRoleMap.java   |   120 +
 .../hive/metastore/model/MRuntimeStat.java      |    59 +
 .../hive/metastore/model/MSchemaVersion.java    |   127 +
 .../hadoop/hive/metastore/model/MSerDeInfo.java |   127 +
 .../metastore/model/MStorageDescriptor.java     |   277 +
 .../hive/metastore/model/MStringList.java       |    62 +
 .../hadoop/hive/metastore/model/MTable.java     |   283 +
 .../metastore/model/MTableColumnPrivilege.java  |   170 +
 .../metastore/model/MTableColumnStatistics.java |   272 +
 .../hive/metastore/model/MTablePrivilege.java   |   149 +
 .../model/MTxnWriteNotificationLog.java         |   123 +
 .../hadoop/hive/metastore/model/MType.java      |   105 +
 .../hive/metastore/model/MVersionTable.java     |    57 +
 .../hadoop/hive/metastore/model/MWMMapping.java |    83 +
 .../hadoop/hive/metastore/model/MWMPool.java    |    89 +
 .../hive/metastore/model/MWMResourcePlan.java   |   105 +
 .../hadoop/hive/metastore/model/MWMTrigger.java |    89 +
 .../hive/metastore/parser/ExpressionTree.java   |   606 +
 .../hadoop/hive/metastore/parser/Filter.g       |   486 +
 .../hive/metastore/parser/package-info.java     |    23 +
 .../spec/CompositePartitionSpecProxy.java       |   258 +
 .../spec/PartitionListComposingSpecProxy.java   |   209 +
 .../partition/spec/PartitionSpecProxy.java      |   220 +
 .../spec/PartitionSpecWithSharedSDProxy.java    |   192 +
 .../hive/metastore/security/DBTokenStore.java   |   180 +
 .../security/DelegationTokenIdentifier.java     |    52 +
 .../security/DelegationTokenSecretManager.java  |   134 +
 .../security/DelegationTokenSelector.java       |    33 +
 .../security/DelegationTokenStore.java          |   116 +
 .../metastore/security/DelegationTokenTool.java |   252 +
 .../security/HadoopThriftAuthBridge.java        |   700 +
 .../security/HadoopThriftAuthBridge23.java      |   114 +
 .../metastore/security/MemoryTokenStore.java    |   118 +
 .../MetastoreDelegationTokenManager.java        |   180 +
 .../security/TUGIContainingTransport.java       |    96 +
 .../TokenStoreDelegationTokenSecretManager.java |   334 +
 .../metastore/security/ZooKeeperTokenStore.java |   474 +
 .../hive/metastore/tools/HiveMetaTool.java      |   490 +
 .../hive/metastore/tools/HiveSchemaHelper.java  |   673 +
 .../metastore/tools/MetastoreSchemaTool.java    |   460 +
 .../hive/metastore/tools/SQLGenerator.java      |   187 +
 .../metastore/tools/SchemaToolCommandLine.java  |   308 +
 .../hive/metastore/tools/SchemaToolTask.java    |    32 +
 .../tools/SchemaToolTaskAlterCatalog.java       |    90 +
 .../tools/SchemaToolTaskCreateCatalog.java      |   132 +
 .../tools/SchemaToolTaskCreateUser.java         |   115 +
 .../metastore/tools/SchemaToolTaskInfo.java     |    43 +
 .../metastore/tools/SchemaToolTaskInit.java     |    73 +
 .../tools/SchemaToolTaskMoveDatabase.java       |    96 +
 .../tools/SchemaToolTaskMoveTable.java          |   142 +
 .../metastore/tools/SchemaToolTaskUpgrade.java  |   116 +
 .../metastore/tools/SchemaToolTaskValidate.java |   630 +
 .../hadoop/hive/metastore/tools/SmokeTest.java  |   102 +
 .../txn/AcidCompactionHistoryService.java       |    71 +
 .../metastore/txn/AcidHouseKeeperService.java   |    71 +
 .../txn/AcidOpenTxnsCounterService.java         |    72 +
 .../hive/metastore/txn/AcidWriteSetService.java |    69 +
 .../hive/metastore/txn/CompactionInfo.java      |   170 +
 .../metastore/txn/CompactionTxnHandler.java     |  1158 ++
 .../hadoop/hive/metastore/txn/TxnDbUtil.java    |   621 +
 .../hadoop/hive/metastore/txn/TxnHandler.java   |  5094 +++++++
 .../hadoop/hive/metastore/txn/TxnStore.java     |   509 +
 .../hadoop/hive/metastore/txn/TxnUtils.java     |   481 +
 .../hive/metastore/utils/CommonCliOptions.java  |   160 +
 .../hadoop/hive/metastore/utils/FileUtils.java  |   537 +
 .../hadoop/hive/metastore/utils/HdfsUtils.java  |   395 +
 .../metastore/utils/HiveStrictManagedUtils.java |   100 +
 .../hadoop/hive/metastore/utils/LogUtils.java   |   140 +
 .../hive/metastore/utils/MetaStoreUtils.java    |  1840 +++
 .../metastore/utils/MetastoreVersionInfo.java   |   133 +
 .../hive/metastore/utils/SecurityUtils.java     |   313 +
 .../hive/metastore/utils/StringableMap.java     |    80 +
 .../main/resources/datanucleus-log4j.properties |    17 +
 .../main/resources/metastore-log4j2.properties  |    71 +
 .../src/main/resources/metastore-site.xml       |    34 +
 .../src/main/resources/package.jdo              |  1426 ++
 .../src/main/resources/saveVersion.sh           |    91 +
 .../metastore-server/src/main/scripts/base      |   237 +
 .../src/main/scripts/ext/metastore.sh           |    41 +
 .../src/main/scripts/ext/schemaTool.sh          |    33 +
 .../src/main/scripts/ext/smokeTest.sh           |    33 +
 .../src/main/scripts/metastore-config.sh        |    69 +
 .../src/main/scripts/schematool                 |    21 +
 .../src/main/scripts/start-metastore            |    22 +
 .../main/sql/derby/hive-schema-1.2.0.derby.sql  |   405 +
 .../main/sql/derby/hive-schema-3.0.0.derby.sql  |   692 +
 .../main/sql/derby/hive-schema-3.1.0.derby.sql  |   720 +
 .../main/sql/derby/hive-schema-4.0.0.derby.sql  |   720 +
 .../sql/derby/upgrade-1.2.0-to-2.0.0.derby.sql  |    62 +
 .../sql/derby/upgrade-2.0.0-to-2.1.0.derby.sql  |    22 +
 .../sql/derby/upgrade-2.1.0-to-2.2.0.derby.sql  |    59 +
 .../sql/derby/upgrade-2.2.0-to-2.3.0.derby.sql  |     5 +
 .../sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql  |   283 +
 .../sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql  |    68 +
 .../sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql  |     8 +
 .../src/main/sql/derby/upgrade.order.derby      |    18 +
 .../src/main/sql/mssql/create-user.mssql.sql    |     5 +
 .../main/sql/mssql/hive-schema-1.2.0.mssql.sql  |   947 ++
 .../main/sql/mssql/hive-schema-3.0.0.mssql.sql  |  1246 ++
 .../main/sql/mssql/hive-schema-3.1.0.mssql.sql  |  1284 ++
 .../main/sql/mssql/hive-schema-4.0.0.mssql.sql  |  1284 ++
 .../sql/mssql/upgrade-1.2.0-to-2.0.0.mssql.sql  |    73 +
 .../sql/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql  |    39 +
 .../sql/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql  |    43 +
 .../sql/mssql/upgrade-2.2.0-to-2.3.0.mssql.sql  |     7 +
 .../sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql  |   352 +
 .../sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql  |    70 +
 .../sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql  |    10 +
 .../src/main/sql/mssql/upgrade.order.mssql      |    12 +
 .../src/main/sql/mysql/create-user.mysql.sql    |     8 +
 .../main/sql/mysql/hive-schema-1.2.0.mysql.sql  |   910 ++
 .../main/sql/mysql/hive-schema-3.0.0.mysql.sql  |  1183 ++
 .../main/sql/mysql/hive-schema-3.1.0.mysql.sql  |  1218 ++
 .../main/sql/mysql/hive-schema-4.0.0.mysql.sql  |  1220 ++
 .../sql/mysql/upgrade-1.2.0-to-2.0.0.mysql.sql  |    75 +
 .../sql/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql  |    42 +
 .../sql/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql  |    43 +
 .../sql/mysql/upgrade-2.2.0-to-2.3.0.mysql.sql  |     8 +
 .../sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql  |   326 +
 .../sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql  |    71 +
 .../sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql  |    10 +
 .../src/main/sql/mysql/upgrade.order.mysql      |    18 +
 .../src/main/sql/oracle/create-user.oracle.sql  |     3 +
 .../sql/oracle/hive-schema-1.2.0.oracle.sql     |   856 ++
 .../sql/oracle/hive-schema-3.0.0.oracle.sql     |  1140 ++
 .../sql/oracle/hive-schema-3.1.0.oracle.sql     |  1175 ++
 .../sql/oracle/hive-schema-4.0.0.oracle.sql     |  1177 ++
 .../oracle/upgrade-1.2.0-to-2.0.0.oracle.sql    |    83 +
 .../oracle/upgrade-2.0.0-to-2.1.0.oracle.sql    |    39 +
 .../oracle/upgrade-2.1.0-to-2.2.0.oracle.sql    |    58 +
 .../oracle/upgrade-2.2.0-to-2.3.0.oracle.sql    |     7 +
 .../oracle/upgrade-2.3.0-to-3.0.0.oracle.sql    |   343 +
 .../oracle/upgrade-3.0.0-to-3.1.0.oracle.sql    |    70 +
 .../oracle/upgrade-3.1.0-to-4.0.0.oracle.sql    |     9 +
 .../src/main/sql/oracle/upgrade.order.oracle    |    14 +
 .../main/sql/postgres/create-user.postgres.sql  |     2 +
 .../sql/postgres/hive-schema-1.2.0.postgres.sql |  1562 ++
 .../sql/postgres/hive-schema-3.0.0.postgres.sql |  1827 +++
 .../sql/postgres/hive-schema-3.1.0.postgres.sql |  1866 +++
 .../sql/postgres/hive-schema-4.0.0.postgres.sql |  1868 +++
 .../upgrade-1.2.0-to-2.0.0.postgres.sql         |    73 +
 .../upgrade-2.0.0-to-2.1.0.postgres.sql         |    40 +
 .../upgrade-2.1.0-to-2.2.0.postgres.sql         |    39 +
 .../upgrade-2.2.0-to-2.3.0.postgres.sql         |     8 +
 .../upgrade-2.3.0-to-3.0.0.postgres.sql         |   360 +
 .../upgrade-3.0.0-to-3.1.0.postgres.sql         |    72 +
 .../upgrade-3.1.0-to-4.0.0.postgres.sql         |    10 +
 .../main/sql/postgres/upgrade.order.postgres    |    18 +
 .../hadoop/hive/common/TestStatsSetupConst.java |   114 +
 .../ndv/fm/TestFMSketchSerialization.java       |   101 +
 .../hive/common/ndv/hll/TestHLLNoBias.java      |   117 +
 .../common/ndv/hll/TestHLLSerialization.java    |   270 +
 .../hive/common/ndv/hll/TestHyperLogLog.java    |   338 +
 .../common/ndv/hll/TestHyperLogLogDense.java    |    85 +
 .../common/ndv/hll/TestHyperLogLogMerge.java    |   147 +
 .../common/ndv/hll/TestHyperLogLogSparse.java   |    84 +
 .../common/ndv/hll/TestSparseEncodeHash.java    |    59 +
 .../metastore/AlternateFailurePreListener.java  |    62 +
 .../metastore/DummyEndFunctionListener.java     |    47 +
 .../metastore/DummyJdoConnectionUrlHook.java    |    45 +
 .../hadoop/hive/metastore/DummyListener.java    |   126 +
 .../metastore/DummyMetaStoreInitListener.java   |    39 +
 .../hadoop/hive/metastore/DummyPreListener.java |    49 +
 .../DummyRawStoreControlledCommit.java          |  1268 ++
 .../DummyRawStoreForJdoConnection.java          |  1249 ++
 .../apache/hadoop/hive/metastore/FakeDerby.java |   404 +
 .../HiveMetaStoreClientPreCatalog.java          |  3546 +++++
 .../InjectableBehaviourObjectStore.java         |   218 +
 .../hive/metastore/IpAddressListener.java       |   102 +
 .../hive/metastore/MetaStoreTestUtils.java      |   291 +
 .../MockPartitionExpressionForMetastore.java    |    58 +
 .../hive/metastore/NonCatCallsWithCatalog.java  |  1158 ++
 .../hadoop/hive/metastore/TestAdminUser.java    |    49 +
 .../hive/metastore/TestAggregateStatsCache.java |   272 +
 .../metastore/TestCatalogNonDefaultClient.java  |    74 +
 .../metastore/TestCatalogNonDefaultSvr.java     |    68 +
 .../hive/metastore/TestCatalogOldClient.java    |    44 +
 .../hadoop/hive/metastore/TestDeadline.java     |   130 +
 .../metastore/TestEmbeddedHiveMetaStore.java    |    51 +
 .../hadoop/hive/metastore/TestFilterHooks.java  |   254 +
 .../hive/metastore/TestHiveAlterHandler.java    |   121 +
 .../hive/metastore/TestHiveMetaStore.java       |  3102 ++++
 .../metastore/TestHiveMetaStoreGetMetaConf.java |   115 +
 .../TestHiveMetaStorePartitionSpecs.java        |   383 +
 .../TestHiveMetaStoreSchemaMethods.java         |  1248 ++
 .../metastore/TestHiveMetaStoreTimeout.java     |   142 +
 .../hive/metastore/TestHiveMetaStoreTxns.java   |   267 +
 ...TestHiveMetaStoreWithEnvironmentContext.java |   191 +
 .../hive/metastore/TestHiveMetastoreCli.java    |    68 +
 .../hive/metastore/TestLockRequestBuilder.java  |   587 +
 .../hive/metastore/TestMarkPartition.java       |   118 +
 .../hive/metastore/TestMarkPartitionRemote.java |    34 +
 .../TestMetaStoreConnectionUrlHook.java         |    49 +
 .../TestMetaStoreEndFunctionListener.java       |   146 +
 .../metastore/TestMetaStoreEventListener.java   |   472 +
 .../TestMetaStoreEventListenerOnlyOnCommit.java |   121 +
 .../TestMetaStoreEventListenerWithOldConf.java  |   129 +
 .../metastore/TestMetaStoreInitListener.java    |    56 +
 .../metastore/TestMetaStoreListenersError.java  |    97 +
 .../metastore/TestMetaStoreSchemaFactory.java   |    72 +
 .../hive/metastore/TestMetaStoreSchemaInfo.java |    55 +
 .../hadoop/hive/metastore/TestObjectStore.java  |   904 ++
 .../metastore/TestObjectStoreInitRetry.java     |   135 +
 .../metastore/TestObjectStoreSchemaMethods.java |   602 +
 .../hadoop/hive/metastore/TestOldSchema.java    |   233 +
 .../TestPartitionNameWhitelistValidation.java   |   125 +
 .../hive/metastore/TestRawStoreProxy.java       |    67 +
 .../hive/metastore/TestRemoteHiveMetaStore.java |    64 +
 .../TestRemoteHiveMetaStoreIpAddress.java       |    66 +
 .../TestRemoteUGIHiveMetaStoreIpAddress.java    |    31 +
 .../TestRetriesInRetryingHMSHandler.java        |   111 +
 .../hive/metastore/TestRetryingHMSHandler.java  |    82 +
 .../metastore/TestSetUGIOnBothClientServer.java |    34 +
 .../hive/metastore/TestSetUGIOnOnlyClient.java  |    35 +
 .../hive/metastore/TestSetUGIOnOnlyServer.java  |    35 +
 .../apache/hadoop/hive/metastore/TestStats.java |   732 +
 .../hive/metastore/TestTableIterable.java       |    76 +
 .../hive/metastore/VerifyingObjectStore.java    |   219 +
 .../annotation/MetastoreCheckinTest.java        |    25 +
 .../metastore/annotation/MetastoreTest.java     |    24 +
 .../metastore/annotation/MetastoreUnitTest.java |    25 +
 .../hive/metastore/cache/TestCachedStore.java   |  1075 ++
 .../metastore/cache/TestCatalogCaching.java     |   142 +
 .../metastore/client/MetaStoreClientTest.java   |    95 +
 .../client/MetaStoreFactoryForTests.java        |   112 +
 .../metastore/client/TestAddPartitions.java     |  1736 +++
 .../client/TestAddPartitionsFromPartSpec.java   |  1267 ++
 .../metastore/client/TestAlterPartitions.java   |  1154 ++
 .../metastore/client/TestAppendPartitions.java  |   600 +
 .../hive/metastore/client/TestCatalogs.java     |   267 +
 .../metastore/client/TestCheckConstraint.java   |   363 +
 .../hive/metastore/client/TestDatabases.java    |   634 +
 .../metastore/client/TestDefaultConstraint.java |   363 +
 .../metastore/client/TestDropPartitions.java    |   659 +
 .../client/TestExchangePartitions.java          |  1337 ++
 .../hive/metastore/client/TestForeignKey.java   |   538 +
 .../hive/metastore/client/TestFunctions.java    |   765 +
 .../metastore/client/TestGetPartitions.java     |   608 +
 .../hive/metastore/client/TestGetTableMeta.java |   330 +
 .../metastore/client/TestListPartitions.java    |  1522 ++
 .../metastore/client/TestNotNullConstraint.java |   355 +
 .../hive/metastore/client/TestPrimaryKey.java   |   468 +
 .../hive/metastore/client/TestRuntimeStats.java |   154 +
 .../TestTablesCreateDropAlterTruncate.java      |  1400 ++
 .../metastore/client/TestTablesGetExists.java   |   514 +
 .../hive/metastore/client/TestTablesList.java   |   320 +
 .../metastore/client/TestUniqueConstraint.java  |   356 +
 .../hive/metastore/client/package-info.java     |    22 +
 .../merge/DecimalColumnStatsMergerTest.java     |   235 +
 .../hive/metastore/conf/TestMetastoreConf.java  |   433 +
 .../TestDataSourceProviderFactory.java          |   248 +
 .../hive/metastore/dbinstall/DbInstallBase.java |   265 +
 .../hive/metastore/dbinstall/ITestMysql.java    |    82 +
 .../hive/metastore/dbinstall/ITestOracle.java   |    83 +
 .../hive/metastore/dbinstall/ITestPostgres.java |    82 +
 .../metastore/dbinstall/ITestSqlServer.java     |    84 +
 .../json/TestJSONMessageDeserializer.java       |   115 +
 .../hive/metastore/metrics/TestMetrics.java     |   164 +
 .../minihms/AbstractMetaStoreService.java       |   173 +
 .../minihms/ClusterMetaStoreForTests.java       |    32 +
 .../minihms/EmbeddedMetaStoreForTests.java      |    33 +
 .../hadoop/hive/metastore/minihms/MiniHMS.java  |    76 +
 .../minihms/RemoteMetaStoreForTests.java        |    43 +
 .../tools/TestMetastoreSchemaTool.java          |    70 +
 .../tools/TestSchemaToolForMetastore.java       |   534 +
 .../metastore/txn/TestTxnHandlerNegative.java   |    58 +
 .../hadoop/hive/metastore/txn/TestTxnUtils.java |   239 +
 .../hive/metastore/utils/TestHdfsUtils.java     |   348 +
 .../metastore/utils/TestMetaStoreUtils.java     |   291 +
 .../src/test/resources/log4j2.properties        |    35 +
 standalone-metastore/pom.xml                    |     1 +
 upgrade-acid/pom.xml                            |     2 +-
 1427 files changed, 180054 insertions(+), 172313 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/ql/src/test/results/clientpositive/acid_nullscan.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
----------------------------------------------------------------------


[19/54] [abbrv] hive git commit: HIVE-19532 : fix tests for master-txnstats branch - more tests (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index b7fe6ba..923fe2c 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -22793,6 +22793,212 @@ class AlterPartitionsResponse:
   def __ne__(self, other):
     return not (self == other)
 
+class RenamePartitionRequest:
+  """
+  Attributes:
+   - catName
+   - dbName
+   - tableName
+   - partVals
+   - newPart
+   - txnId
+   - validWriteIdList
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'catName', None, None, ), # 1
+    (2, TType.STRING, 'dbName', None, None, ), # 2
+    (3, TType.STRING, 'tableName', None, None, ), # 3
+    (4, TType.LIST, 'partVals', (TType.STRING,None), None, ), # 4
+    (5, TType.STRUCT, 'newPart', (Partition, Partition.thrift_spec), None, ), # 5
+    (6, TType.I64, 'txnId', None, -1, ), # 6
+    (7, TType.STRING, 'validWriteIdList', None, None, ), # 7
+  )
+
+  def __init__(self, catName=None, dbName=None, tableName=None, partVals=None, newPart=None, txnId=thrift_spec[6][4], validWriteIdList=None,):
+    self.catName = catName
+    self.dbName = dbName
+    self.tableName = tableName
+    self.partVals = partVals
+    self.newPart = newPart
+    self.txnId = txnId
+    self.validWriteIdList = validWriteIdList
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.catName = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.dbName = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.STRING:
+          self.tableName = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.LIST:
+          self.partVals = []
+          (_etype847, _size844) = iprot.readListBegin()
+          for _i848 in xrange(_size844):
+            _elem849 = iprot.readString()
+            self.partVals.append(_elem849)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 5:
+        if ftype == TType.STRUCT:
+          self.newPart = Partition()
+          self.newPart.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 6:
+        if ftype == TType.I64:
+          self.txnId = iprot.readI64()
+        else:
+          iprot.skip(ftype)
+      elif fid == 7:
+        if ftype == TType.STRING:
+          self.validWriteIdList = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('RenamePartitionRequest')
+    if self.catName is not None:
+      oprot.writeFieldBegin('catName', TType.STRING, 1)
+      oprot.writeString(self.catName)
+      oprot.writeFieldEnd()
+    if self.dbName is not None:
+      oprot.writeFieldBegin('dbName', TType.STRING, 2)
+      oprot.writeString(self.dbName)
+      oprot.writeFieldEnd()
+    if self.tableName is not None:
+      oprot.writeFieldBegin('tableName', TType.STRING, 3)
+      oprot.writeString(self.tableName)
+      oprot.writeFieldEnd()
+    if self.partVals is not None:
+      oprot.writeFieldBegin('partVals', TType.LIST, 4)
+      oprot.writeListBegin(TType.STRING, len(self.partVals))
+      for iter850 in self.partVals:
+        oprot.writeString(iter850)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.newPart is not None:
+      oprot.writeFieldBegin('newPart', TType.STRUCT, 5)
+      self.newPart.write(oprot)
+      oprot.writeFieldEnd()
+    if self.txnId is not None:
+      oprot.writeFieldBegin('txnId', TType.I64, 6)
+      oprot.writeI64(self.txnId)
+      oprot.writeFieldEnd()
+    if self.validWriteIdList is not None:
+      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 7)
+      oprot.writeString(self.validWriteIdList)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.dbName is None:
+      raise TProtocol.TProtocolException(message='Required field dbName is unset!')
+    if self.tableName is None:
+      raise TProtocol.TProtocolException(message='Required field tableName is unset!')
+    if self.partVals is None:
+      raise TProtocol.TProtocolException(message='Required field partVals is unset!')
+    if self.newPart is None:
+      raise TProtocol.TProtocolException(message='Required field newPart is unset!')
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.catName)
+    value = (value * 31) ^ hash(self.dbName)
+    value = (value * 31) ^ hash(self.tableName)
+    value = (value * 31) ^ hash(self.partVals)
+    value = (value * 31) ^ hash(self.newPart)
+    value = (value * 31) ^ hash(self.txnId)
+    value = (value * 31) ^ hash(self.validWriteIdList)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class RenamePartitionResponse:
+
+  thrift_spec = (
+  )
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('RenamePartitionResponse')
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class AlterTableRequest:
   """
   Attributes:

http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 345cf31..2e2392d 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -5155,6 +5155,53 @@ class AlterPartitionsResponse
   ::Thrift::Struct.generate_accessors self
 end
 
+class RenamePartitionRequest
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  CATNAME = 1
+  DBNAME = 2
+  TABLENAME = 3
+  PARTVALS = 4
+  NEWPART = 5
+  TXNID = 6
+  VALIDWRITEIDLIST = 7
+
+  FIELDS = {
+    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+    DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+    TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
+    PARTVALS => {:type => ::Thrift::Types::LIST, :name => 'partVals', :element => {:type => ::Thrift::Types::STRING}},
+    NEWPART => {:type => ::Thrift::Types::STRUCT, :name => 'newPart', :class => ::Partition},
+    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableName is unset!') unless @tableName
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partVals is unset!') unless @partVals
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field newPart is unset!') unless @newPart
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
+class RenamePartitionResponse
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+
+  FIELDS = {
+
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
 class AlterTableRequest
   include ::Thrift::Struct, ::Thrift::Struct_Union
   CATNAME = 1

http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index e7cfc62..92424a4 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -1514,6 +1514,23 @@ module ThriftHiveMetastore
       return
     end
 
+    def rename_partition_req(req)
+      send_rename_partition_req(req)
+      return recv_rename_partition_req()
+    end
+
+    def send_rename_partition_req(req)
+      send_message('rename_partition_req', Rename_partition_req_args, :req => req)
+    end
+
+    def recv_rename_partition_req()
+      result = receive_message(Rename_partition_req_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'rename_partition_req failed: unknown result')
+    end
+
     def partition_name_has_valid_characters(part_vals, throw_exception)
       send_partition_name_has_valid_characters(part_vals, throw_exception)
       return recv_partition_name_has_valid_characters()
@@ -4755,6 +4772,19 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'rename_partition', seqid)
     end
 
+    def process_rename_partition_req(seqid, iprot, oprot)
+      args = read_args(iprot, Rename_partition_req_args)
+      result = Rename_partition_req_result.new()
+      begin
+        result.success = @handler.rename_partition_req(args.req)
+      rescue ::InvalidOperationException => o1
+        result.o1 = o1
+      rescue ::MetaException => o2
+        result.o2 = o2
+      end
+      write_result(result, oprot, 'rename_partition_req', seqid)
+    end
+
     def process_partition_name_has_valid_characters(seqid, iprot, oprot)
       args = read_args(iprot, Partition_name_has_valid_characters_args)
       result = Partition_name_has_valid_characters_result.new()
@@ -9677,6 +9707,42 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
+  class Rename_partition_req_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    REQ = 1
+
+    FIELDS = {
+      REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::RenamePartitionRequest}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Rename_partition_req_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+    O2 = 2
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::RenamePartitionResponse},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::InvalidOperationException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
   class Partition_name_has_valid_characters_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
     PART_VALS = 1

http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index e8226f8..719f001 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -547,6 +547,7 @@ public class HiveAlterHandler implements AlterHandler {
       // 2) partition column stats if there are any because of part_name field in HMS table PART_COL_STATS
       // 3) rename the partition directory if it is not an external table
       if (!tbl.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
+        // TODO: refactor this into a separate method after master merge, this one is too big.
         try {
           db = msdb.getDatabase(catName, dbname);
 
@@ -620,8 +621,6 @@ public class HiveAlterHandler implements AlterHandler {
       if (cs != null) {
         cs.getStatsDesc().setPartName(newPartName);
         try {
-          // Verifying ACID state again is not strictly needed here (alterPartition above does it),
-          // but we are going to use the uniform approach for simplicity.
           msdb.updatePartitionColumnStatistics(cs, new_part.getValues(),
               txnId, validWriteIds, new_part.getWriteId());
         } catch (InvalidInputException iie) {

http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index ab25593..285f7fb 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -4809,6 +4809,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           request.getPartitionOrder(), request.getMaxParts());
     }
 
+    @Deprecated
     @Override
     public void alter_partition(final String db_name, final String tbl_name,
         final Partition new_part)
@@ -4816,6 +4817,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       rename_partition(db_name, tbl_name, null, new_part);
     }
 
+    @Deprecated
     @Override
     public void alter_partition_with_environment_context(final String dbName,
         final String tableName, final Partition newPartition,
@@ -4824,9 +4826,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       String[] parsedDbName = parseDbName(dbName, conf);
       // TODO: this method name is confusing, it actually does full alter (sortof)
       rename_partition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, null, newPartition,
-          envContext);
+          envContext, -1, null);
     }
 
+    @Deprecated
     @Override
     public void rename_partition(final String db_name, final String tbl_name,
         final List<String> part_vals, final Partition new_part)
@@ -4834,13 +4837,19 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       // Call rename_partition without an environment context.
       String[] parsedDbName = parseDbName(db_name, conf);
       rename_partition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals, new_part,
-          null);
+          null, -1, null);
     }
 
-    private void rename_partition(final String catName, final String db_name, final String tbl_name,
-        final List<String> part_vals, final Partition new_part,
-        final EnvironmentContext envContext)
-        throws TException {
+    public RenamePartitionResponse rename_partition_req(
+        RenamePartitionRequest req) throws InvalidOperationException, MetaException, TException {
+      rename_partition(req.getCatName(), req.getDbName(), req.getTableName(), req.getPartVals(),
+          req.getNewPart(), null, req.getTxnId(), req.getValidWriteIdList());
+      return new RenamePartitionResponse();
+    }
+
+    private void rename_partition(String catName, String db_name, String tbl_name,
+        List<String> part_vals, Partition new_part, EnvironmentContext envContext, long txnId,
+        String validWriteIds) throws TException {
       startTableFunction("alter_partition", catName, db_name, tbl_name);
 
       if (LOG.isInfoEnabled()) {
@@ -4874,7 +4883,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
 
         oldPart = alterHandler.alterPartition(getMS(), wh, catName, db_name, tbl_name,
-            part_vals, new_part, envContext, this, -1, null);
+            part_vals, new_part, envContext, this, txnId, validWriteIds);
 
         // Only fetch the table if we actually have a listener
         Table table = null;

http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 1fa719d..70edb96 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -434,17 +434,21 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
     client.alter_table_req(req);
   }
 
+  @Deprecated
   @Override
   public void renamePartition(final String dbname, final String tableName, final List<String> part_vals,
                               final Partition newPart) throws TException {
-    renamePartition(getDefaultCatalog(conf), dbname, tableName, part_vals, newPart);
+    renamePartition(getDefaultCatalog(conf), dbname, tableName, part_vals, newPart, -1, null);
   }
 
   @Override
   public void renamePartition(String catName, String dbname, String tableName, List<String> part_vals,
-                              Partition newPart) throws TException {
-    client.rename_partition(prependCatalogToDbName(catName, dbname, conf), tableName, part_vals, newPart);
-
+                              Partition newPart, long txnId, String validWriteIds) throws TException {
+    RenamePartitionRequest req = new RenamePartitionRequest(dbname, tableName, part_vals, newPart);
+    req.setCatName(catName);
+    req.setTxnId(txnId);
+    req.setValidWriteIdList(validWriteIds);
+    client.rename_partition_req(req);
   }
 
   private void open() throws MetaException {

http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 41fd91e..70be8d8 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -2217,6 +2217,7 @@ public interface IMetaStoreClient {
    * @throws TException
    *          if error in communicating with metastore server
    */
+  @Deprecated
   void renamePartition(final String dbname, final String tableName, final List<String> part_vals,
                        final Partition newPart)
       throws InvalidOperationException, MetaException, TException;
@@ -2240,7 +2241,7 @@ public interface IMetaStoreClient {
    *          if error in communicating with metastore server
    */
   void renamePartition(String catName, String dbname, String tableName, List<String> part_vals,
-                       Partition newPart)
+                       Partition newPart, long txnId, String validWriteIds)
       throws InvalidOperationException, MetaException, TException;
 
   /**

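The client-side changes above route renamePartition through the new request object: the old overload is deprecated and delegates with txnId = -1 and a null write-id list, while the extended overload on IMetaStoreClient carries the transactional context explicitly. Below is a minimal caller-side sketch of the extended overload; the class name, table/partition values and the catalog name "hive" are illustrative assumptions, not values from this commit.

  import java.util.Arrays;
  import java.util.List;

  import org.apache.hadoop.hive.metastore.IMetaStoreClient;
  import org.apache.hadoop.hive.metastore.api.Partition;
  import org.apache.thrift.TException;

  public class RenamePartitionExample {
    /**
     * Renames a partition while passing the transactional context explicitly.
     * Callers without an open transaction keep the old behaviour by passing
     * txnId = -1 and validWriteIds = null, exactly as the deprecated overload
     * now does internally.
     */
    static void rename(IMetaStoreClient client, Partition newPart) throws TException {
      List<String> oldPartVals = Arrays.asList("2018", "01", "16"); // illustrative values
      long txnId = -1;             // no open transaction in this sketch
      String validWriteIds = null; // no valid write-id list without a transaction
      client.renamePartition("hive", // catalog name; "hive" assumed as the default catalog
          "default", "test_partitioned_table", oldPartVals, newPart, txnId, validWriteIds);
    }
  }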
http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
index 92205ae..6fcfbce 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
@@ -224,6 +224,19 @@ public final class TxnDbUtil {
       }
 
       try {
+        stmt.execute("CREATE TABLE \"APP\".\"DBS\" (\"DB_ID\" BIGINT NOT NULL, \"DESC\" " +
+            "VARCHAR(4000), \"DB_LOCATION_URI\" VARCHAR(4000) NOT NULL, \"NAME\" VARCHAR(128), " +
+            "\"OWNER_NAME\" VARCHAR(128), \"OWNER_TYPE\" VARCHAR(10), " +
+            "\"CTLG_NAME\" VARCHAR(256) NOT NULL, PRIMARY KEY (DB_ID))");
+      } catch (SQLException e) {
+        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+          LOG.info("TBLS table already exist, ignoring");
+        } else {
+          throw e;
+        }
+      }
+
+      try {
         stmt.execute("CREATE TABLE \"APP\".\"PARTITIONS\" (" +
             " \"PART_ID\" BIGINT NOT NULL, \"CREATE_TIME\" INTEGER NOT NULL, " +
             " \"LAST_ACCESS_TIME\" INTEGER NOT NULL, \"PART_NAME\" VARCHAR(767), " +

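The TxnDbUtil change above creates the DBS table with the same pattern used for the other Derby tables: attempt the CREATE TABLE and treat an "already exists" failure as benign. A small sketch of that pattern factored into a helper follows; the helper and class names are illustrative, not part of this commit.

  import java.sql.SQLException;
  import java.sql.Statement;

  public final class IdempotentDdl {
    /**
     * Executes a CREATE TABLE statement and ignores the failure if the table
     * already exists, mirroring the catch blocks in TxnDbUtil.
     */
    static void createTableIfMissing(Statement stmt, String ddl) throws SQLException {
      try {
        stmt.execute(ddl);
      } catch (SQLException e) {
        // Mirror TxnDbUtil: treat an "already exists" failure as benign,
        // rethrow anything else as a real error.
        if (e.getMessage() == null || !e.getMessage().contains("already exists")) {
          throw e;
        }
      }
    }
  }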
http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index 633fddf..4cfa6a1 100644
--- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@ -1623,6 +1623,19 @@ struct AlterPartitionsRequest {
 struct AlterPartitionsResponse {
 }
 
+struct RenamePartitionRequest {
+  1: optional string catName,
+  2: required string dbName,
+  3: required string tableName,
+  4: required list<string> partVals,
+  5: required Partition newPart,
+  6: optional i64 txnId=-1,
+  7: optional string validWriteIdList
+}
+
+struct RenamePartitionResponse {
+}
+
 struct AlterTableRequest {
   1: optional string catName,
   2: required string dbName,
@@ -1990,6 +2003,9 @@ service ThriftHiveMetastore extends fb303.FacebookService
   // partition keys in new_part should be the same as those in old partition.
   void rename_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:Partition new_part)
                        throws (1:InvalidOperationException o1, 2:MetaException o2)
+  
+  RenamePartitionResponse rename_partition_req(1:RenamePartitionRequest req)
+                       throws (1:InvalidOperationException o1, 2:MetaException o2)
 
   // returns whether or not the partition name is valid based on the value of the config
   // hive.metastore.partition.name.whitelist.pattern

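The IDL above makes dbName, tableName, partVals and newPart required, while catName, txnId (default -1) and validWriteIdList stay optional; rename_partition_req simply wraps the old positional rename_partition call in a single request struct. A minimal sketch of building the request with the generated Java classes follows, assuming they live in org.apache.hadoop.hive.metastore.api like the other Thrift structs; the concrete names and values are illustrative.

  import java.util.Arrays;

  import org.apache.hadoop.hive.metastore.api.Partition;
  import org.apache.hadoop.hive.metastore.api.RenamePartitionRequest;

  public class RenamePartitionRequestExample {
    /** Builds a request equivalent to the old rename_partition(db, tbl, partVals, newPart) call. */
    static RenamePartitionRequest buildRequest(Partition newPart) {
      RenamePartitionRequest req =
          new RenamePartitionRequest("default", "test_partitioned_table",
              Arrays.asList("2018", "01", "16"), newPart);
      req.setCatName("hive");        // optional; "hive" assumed as the default catalog
      req.setTxnId(-1);              // optional; defaults to -1, meaning no transaction
      req.setValidWriteIdList(null); // optional; only set when a transaction is open
      return req;
    }
  }

HiveMetaStoreClient then hands such a request to the Thrift layer via client.rename_partition_req(req), as shown in the HiveMetaStoreClient.java diff above.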
http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index 3709db5..bc04e06 100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -3311,8 +3311,8 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
 
   @Override
   public void renamePartition(String catName, String dbname, String tableName,
-                              List<String> part_vals, Partition newPart) throws
-      InvalidOperationException, MetaException, TException {
+      List<String> part_vals, Partition newPart, long txnId, String validWriteIds)
+          throws InvalidOperationException, MetaException, TException {
     throw new UnsupportedOperationException();
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
index 2ec20c2..0aab253 100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
@@ -1080,7 +1080,7 @@ public class TestAlterPartitions extends MetaStoreClientTest {
 
     Partition partToRename = oldParts.get(3);
     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
-    client.renamePartition("nosuch", DB_NAME, TABLE_NAME, oldValues.get(3), partToRename);
+    client.renamePartition("nosuch", DB_NAME, TABLE_NAME, oldValues.get(3), partToRename, -1, null);
   }
 
   @Test(expected = InvalidOperationException.class)


[26/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
index 0000000,816a735..bf302ed
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
@@@ -1,0 -1,1385 +1,1400 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore.client;
+ 
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.metastore.ColumnType;
+ import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+ import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
+ import org.apache.hadoop.hive.metastore.TableType;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+ import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+ import org.apache.thrift.TApplicationException;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.transport.TTransportException;
+ import org.junit.After;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.junit.BeforeClass;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.junit.runner.RunWith;
+ import org.junit.runners.Parameterized;
+ 
+ import java.io.File;
+ import java.net.URI;
+ import java.net.URISyntaxException;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Set;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+ 
+ /**
+  * Test class for IMetaStoreClient API. Testing the Table related functions for metadata
+  * manipulation, like creating, dropping and altering tables.
+  */
+ @RunWith(Parameterized.class)
+ @Category(MetastoreCheckinTest.class)
+ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest {
+   private static final String DEFAULT_DATABASE = "default";
+   private static final String OTHER_DATABASE = "dummy";
+   private final AbstractMetaStoreService metaStore;
+   private IMetaStoreClient client;
+   private Table[] testTables = new Table[6];
+   private Table partitionedTable = null;
+   private Table externalTable = null;
+ 
+   public TestTablesCreateDropAlterTruncate(String name, AbstractMetaStoreService metaStore) {
+     this.metaStore = metaStore;
+   }
+ 
+   @BeforeClass
+   public static void startMetaStores() {
+     Map<MetastoreConf.ConfVars, String> msConf = new HashMap<MetastoreConf.ConfVars, String>();
+     // Enable trash, so it can be tested
+     Map<String, String> extraConf = new HashMap<>();
+     extraConf.put("fs.trash.checkpoint.interval", "30");  // FS_TRASH_CHECKPOINT_INTERVAL_KEY
+     extraConf.put("fs.trash.interval", "30");             // FS_TRASH_INTERVAL_KEY (hadoop-2)
+     startMetaStores(msConf, extraConf);
+   }
+ 
+   @Before
+   public void setUp() throws Exception {
+     // Get new client
+     client = metaStore.getClient();
+ 
+     // Clean up the database
+     client.dropDatabase(OTHER_DATABASE, true, true, true);
+     // Drop every table in the default database
+     for(String tableName : client.getAllTables(DEFAULT_DATABASE)) {
+       client.dropTable(DEFAULT_DATABASE, tableName, true, true, true);
+     }
+ 
+     // Clean up trash
+     metaStore.cleanWarehouseDirs();
+ 
+     testTables[0] =
+         new TableBuilder()
+             .setTableName("test_table")
+             .addCol("test_col", "int")
+             .create(client, metaStore.getConf());
+ 
+     testTables[1] =
+         new TableBuilder()
+             .setTableName("test_view")
+             .addCol("test_col", "int")
+             .setType("VIRTUAL_VIEW")
+             .create(client, metaStore.getConf());
+ 
+     testTables[2] =
+         new TableBuilder()
+             .setTableName("test_table_to_find_1")
+             .addCol("test_col", "int")
+             .create(client, metaStore.getConf());
+ 
+     testTables[3] =
+         new TableBuilder()
+             .setTableName("test_partitioned_table")
+             .addCol("test_col1", "int")
+             .addCol("test_col2", "int")
+             .addPartCol("test_part_col", "int")
+             .create(client, metaStore.getConf());
+ 
+     testTables[4] =
+         new TableBuilder()
+             .setTableName("external_table_for_test")
+             .addCol("test_col", "int")
+             .setLocation(metaStore.getWarehouseRoot() + "/external/table_dir")
+             .addTableParam("EXTERNAL", "TRUE")
+             .setType("EXTERNAL_TABLE")
+             .create(client, metaStore.getConf());
+ 
+ 
+     new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf());
+ 
+     testTables[5] =
+         new TableBuilder()
+             .setDbName(OTHER_DATABASE)
+             .setTableName("test_table")
+             .addCol("test_col", "int")
+             .create(client, metaStore.getConf());
+ 
+     // Create partitions for the partitioned table
+     for(int i=0; i < 2; i++) {
+       new PartitionBuilder()
+               .inTable(testTables[3])
+               .addValue("a" + i)
+               .addToTable(client, metaStore.getConf());
+     }
+     // Add an external partition too
+     new PartitionBuilder()
+         .inTable(testTables[3])
+         .addValue("a2")
+         .setLocation(metaStore.getWarehouseRoot() + "/external/a2")
+         .addToTable(client, metaStore.getConf());
+ 
+     // Add data files to the partitioned table
+     List<Partition> partitions =
+         client.listPartitions(testTables[3].getDbName(), testTables[3].getTableName(), (short)-1);
+     for(Partition partition : partitions) {
+       Path dataFile = new Path(partition.getSd().getLocation() + "/dataFile");
+       metaStore.createFile(dataFile, "100");
+     }
+ 
+     // Reload tables from the MetaStore, and create data files
+     for(int i=0; i < testTables.length; i++) {
+       testTables[i] = client.getTable(testTables[i].getDbName(), testTables[i].getTableName());
+       if (testTables[i].getPartitionKeys().isEmpty()) {
+         if (testTables[i].getSd().getLocation() != null) {
+           Path dataFile = new Path(testTables[i].getSd().getLocation() + "/dataFile");
+           metaStore.createFile(dataFile, "100");
+         }
+       }
+     }
+     partitionedTable = testTables[3];
+     externalTable = testTables[4];
+   }
+ 
+   @After
+   public void tearDown() throws Exception {
+     try {
+       if (client != null) {
+         try {
+           client.close();
+         } catch (Exception e) {
+           // HIVE-19729: Swallow the exceptions based on the discussion in the Jira
+         }
+       }
+     } finally {
+       client = null;
+     }
+   }
+ 
+   /**
+    * This test creates and queries a table and then drops it. Good for testing the happy path
+    */
+   @Test
+   public void testCreateGetDeleteTable() throws Exception {
+     // Try to create a table with all of the parameters set
+     Table table = getTableWithAllParametersSet();
+     client.createTable(table);
+     Table createdTable = client.getTable(table.getDbName(), table.getTableName());
+     // The createTime will be set on the server side, so the comparison should skip it
+     table.setCreateTime(createdTable.getCreateTime());
+     // The extra parameters will be added on server side, so check that the required ones are
+     // present
+     for(String key: table.getParameters().keySet()) {
+       Assert.assertEquals("parameters are the same",
+           table.getParameters().get(key), createdTable.getParameters().get(key));
+     }
+     // Reset the parameters, so we can compare
+     table.setParameters(createdTable.getParameters());
+     table.setCreationMetadata(createdTable.getCreationMetadata());
++    table.setWriteId(createdTable.getWriteId());
+     Assert.assertEquals("create/get table data", table, createdTable);
+ 
+     // Check that the directory is created
+     Assert.assertTrue("The directory should not be created",
+         metaStore.isPathExists(new Path(createdTable.getSd().getLocation())));
+ 
+     client.dropTable(table.getDbName(), table.getTableName(), true, false);
+     try {
+       client.getTable(table.getDbName(), table.getTableName());
+       Assert.fail("Expected a NoSuchObjectException to be thrown");
+     } catch (NoSuchObjectException exception) {
+       // Expected exception
+     }
+   }
+ 
+   @Test
+   public void testCreateTableDefaultValues() throws Exception {
+     Table table = new Table();
+     StorageDescriptor sd = new StorageDescriptor();
+     List<FieldSchema> cols = new ArrayList<>();
+ 
+     table.setDbName(DEFAULT_DATABASE);
+     table.setTableName("test_table_2");
+     cols.add(new FieldSchema("column_name", "int", null));
+     sd.setCols(cols);
+     sd.setSerdeInfo(new SerDeInfo());
+     table.setSd(sd);
+ 
+     client.createTable(table);
+     Table createdTable = client.getTable(table.getDbName(), table.getTableName());
+ 
+     Assert.assertEquals("Comparing OwnerType", PrincipalType.USER, createdTable.getOwnerType());
+     Assert.assertNull("Comparing OwnerName", createdTable.getOwner());
+     Assert.assertNotEquals("Comparing CreateTime", 0, createdTable.getCreateTime());
+     Assert.assertEquals("Comparing LastAccessTime", 0, createdTable.getLastAccessTime());
+     Assert.assertEquals("Comparing Retention", 0, createdTable.getRetention());
+     Assert.assertEquals("Comparing PartitionKeys", 0, createdTable.getPartitionKeys().size());
+     // TODO: If this test method is the first to run, then the parameters do not contain totalSize
+     // and numFiles; if this runs after other tests (setUp/dropDatabase is successful), then the
+     // totalSize and the numFiles are set.
+     Assert.assertEquals("Comparing Parameters length", 1, createdTable.getParameters().size());
+     Assert.assertNotEquals("Comparing Parameters(transient_lastDdlTime)", "0",
+         createdTable.getParameters().get("transient_lastDdlTime"));
+ //    Assert.assertEquals("Comparing Parameters(totalSize)", "0",
+ //        createdTable.getParameters().get("totalSize"));
+ //    Assert.assertEquals("Comparing Parameters(numFiles)", "0",
+ //        createdTable.getParameters().get("numFiles"));
+     Assert.assertNull("Comparing ViewOriginalText", createdTable.getViewOriginalText());
+     Assert.assertNull("Comparing ViewExpandedText", createdTable.getViewExpandedText());
+     Assert.assertEquals("Comparing TableType", "MANAGED_TABLE", createdTable.getTableType());
+     Assert.assertTrue("Creation metadata should be empty", createdTable.getCreationMetadata() == null);
+ 
+     // Storage Descriptor data
+     StorageDescriptor createdSd = createdTable.getSd();
+     Assert.assertEquals("Storage descriptor cols", 1, createdSd.getCols().size());
+     Assert.assertNull("Storage descriptor cols[0].comment",
+         createdSd.getCols().get(0).getComment());
+     Assert.assertEquals("Storage descriptor location", metaStore.getWarehouseRoot()
+         + "/" + table.getTableName(), createdSd.getLocation());
+     Assert.assertTrue("Table path should be created",
+         metaStore.isPathExists(new Path(createdSd.getLocation())));
+     // TODO: Embedded MetaStore changes the table object when client.createTable is called
+     //Assert.assertNull("Original table storage descriptor location should be null",
+     //    table.getSd().getLocation());
+ 
+     Assert.assertNull("Storage descriptor input format", createdSd.getInputFormat());
+     Assert.assertNull("Storage descriptor output format", createdSd.getOutputFormat());
+     Assert.assertFalse("Storage descriptor compressed", createdSd.isCompressed());
+     Assert.assertEquals("Storage descriptor num buckets", 0, createdSd.getNumBuckets());
+     Assert.assertEquals("Storage descriptor bucket cols", 0, createdSd.getBucketCols().size());
+     Assert.assertEquals("Storage descriptor sort cols", 0, createdSd.getSortCols().size());
+     Assert.assertEquals("Storage descriptor parameters", 0, createdSd.getParameters().size());
+     Assert.assertFalse("Storage descriptor stored as subdir", createdSd.isStoredAsSubDirectories());
+ 
+     // Serde info
+     SerDeInfo serDeInfo = createdSd.getSerdeInfo();
+     Assert.assertNull("SerDeInfo name", serDeInfo.getName());
+     Assert.assertNull("SerDeInfo serialization lib", serDeInfo.getSerializationLib());
+     Assert.assertEquals("SerDeInfo parameters", 0, serDeInfo.getParameters().size());
+ 
+     // Skewed info
+     SkewedInfo skewedInfo = createdSd.getSkewedInfo();
+     Assert.assertEquals("Skewed info col names", 0, skewedInfo.getSkewedColNames().size());
+     Assert.assertEquals("Skewed info col values", 0, skewedInfo.getSkewedColValues().size());
+     Assert.assertEquals("Skewed info col value maps", 0,
+         skewedInfo.getSkewedColValueLocationMaps().size());
+   }
+ 
+   @Test
+   public void testCreateTableDefaultLocationInSpecificDatabase() throws Exception {
+     Table table = new Table();
+     StorageDescriptor sd = new StorageDescriptor();
+     List<FieldSchema> cols = new ArrayList<>();
+ 
+     table.setDbName(OTHER_DATABASE);
+     table.setTableName("test_table_2");
+     cols.add(new FieldSchema("column_name", "int", null));
+     sd.setCols(cols);
+     sd.setSerdeInfo(new SerDeInfo());
+     table.setSd(sd);
+ 
+     client.createTable(table);
+     Table createdTable = client.getTable(table.getDbName(), table.getTableName());
+     Assert.assertEquals("Storage descriptor location", metaStore.getWarehouseRoot()
+         + "/" + table.getDbName() + ".db/" + table.getTableName(),
+         createdTable.getSd().getLocation());
+   }
+ 
+   @Test
+   public void testCreateTableDefaultValuesView() throws Exception {
+     Table table = new Table();
+     StorageDescriptor sd = new StorageDescriptor();
+     List<FieldSchema> cols = new ArrayList<>();
+ 
+     table.setDbName(DEFAULT_DATABASE);
+     table.setTableName("test_table_2");
+     table.setTableType("VIRTUAL_VIEW");
+     cols.add(new FieldSchema("column_name", "int", null));
+     sd.setCols(cols);
+     sd.setSerdeInfo(new SerDeInfo());
+     table.setSd(sd);
+ 
+     client.createTable(table);
+     Table createdTable = client.getTable(table.getDbName(), table.getTableName());
+ 
+     // No location should be created for views
+     Assert.assertNull("Storage descriptor location should be null",
+         createdTable.getSd().getLocation());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testCreateTableNullDatabase() throws Exception {
+     Table table = testTables[0];
+     table.setDbName(null);
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testCreateTableNullTableName() throws Exception {
+     Table table = testTables[0];
+     table.setTableName(null);
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testCreateTableInvalidTableName() throws Exception {
+     Table table = testTables[0];
+     table.setTableName("test_table;");
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testCreateTableEmptyName() throws Exception {
+     Table table = testTables[0];
+     table.setTableName("");
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testCreateTableNullStorageDescriptor() throws Exception {
+     Table table = testTables[0];
+     table.setSd(null);
+ 
+     client.createTable(table);
+   }
+ 
+   private Table getNewTable() throws MetaException {
+     return new TableBuilder()
+                .setTableName("test_table_with_invalid_sd")
+                .addCol("test_col", "int")
+                .build(metaStore.getConf());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testCreateTableInvalidStorageDescriptorNullColumns() throws Exception {
+     Table table = getNewTable();
+     table.getSd().setCols(null);
+     client.createTable(table);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testCreateTableInvalidStorageDescriptorNullSerdeInfo() throws Exception {
+     Table table = getNewTable();
+     table.getSd().setSerdeInfo(null);
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testCreateTableInvalidStorageDescriptorNullColumnType() throws Exception {
+     Table table = getNewTable();
+     table.getSd().getCols().get(0).setType(null);
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testCreateTableInvalidStorageDescriptorInvalidColumnType() throws Exception {
+     Table table = getNewTable();
+     table.getSd().getCols().get(0).setType("xyz");
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testCreateTableNoSuchDatabase() throws Exception {
+     Table table = testTables[0];
+     table.setDbName("no_such_database");
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = AlreadyExistsException.class)
+   public void testCreateTableAlreadyExists() throws Exception {
+     Table table = testTables[0];
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = NoSuchObjectException.class)
+   public void testDropTableNoSuchDatabase() throws Exception {
+     Table table = testTables[2];
+ 
+     client.dropTable("no_such_database", table.getTableName(), true, false);
+   }
+ 
+   @Test(expected = NoSuchObjectException.class)
+   public void testDropTableNoSuchTable() throws Exception {
+     Table table = testTables[2];
+ 
+     client.dropTable(table.getDbName(), "no_such_table", true, false);
+   }
+ 
+   @Test(expected = NoSuchObjectException.class)
+   public void testDropTableNoSuchTableInTheDatabase() throws Exception {
+     Table table = testTables[2];
+ 
+     client.dropTable(OTHER_DATABASE, table.getTableName(), true, false);
+   }
+ 
+   @Test
+   public void testDropTableNullDatabase() throws Exception {
+     // Missing database name in the call
+     try {
+       client.dropTable(null, OTHER_DATABASE, true, false);
+       // TODO: Should be checked on server side. On Embedded metastore it throws MetaException,
+       // on Remote metastore it throws TProtocolException
+       Assert.fail("Expected an MetaException or TProtocolException to be thrown");
+     } catch (MetaException exception) {
+       // Expected exception - Embedded MetaStore
+     } catch (TProtocolException exception) {
+       // Expected exception - Remote MetaStore
+     }
+   }
+ 
+   @Test
+   public void testDropTableNullTableName() throws Exception {
+     try {
+       client.dropTable(DEFAULT_DATABASE, null, true, false);
+       // TODO: Should be checked on server side. On Embedded metastore it throws MetaException,
+       // on Remote metastore it throws TProtocolException
+       Assert.fail("Expected an MetaException or TProtocolException to be thrown");
+     } catch (MetaException exception) {
+       // Expected exception - Embedded MetaStore
+     } catch (TProtocolException exception) {
+       // Expected exception - Remote MetaStore
+     }
+   }
+ 
+   @Test
+   public void testDropTableCaseInsensitive() throws Exception {
+     Table table = testTables[0];
+ 
+     // Test in upper case
+     client.dropTable(table.getDbName().toUpperCase(), table.getTableName().toUpperCase());
+     try {
+       client.getTable(table.getDbName(), table.getTableName());
+       Assert.fail("Expected a NoSuchObjectException to be thrown");
+     } catch (NoSuchObjectException exception) {
+       // Expected exception
+     }
+ 
+     // Test in mixed case
+     client.createTable(table);
+     client.dropTable("DeFaUlt", "TeST_tAbLE");
+     try {
+       client.getTable(table.getDbName(), table.getTableName());
+       Assert.fail("Expected a NoSuchObjectException to be thrown");
+     } catch (NoSuchObjectException exception) {
+       // Expected exception
+     }
+   }
+ 
+   @Test
+   public void testDropTableDeleteDir() throws Exception {
+     Table table = testTables[0];
+     Partition externalPartition = client.getPartition(partitionedTable.getDbName(),
+         partitionedTable.getTableName(), "test_part_col=a2");
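+     // Fetch this partition up front so its location can still be checked after the table is dropped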
+ 
+     client.dropTable(table.getDbName(), table.getTableName(), true, false);
+ 
+     Assert.assertFalse("Table path should be removed",
+         metaStore.isPathExists(new Path(table.getSd().getLocation())));
+ 
+     client.createTable(table);
+     client.dropTable(table.getDbName(), table.getTableName(), false, false);
+ 
+     Assert.assertTrue("Table path should be kept",
+         metaStore.isPathExists(new Path(table.getSd().getLocation())));
+ 
+     // Drop table with partitions
+     client.dropTable(partitionedTable.getDbName(), partitionedTable.getTableName(), true, false);
+ 
+     Assert.assertFalse("Table path should be removed",
+         metaStore.isPathExists(new Path(partitionedTable.getSd().getLocation())));
+ 
+     Assert.assertFalse("Extra partition path should be removed",
+         metaStore.isPathExists(new Path(externalPartition.getSd().getLocation())));
+   }
+ 
+   @Test
+   public void testDropTableIgnoreUnknown() throws Exception {
+     Table table = testTables[0];
+ 
+     // Check what happens when we ignore these errors
+     client.dropTable("no_such_database", table.getTableName(), true, true);
+     client.dropTable(table.getDbName(), "no_such_table", false, true);
+     client.dropTable(OTHER_DATABASE, table.getTableName(), true, true);
+ 
+     // TODO: Strangely the default parametrization is to ignore missing tables
+     client.dropTable("no_such_database", table.getTableName());
+     client.dropTable(table.getDbName(), "no_such_table");
+     client.dropTable(OTHER_DATABASE, table.getTableName());
+   }
+ 
+   @Test
+   public void testDropTableWithPurge() throws Exception {
+     Table table = testTables[0];
+ 
+     client.dropTable(table.getDbName(), table.getTableName(), true, true, true);
+ 
+     Assert.assertFalse("Table path should be removed",
+         metaStore.isPathExists(new Path(table.getSd().getLocation())));
+     Assert.assertFalse("Table path should not be in trash",
+         metaStore.isPathExistsInTrash(new Path(table.getSd().getLocation())));
+   }
+ 
+   @Test
+   public void testDropTableWithoutPurge() throws Exception {
+     Table table = testTables[0];
+ 
+     client.dropTable(table.getDbName(), table.getTableName(), true, true, false);
+ 
+     Assert.assertFalse("Table path should be removed",
+         metaStore.isPathExists(new Path(table.getSd().getLocation())));
+     Assert.assertTrue("Table path should be in trash",
+         metaStore.isPathExistsInTrash(new Path(table.getSd().getLocation())));
+   }
+ 
+   @Test
+   public void testDropTableExternalWithPurge() throws Exception {
+     Table table = externalTable;
+ 
+     client.dropTable(table.getDbName(), table.getTableName(), true, true, true);
+ 
+     Assert.assertTrue("Table path should not be removed",
+         metaStore.isPathExists(new Path(table.getSd().getLocation())));
+     Assert.assertFalse("Table path should not be in trash",
+         metaStore.isPathExistsInTrash(new Path(table.getSd().getLocation())));
+   }
+ 
+   @Test
+   public void testDropTableExternalWithoutPurge() throws Exception {
+     Table table = externalTable;
+ 
+     client.dropTable(table.getDbName(), table.getTableName(), true, true, false);
+ 
+     Assert.assertTrue("Table path should not be removed",
+         metaStore.isPathExists(new Path(table.getSd().getLocation())));
+     Assert.assertFalse("Table path should be in trash",
+         metaStore.isPathExistsInTrash(new Path(table.getSd().getLocation())));
+   }
+ 
+   @Test
+   public void testTruncateTableUnpartitioned() throws Exception {
+     // Unpartitioned table
+     Path dataFile = new Path(testTables[0].getSd().getLocation() + "/dataFile");
+     client.truncateTable(testTables[0].getDbName(), testTables[0].getTableName(), null);
+     Assert.assertTrue("Location should exist",
+         metaStore.isPathExists(new Path(testTables[0].getSd().getLocation())));
+     Assert.assertFalse("DataFile should be removed", metaStore.isPathExists(dataFile));
+ 
+   }
+ 
+   @Test
+   public void testTruncateTablePartitioned() throws Exception {
+     // Partitioned table - delete specific partitions a0, a2
+     List<String> partitionsToDelete = new ArrayList<>();
+     partitionsToDelete.add("test_part_col=a0");
+     partitionsToDelete.add("test_part_col=a2");
+     client.truncateTable(partitionedTable.getDbName(), partitionedTable.getTableName(),
+         partitionsToDelete);
+     Assert.assertTrue("Location should exist",
+         metaStore.isPathExists(new Path(partitionedTable.getSd().getLocation())));
+     List<Partition> partitions =
+         client.listPartitions(partitionedTable.getDbName(), partitionedTable.getTableName(),
+             (short)-1);
+     for(Partition partition : partitions) {
+       Path dataFile = new Path(partition.getSd().getLocation() + "/dataFile");
+       if (partition.getValues().contains("a0") || partition.getValues().contains("a2")) {
+         // a0, a2 should be empty
+         Assert.assertFalse("DataFile should be removed", metaStore.isPathExists(dataFile));
+       } else {
+         // Others (a1) should be kept
+         Assert.assertTrue("DataFile should not be removed", metaStore.isPathExists(dataFile));
+       }
+     }
+ 
+   }
+ 
+   @Test
+   public void testTruncateTablePartitionedDeleteAll() throws Exception {
+     // Partitioned table - delete all
+     client.truncateTable(partitionedTable.getDbName(), partitionedTable.getTableName(), null);
+     Assert.assertTrue("Location should exist",
+         metaStore.isPathExists(new Path(partitionedTable.getSd().getLocation())));
+     List<Partition> partitions =
+         client.listPartitions(partitionedTable.getDbName(), partitionedTable.getTableName(),
+             (short)-1);
+     for(Partition partition : partitions) {
+       Path dataFile = new Path(partition.getSd().getLocation() + "/dataFile");
+       Assert.assertFalse("Every dataFile should be removed", metaStore.isPathExists(dataFile));
+     }
+   }
+ 
+   @Test
+   public void testAlterTable() throws Exception {
+     Table originalTable = testTables[2];
+     String originalTableName = originalTable.getTableName();
+     String originalDatabase = originalTable.getDbName();
+ 
+     Table newTable = getTableWithAllParametersSet();
+     newTable.setTableName(originalTableName);
+     newTable.setDbName(originalDatabase);
+     // Partition keys cannot be set, but getTableWithAllParametersSet adds one, so remove it
+     // for this test
+     newTable.setPartitionKeys(originalTable.getPartitionKeys());
+     client.alter_table(originalDatabase, originalTableName, newTable);
+     Table alteredTable = client.getTable(originalDatabase, originalTableName);
+ 
+     // The extra parameters will be added on server side, so check that the required ones are
+     // present
+     for(String key: newTable.getParameters().keySet()) {
+       Assert.assertEquals("parameters are present", newTable.getParameters().get(key),
+           alteredTable.getParameters().get(key));
+     }
+     // The parameters were already verified above, so exclude them from the full comparison
+     newTable.setParameters(alteredTable.getParameters());
+ 
+     // Some of the data is set on the server side, so reset those
+     newTable.setCreateTime(alteredTable.getCreateTime());
+     newTable.setCreationMetadata(alteredTable.getCreationMetadata());
++    newTable.setWriteId(alteredTable.getWriteId());
+     Assert.assertEquals("The table data should be the same", newTable, alteredTable);
+   }
+ 
+   @Test
+   public void testAlterTableRename() throws Exception {
+     Table originalTable = testTables[2];
+     String originalTableName = originalTable.getTableName();
+     String originalDatabase = originalTable.getDbName();
+ 
+     Table newTable = originalTable.deepCopy();
+     // Keep the original location to verify that it is still updated on rename, even when it is
+     // left unchanged rather than set to null
+     newTable.setTableName("new_table");
+     client.alter_table(originalDatabase, originalTableName, newTable);
+     List<String> tableNames = client.getTables(originalDatabase, originalTableName);
+     Assert.assertEquals("Original table should be removed", 0, tableNames.size());
+     Assert.assertFalse("Original table directory should be removed",
+         metaStore.isPathExists(new Path(originalTable.getSd().getLocation())));
+     Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     Assert.assertTrue("New table directory should exist",
+         metaStore.isPathExists(new Path(alteredTable.getSd().getLocation())));
+     Assert.assertEquals("New directory should be set", new Path(metaStore.getWarehouseRoot()
+         + "/" + alteredTable.getTableName()), new Path(alteredTable.getSd().getLocation()));
+ 
+     Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile");
+     Assert.assertTrue("New directory should contain data", metaStore.isPathExists(dataFile));
+ 
+     // The following data should be changed
+     newTable.getSd().setLocation(alteredTable.getSd().getLocation());
+     Assert.assertEquals("The table data should be the same", newTable, alteredTable);
+   }
+ 
+   @Test
+   public void testAlterTableChangingDatabase() throws Exception {
+     Table originalTable = testTables[2];
+     String originalTableName = originalTable.getTableName();
+     String originalDatabase = originalTable.getDbName();
+ 
+     Table newTable = originalTable.deepCopy();
+     newTable.setDbName(OTHER_DATABASE);
+     client.alter_table(originalDatabase, originalTableName, newTable);
+     List<String> tableNames = client.getTables(originalDatabase, originalTableName);
+     Assert.assertEquals("Original table should be removed", 0, tableNames.size());
+     Assert.assertFalse("Original table directory should be removed",
+         metaStore.isPathExists(new Path(originalTable.getSd().getLocation())));
+     Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     Assert.assertTrue("New table directory should exist",
+         metaStore.isPathExists(new Path(alteredTable.getSd().getLocation())));
+     Assert.assertEquals("New directory should be set", new Path(metaStore.getWarehouseRoot()
+         + "/" + alteredTable.getDbName() + ".db/" + alteredTable.getTableName()),
+         new Path(alteredTable.getSd().getLocation()));
+     Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile");
+     Assert.assertTrue("New directory should contain data", metaStore.isPathExists(dataFile));
+ 
+     // The following data should be changed, other data should be the same
+     newTable.getSd().setLocation(alteredTable.getSd().getLocation());
+     Assert.assertEquals("The table data should be the same", newTable, alteredTable);
+   }
+ 
+   @Test
+   public void testAlterTableExternalTable() throws Exception {
+     Table originalTable = externalTable;
+     String originalTableName = originalTable.getTableName();
+     String originalDatabase = originalTable.getDbName();
+ 
+     Table newTable = originalTable.deepCopy();
+     newTable.setTableName("new_external_table_for_test");
+     client.alter_table(originalDatabase, originalTableName, newTable);
+     List<String> tableNames = client.getTables(originalDatabase, originalTableName);
+     Assert.assertEquals("Original table should be removed", 0, tableNames.size());
+     Assert.assertTrue("Original table directory should be kept",
+         metaStore.isPathExists(new Path(originalTable.getSd().getLocation())));
+     Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     Assert.assertEquals("New location should be the same", originalTable.getSd().getLocation(),
+         alteredTable.getSd().getLocation());
+     Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile");
+     Assert.assertTrue("The location should contain data", metaStore.isPathExists(dataFile));
+ 
+     // The extra parameters will be added on server side, so check that the required ones are
+     // present
+     for(String key: newTable.getParameters().keySet()) {
+       Assert.assertEquals("parameters are present", newTable.getParameters().get(key),
+           alteredTable.getParameters().get(key));
+     }
+     // The parameters were already verified above, so exclude them from the full comparison
+     newTable.setParameters(alteredTable.getParameters());
+     Assert.assertEquals("The table data should be the same", newTable, alteredTable);
+   }
+ 
+   @Test
+   public void testAlterTableExternalTableChangeLocation() throws Exception {
+     Table originalTable = externalTable;
+ 
+     // Change the location, and see the results
+     Table newTable = originalTable.deepCopy();
+     newTable.getSd().setLocation(newTable.getSd().getLocation() + "_modified");
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+     Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     Assert.assertTrue("Original table directory should be kept",
+         metaStore.isPathExists(new Path(originalTable.getSd().getLocation())));
+     Assert.assertEquals("New location should be the new one", newTable.getSd().getLocation(),
+         alteredTable.getSd().getLocation());
+     Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile");
+     Assert.assertFalse("The location should not contain data", metaStore.isPathExists(dataFile));
+ 
+     // The extra parameters will be added on server side, so check that the required ones are
+     // present
+     for(String key: newTable.getParameters().keySet()) {
+       Assert.assertEquals("parameters are present", newTable.getParameters().get(key),
+           alteredTable.getParameters().get(key));
+     }
+     // The parameters were already verified above, so exclude them from the full comparison
+     newTable.setParameters(alteredTable.getParameters());
+ 
+     // The following data should be changed, other data should be the same
+     newTable.getSd().setLocation(alteredTable.getSd().getLocation());
+     Assert.assertEquals("The table data should be the same", newTable, alteredTable);
+   }
+ 
+   @Test
+   public void testAlterTableChangeCols() throws Exception {
+     Table originalTable = partitionedTable;
+ 
+     Table newTable = originalTable.deepCopy();
+ 
+     List<FieldSchema> cols = newTable.getSd().getCols();
+     // Change a column
+     cols.get(0).setName("modified_col");
+     // Remove a column
+     cols.remove(1);
+     // Add a new column
+     cols.add(new FieldSchema("new_col", "int", null));
+     // Store the changes
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+     Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     Assert.assertTrue("Original table directory should be kept",
+         metaStore.isPathExists(new Path(originalTable.getSd().getLocation())));
+ 
+     // The following data might be changed
+     alteredTable.setParameters(newTable.getParameters());
+     Assert.assertEquals("The table data should be the same", newTable, alteredTable);
+ 
+     // Modify partition column type, and comment
+     newTable.getPartitionKeys().get(0).setType("string");
+     newTable.getPartitionKeys().get(0).setComment("changed comment");
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+     alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     // The following data might be changed
+     alteredTable.setParameters(newTable.getParameters());
+     Assert.assertEquals("The table data should be the same", newTable, alteredTable);
+   }
+ 
+   @SuppressWarnings("deprecation")
+   @Test
+   public void testAlterTableCascade() throws Exception {
+     Table originalTable = partitionedTable;
+ 
+     Table newTable = originalTable.deepCopy();
+     List<FieldSchema> cols = newTable.getSd().getCols();
+     cols.add(new FieldSchema("new_col_1", "int", null));
+ 
+     // Run without cascade
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, false);
+     Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     Assert.assertEquals("The table data should be changed", newTable, alteredTable);
+ 
+     List<Partition> partitions =
+         client.listPartitions(originalTable.getDbName(), originalTable.getTableName(), (short)-1);
+     for(Partition partition : partitions) {
+       Assert.assertEquals("Partition columns should not be changed", 2,
+           partition.getSd().getCols().size());
+     }
+ 
+     // Run with cascade
+     cols.add(new FieldSchema("new_col_2", "int", null));
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, true);
+     alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     Assert.assertEquals("The table data should be changed", newTable, alteredTable);
+ 
+     partitions =
+         client.listPartitions(originalTable.getDbName(), originalTable.getTableName(), (short)-1);
+     for(Partition partition : partitions) {
+       Assert.assertEquals("Partition columns should be changed", 4,
+           partition.getSd().getCols().size());
+     }
+ 
+     // Run using environment context with cascade
+     cols.add(new FieldSchema("new_col_3", "int", null));
+     EnvironmentContext context = new EnvironmentContext();
+     context.putToProperties(StatsSetupConst.CASCADE, "true");
+     client.alter_table_with_environmentContext(originalTable.getDbName(),
+         originalTable.getTableName(), newTable, context);
+     alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     Assert.assertEquals("The table data should be changed", newTable, alteredTable);
+ 
+     partitions =
+         client.listPartitions(originalTable.getDbName(), originalTable.getTableName(), (short)-1);
+     for(Partition partition : partitions) {
+       Assert.assertEquals("Partition columns should be changed", 5,
+           partition.getSd().getCols().size());
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterTableNullDatabaseInNew() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.setDbName(null);
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterTableNullTableNameInNew() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.setTableName(null);
+ 
 -    client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
++    try {
++      client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
++      // Expected.
++    }
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableInvalidTableNameInNew() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.setTableName("test_table;");
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableEmptyTableNameInNew() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.setTableName("");
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterTableNullStorageDescriptorInNew() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.setSd(null);
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterTableNullDatabase() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
 -
 -    client.alter_table(null, originalTable.getTableName(), newTable);
++    try {
++      client.alter_table(null, originalTable.getTableName(), newTable);
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
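++      // Expected.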
++    }
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterTableNullTableName() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+ 
 -    client.alter_table(originalTable.getDbName(), null, newTable);
++    try {
++      client.alter_table(originalTable.getDbName(), null, newTable);
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
++      // Expected.
++    }
+   }
+ 
+   @Test
+   public void testAlterTableNullNewTable() throws Exception {
+     Table originalTable = testTables[0];
+     try {
+       client.alter_table(originalTable.getDbName(), originalTable.getTableName(), null);
+       // TODO: Should be checked on server side. On Embedded metastore it throws
+       // NullPointerException, on Remote metastore it throws TProtocolException
+       Assert.fail("Expected a NullPointerException or TProtocolException to be thrown");
+     } catch (NullPointerException exception) {
+       // Expected exception - Embedded MetaStore
 -    } catch (TTransportException exception) {
++    } catch (TProtocolException exception) {
+       // Expected exception - Remote MetaStore
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterTableInvalidStorageDescriptorNullCols() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.getSd().setCols(null);
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterTableInvalidStorageDescriptorNullSerdeInfo() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.getSd().setSerdeInfo(null);
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterTableInvalidStorageDescriptorNullColumnType() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.getSd().getCols().get(0).setType(null);
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterTableInvalidStorageDescriptorNullLocation() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.getSd().setLocation(null);
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableInvalidStorageDescriptorInvalidColumnType() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.getSd().getCols().get(0).setType("xyz");
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableInvalidStorageDescriptorAddPartitionColumns() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.addToPartitionKeys(new FieldSchema("new_part", "int", "comment"));
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableInvalidStorageDescriptorAlterPartitionColumnName() throws Exception {
+     Table originalTable = partitionedTable;
+     Table newTable = originalTable.deepCopy();
+     newTable.getPartitionKeys().get(0).setName("altered_name");
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableInvalidStorageDescriptorRemovePartitionColumn() throws Exception {
+     Table originalTable = partitionedTable;
+     Table newTable = originalTable.deepCopy();
+     newTable.getPartitionKeys().remove(0);
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableNoSuchDatabase() throws Exception {
+     Table originalTable = testTables[2];
+     Table newTable = originalTable.deepCopy();
+ 
+     client.alter_table("no_such_database", originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableNoSuchTable() throws Exception {
+     Table originalTable = testTables[2];
+     Table newTable = originalTable.deepCopy();
+ 
+     client.alter_table(originalTable.getDbName(), "no_such_table_name", newTable);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableNoSuchTableInThisDatabase() throws Exception {
+     Table originalTable = testTables[2];
+     Table newTable = originalTable.deepCopy();
+ 
+     client.alter_table(OTHER_DATABASE, originalTable.getTableName(), newTable);
+   }
+ 
+   @Test
+   public void testAlterTableAlreadyExists() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+ 
+     newTable.setTableName(testTables[2].getTableName());
+     try {
+       // Already existing table
+       client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+       // TODO: Maybe throw AlreadyExistsException.
+       Assert.fail("Expected an InvalidOperationException to be thrown");
+     } catch (InvalidOperationException exception) {
+       // Expected exception
+     }
+   }
+ 
+   @Test
+   public void tablesInOtherCatalogs() throws TException, URISyntaxException {
+     String catName = "create_etc_tables_in_other_catalogs";
+     Catalog cat = new CatalogBuilder()
+         .setName(catName)
+         .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
+         .build();
+     client.createCatalog(cat);
+ 
+     String dbName = "db_in_other_catalog";
+     // For this one don't specify a location to make sure it gets put in the catalog directory
+     Database db = new DatabaseBuilder()
+         .setName(dbName)
+         .setCatalogName(catName)
+         .create(client, metaStore.getConf());
+ 
+     String[] tableNames = new String[4];
+     for (int i = 0; i < tableNames.length; i++) {
+       tableNames[i] = "table_in_other_catalog_" + i;
+       TableBuilder builder = new TableBuilder()
+           .inDb(db)
+           .setTableName(tableNames[i])
+           .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME)
+           .addCol("col2_" + i, ColumnType.INT_TYPE_NAME);
+       // Make one have a non-standard location
+       if (i == 0) builder.setLocation(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i]));
+       // Make one partitioned
+       if (i == 2) builder.addPartCol("pcol1", ColumnType.STRING_TYPE_NAME);
+       // Make one a materialized view
+       if (i == 3) {
+         builder.setType(TableType.MATERIALIZED_VIEW.name())
+             .setRewriteEnabled(true)
+             .addMaterializedViewReferencedTable(dbName + "." + tableNames[0]);
+       }
+       client.createTable(builder.build(metaStore.getConf()));
+     }
+ 
+     // Add partitions for the partitioned table
+     String[] partVals = new String[3];
+     Table partitionedTable = client.getTable(catName, dbName, tableNames[2]);
+     for (int i = 0; i < partVals.length; i++) {
+       partVals[i] = "part" + i;
+       new PartitionBuilder()
+           .inTable(partitionedTable)
+           .addValue(partVals[i])
+           .addToTable(client, metaStore.getConf());
+     }
+ 
+     // Get tables, make sure the locations are correct
+     for (int i = 0; i < tableNames.length; i++) {
+       Table t = client.getTable(catName, dbName, tableNames[i]);
+       Assert.assertEquals(catName, t.getCatName());
+       String expectedLocation = (i < 1) ?
+         new File(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i])).toURI().toString()
+         :
+         new File(cat.getLocationUri() + File.separatorChar + dbName + ".db",
+             tableNames[i]).toURI().toString();
+ 
+       Assert.assertEquals(expectedLocation, t.getSd().getLocation() + "/");
+       File dir = new File(new URI(t.getSd().getLocation()).getPath());
+       Assert.assertTrue(dir.exists() && dir.isDirectory());
+ 
+     }
+ 
+     // Make sure getting table in the wrong catalog does not work
+     try {
+       Table t = client.getTable(DEFAULT_DATABASE_NAME, tableNames[0]);
+       Assert.fail();
+     } catch (NoSuchObjectException e) {
+       // NOP
+     }
+ 
+     // test getAllTables
+     Set<String> fetchedNames = new HashSet<>(client.getAllTables(catName, dbName));
+     Assert.assertEquals(tableNames.length, fetchedNames.size());
+     for (String tableName : tableNames) Assert.assertTrue(fetchedNames.contains(tableName));
+ 
+     fetchedNames = new HashSet<>(client.getAllTables(DEFAULT_DATABASE_NAME));
+     for (String tableName : tableNames) Assert.assertFalse(fetchedNames.contains(tableName));
+ 
+     // test getMaterializedViewsForRewriting
+     List<String> materializedViews = client.getMaterializedViewsForRewriting(catName, dbName);
+     Assert.assertEquals(1, materializedViews.size());
+     Assert.assertEquals(tableNames[3], materializedViews.get(0));
+ 
+     fetchedNames = new HashSet<>(client.getMaterializedViewsForRewriting(DEFAULT_DATABASE_NAME));
+     Assert.assertFalse(fetchedNames.contains(tableNames[3]));
+ 
+     // test getTableObjectsByName
+     List<Table> fetchedTables = client.getTableObjectsByName(catName, dbName,
+         Arrays.asList(tableNames[0], tableNames[1]));
+     Assert.assertEquals(2, fetchedTables.size());
+     Collections.sort(fetchedTables);
+     Assert.assertEquals(tableNames[0], fetchedTables.get(0).getTableName());
+     Assert.assertEquals(tableNames[1], fetchedTables.get(1).getTableName());
+ 
+     fetchedTables = client.getTableObjectsByName(DEFAULT_DATABASE_NAME,
+         Arrays.asList(tableNames[0], tableNames[1]));
+     Assert.assertEquals(0, fetchedTables.size());
+ 
+     // Test altering the table
+     Table t = client.getTable(catName, dbName, tableNames[0]).deepCopy();
+     t.getParameters().put("test", "test");
+     client.alter_table(catName, dbName, tableNames[0], t);
+     t = client.getTable(catName, dbName, tableNames[0]).deepCopy();
+     Assert.assertEquals("test", t.getParameters().get("test"));
+ 
+     // Alter a table in the wrong catalog
+     try {
+       client.alter_table(DEFAULT_DATABASE_NAME, tableNames[0], t);
+       Assert.fail();
+     } catch (InvalidOperationException e) {
+       // NOP
+     }
+ 
+     // Update the metadata for the materialized view
+     CreationMetadata cm = client.getTable(catName, dbName, tableNames[3]).getCreationMetadata();
+     cm.addToTablesUsed(dbName + "." + tableNames[1]);
+     cm.unsetMaterializationTime();
+     client.updateCreationMetadata(catName, dbName, tableNames[3], cm);
+ 
+     List<String> partNames = new ArrayList<>();
+     for (String partVal : partVals) partNames.add("pcol1=" + partVal);
+     // Truncate a table
+     client.truncateTable(catName, dbName, tableNames[0], partNames);
+ 
+     // Truncate a table in the wrong catalog
+     try {
+       client.truncateTable(DEFAULT_DATABASE_NAME, tableNames[0], partNames);
+       Assert.fail();
+     } catch (NoSuchObjectException|TApplicationException e) {
+       // NOP
+     }
+ 
+     // Drop a table from the wrong catalog
+     try {
+       client.dropTable(DEFAULT_DATABASE_NAME, tableNames[0], true, false);
+       Assert.fail();
+     } catch (NoSuchObjectException|TApplicationException e) {
+       // NOP
+     }
+ 
+     // Should ignore the failure
+     client.dropTable(DEFAULT_DATABASE_NAME, tableNames[0], false, true);
+ 
+     // Have to do this in reverse order so that we drop the materialized view first.
+     for (int i = tableNames.length - 1; i >= 0; i--) {
+       t = client.getTable(catName, dbName, tableNames[i]);
+       File tableDir = new File(new URI(t.getSd().getLocation()).getPath());
+       Assert.assertTrue(tableDir.exists() && tableDir.isDirectory());
+ 
+       if (tableNames[i].equalsIgnoreCase(tableNames[0])) {
+         client.dropTable(catName, dbName, tableNames[i], false, false);
+         Assert.assertTrue(tableDir.exists() && tableDir.isDirectory());
+       } else {
+         client.dropTable(catName, dbName, tableNames[i]);
+         Assert.assertFalse(tableDir.exists());
+       }
+     }
+     Assert.assertEquals(0, client.getAllTables(catName, dbName).size());
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void createTableInBogusCatalog() throws TException {
+     new TableBuilder()
+         .setCatName("nosuch")
+         .setTableName("doomed")
+         .addCol("col1", ColumnType.STRING_TYPE_NAME)
+         .addCol("col2", ColumnType.INT_TYPE_NAME)
+         .create(client, metaStore.getConf());
+   }
+ 
+   @Test(expected = NoSuchObjectException.class)
+   public void getTableInBogusCatalog() throws TException {
+     client.getTable("nosuch", testTables[0].getDbName(), testTables[0].getTableName());
+   }
+ 
+   @Test
+   public void getAllTablesInBogusCatalog() throws TException {
+     List<String> names = client.getAllTables("nosuch", testTables[0].getDbName());
+     Assert.assertTrue(names.isEmpty());
+   }
+ 
+   @Test(expected = UnknownDBException.class)
+   public void getTableObjectsByNameBogusCatalog() throws TException {
+     client.getTableObjectsByName("nosuch", testTables[0].getDbName(),
+         Arrays.asList(testTables[0].getTableName(), testTables[1].getTableName()));
+   }
+ 
+   @Test
+   public void getMaterializedViewsInBogusCatalog() throws TException {
+     List<String> names = client.getMaterializedViewsForRewriting("nosuch", DEFAULT_DATABASE_NAME);
+     Assert.assertTrue(names.isEmpty());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void alterTableBogusCatalog() throws TException {
+     Table t = testTables[0].deepCopy();
+     t.getParameters().put("a", "b");
+     client.alter_table("nosuch", t.getDbName(), t.getTableName(), t);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void moveTablesBetweenCatalogsOnAlter() throws TException {
+     String catName = "move_table_between_catalogs_on_alter";
+     Catalog cat = new CatalogBuilder()
+         .setName(catName)
+         .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
+         .build();
+     client.createCatalog(cat);
+ 
+     String dbName = "a_db";
+     // For this one don't specify a location to make sure it gets put in the catalog directory
+     Database db = new DatabaseBuilder()
+         .setName(dbName)
+         .setCatalogName(catName)
+         .create(client, metaStore.getConf());
+ 
+     String tableName = "non_movable_table";
+     Table before = new TableBuilder()
+         .inDb(db)
+         .setTableName(tableName)
+         .addCol("col1", ColumnType.STRING_TYPE_NAME)
+         .addCol("col2", ColumnType.INT_TYPE_NAME)
+         .create(client, metaStore.getConf());
+     Table after = before.deepCopy();
+     after.setCatName(DEFAULT_CATALOG_NAME);
+     client.alter_table(catName, dbName, tableName, after);
+ 
+   }
+ 
+   @Test
+   public void truncateTableBogusCatalog() throws TException {
+     try {
+       List<String> partNames = client.listPartitionNames(partitionedTable.getDbName(),
+           partitionedTable.getTableName(), (short) -1);
+       client.truncateTable("nosuch", partitionedTable.getDbName(), partitionedTable.getTableName(),
+           partNames);
+       Assert.fail(); // In remote mode the NoSuchObjectException gets swallowed by a
+       // TApplicationException, so accept either exception below.
+     } catch (TApplicationException|NoSuchObjectException e) {
+       //NOP
+     }
+   }
+ 
+   @Test(expected = NoSuchObjectException.class)
+   public void dropTableBogusCatalog() throws TException {
+     client.dropTable("nosuch", testTables[0].getDbName(), testTables[0].getTableName(), true, false);
+   }
+ 
+   /**
+    * Creates a Table with all of the parameters set. Temporary tables are available only on the
+    * HS2 server, so the temporary flag is not set here.
+    * @return The Table object
+    */
+   private Table getTableWithAllParametersSet() throws MetaException {
+     return new TableBuilder()
+                .setDbName(DEFAULT_DATABASE)
+                .setTableName("test_table_with_all_parameters_set")
+                .setCreateTime(100)
+                .setOwnerType(PrincipalType.ROLE)
+                .setOwner("owner")
+                .setLastAccessTime(200)
+                .addPartCol("part_col", "int", "part col comment")
+                .addCol("test_col", "int", "test col comment")
+                .addCol("test_bucket_col", "int", "test bucket col comment")
+                .addCol("test_skewed_col", "int", "test skewed col comment")
+                .addCol("test_sort_col", "int", "test sort col comment")
+                .addBucketCol("test_bucket_col")
+                .addSkewedColName("test_skewed_col")
+                .addSortCol("test_sort_col", 1)
+                .setCompressed(true)
+                .setInputFormat("inputFormat")
+                .setInputFormat("outputFormat")
+                .setLocation(metaStore.getWarehouseRoot() + "/location")
+                .setNumBuckets(4)
+                .setRetention(30000)
+                .setRewriteEnabled(true)
+                .setType("VIEW")
+                .setViewExpandedText("viewExplainedText")
+                .setViewOriginalText("viewOriginalText")
+                .setSerdeLib("serdelib")
+                .setSerdeName("serdename")
+                .setStoredAsSubDirectories(true)
+                .addSerdeParam("serdeParam", "serdeParamValue")
+                .addTableParam("tableParam", "tableParamValue")
+                .addStorageDescriptorParam("sdParam", "sdParamValue")
+                .build(metaStore.getConf());
+   }
+ }
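
For readers skimming the patch, the tests above all exercise the same small IMetaStoreClient table
API. A minimal sketch of that flow, assuming an IMetaStoreClient named client and a Configuration
named conf such as the test fixture provides (the database, table, and column names below are
illustrative only), would look roughly like this inside a method that declares throws Exception,
as the tests do:

    // Build the Thrift Table object locally; unset fields get server-side defaults on create.
    Table t = new TableBuilder()
        .setDbName("default")
        .setTableName("example_table")   // hypothetical name, not used by the tests
        .addCol("id", "int")
        .build(conf);

    client.createTable(t);                                        // register it with the metastore
    Table created = client.getTable("default", "example_table");  // location, serde, etc. now filled in
    client.dropTable("default", "example_table",
        true /* deleteData */, false /* ignoreUnknownTable */);

This is only an illustration of the client calls exercised above, not part of the patch itself.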

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/pom.xml
----------------------------------------------------------------------


[06/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query66.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query66.q.out b/ql/src/test/results/clientpositive/perf/tez/query66.q.out
index 5dec2f8..432dd7e 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query66.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query66.q.out
@@ -471,240 +471,244 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 9 vectorized
-      File Output Operator [FS_274]
-        Select Operator [SEL_273] (rows=100 width=135)
+      File Output Operator [FS_278]
+        Select Operator [SEL_277] (rows=100 width=135)
           Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"]
-          Limit [LIM_272] (rows=100 width=135)
+          Limit [LIM_276] (rows=100 width=135)
             Number of rows:100
-            Select Operator [SEL_271] (rows=158120068 width=135)
+            Select Operator [SEL_275] (rows=158120068 width=135)
               Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41"]
             <-Reducer 8 [SIMPLE_EDGE] vectorized
-              SHUFFLE [RS_270]
-                Group By Operator [GBY_269] (rows=158120068 width=135)
+              SHUFFLE [RS_274]
+                Group By Operator [GBY_273] (rows=158120068 width=135)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","sum(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)","sum(VALUE._col12)","sum(VALUE._col13)","sum(VALUE._col14)","sum(VALUE._col15)","sum(VALUE._col16)","sum(VALUE._col17)","sum(VALUE._col18)","sum(VALUE._col19)","sum(VALUE._col20)","sum(VALUE._col21)","sum(VALUE._col22)","sum(VALUE._col23)","sum(VALUE._col24)","sum(VALUE._col25)","sum(VALUE._col26)","sum(VALUE._col27)","sum(VALUE._col28)","sum(VALUE._col29)
 ","sum(VALUE._col30)","sum(VALUE._col31)","sum(VALUE._col32)","sum(VALUE._col33)","sum(VALUE._col34)","sum(VALUE._col35)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5
                 <-Union 7 [SIMPLE_EDGE]
                   <-Reducer 16 [CONTAINS] vectorized
-                    Reduce Output Operator [RS_289]
+                    Reduce Output Operator [RS_294]
                       PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5
-                      Group By Operator [GBY_288] (rows=316240137 width=135)
+                      Group By Operator [GBY_293] (rows=316240137 width=135)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41"],aggregations:["sum(_col6)","sum(_col7)","sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)","sum(_col30)","sum(_col31)","sum(_col32)","sum(_col33)","sum(_col34)","sum(_col35)","sum(_col36)","sum(_col37)","sum(_col38)","sum(_col39)","sum(_col40)","sum(_col41)"],keys:_col0, _col1, _col2, _col3, _col4, _col5
-                        Select Operator [SEL_287] (rows=316240137 width=135)
-                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41"]
-                          Group By Operator [GBY_286] (rows=210822976 width=135)
-                            Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","sum(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)","sum(VALUE._col12)","sum(VALUE._col13)","sum(VALUE._col14)","sum(VALUE._col15)","sum(VALUE._col16)","sum(VALUE._col17)","sum(VALUE._col18)","sum(VALUE._col19)","sum(VALUE._col20)","sum(VALUE._col21)","sum(VALUE._col22)","sum(VALUE._col23)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5
-                          <-Reducer 15 [SIMPLE_EDGE]
-                            SHUFFLE [RS_63]
-                              PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5
-                              Group By Operator [GBY_62] (rows=421645953 width=135)
-                                Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"],aggregations:["sum(_col6)","sum(_col7)","sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)"],keys:_col0, _col1, _col2, _col3, _col4, _col5
-                                Select Operator [SEL_60] (rows=421645953 width=135)
-                                  Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"]
-                                  Merge Join Operator [MERGEJOIN_203] (rows=421645953 width=135)
-                                    Conds:RS_57._col3=RS_254._col0(Inner),Output:["_col4","_col5","_col6","_col11","_col15","_col16","_col17","_col18","_col19","_col20"]
-                                  <-Map 24 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_254]
-                                      PartitionCols:_col0
-                                      Select Operator [SEL_251] (rows=27 width=1029)
-                                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                        Filter Operator [FIL_250] (rows=27 width=1029)
-                                          predicate:w_warehouse_sk is not null
-                                          TableScan [TS_12] (rows=27 width=1029)
-                                            default@warehouse,warehouse,Tbl:COMPLETE,Col:NONE,Output:["w_warehouse_sk","w_warehouse_name","w_warehouse_sq_ft","w_city","w_county","w_state","w_country"]
-                                  <-Reducer 14 [SIMPLE_EDGE]
-                                    SHUFFLE [RS_57]
-                                      PartitionCols:_col3
-                                      Merge Join Operator [MERGEJOIN_202] (rows=383314495 width=135)
-                                        Conds:RS_54._col2=RS_242._col0(Inner),Output:["_col3","_col4","_col5","_col6","_col11"]
-                                      <-Map 21 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_242]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_239] (rows=1 width=0)
-                                            Output:["_col0"]
-                                            Filter Operator [FIL_238] (rows=1 width=0)
-                                              predicate:((sm_carrier) IN ('DIAMOND', 'AIRBORNE') and sm_ship_mode_sk is not null)
-                                              TableScan [TS_9] (rows=1 width=0)
-                                                default@ship_mode,ship_mode,Tbl:PARTIAL,Col:NONE,Output:["sm_ship_mode_sk","sm_carrier"]
-                                      <-Reducer 13 [SIMPLE_EDGE]
-                                        SHUFFLE [RS_54]
-                                          PartitionCols:_col2
-                                          Merge Join Operator [MERGEJOIN_201] (rows=348467716 width=135)
-                                            Conds:RS_51._col0=RS_230._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col6","_col11"]
-                                          <-Map 18 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_230]
-                                              PartitionCols:_col0
-                                              Select Operator [SEL_227] (rows=36524 width=1119)
-                                                Output:["_col0","_col2"]
-                                                Filter Operator [FIL_226] (rows=36524 width=1119)
-                                                  predicate:((d_year = 2002) and d_date_sk is not null)
-                                                  TableScan [TS_6] (rows=73049 width=1119)
-                                                    default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
-                                          <-Reducer 12 [SIMPLE_EDGE]
-                                            SHUFFLE [RS_51]
-                                              PartitionCols:_col0
-                                              Merge Join Operator [MERGEJOIN_200] (rows=316788826 width=135)
-                                                Conds:RS_285._col1=RS_218._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6"]
-                                              <-Map 10 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_218]
-                                                  PartitionCols:_col0
-                                                  Select Operator [SEL_215] (rows=9600 width=471)
-                                                    Output:["_col0"]
-                                                    Filter Operator [FIL_214] (rows=9600 width=471)
-                                                      predicate:(t_time BETWEEN 49530 AND 78330 and t_time_sk is not null)
-                                                      TableScan [TS_3] (rows=86400 width=471)
-                                                        default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_time"]
-                                              <-Map 27 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_285]
-                                                  PartitionCols:_col1
-                                                  Select Operator [SEL_284] (rows=287989836 width=135)
-                                                    Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                                    Filter Operator [FIL_283] (rows=287989836 width=135)
-                                                      predicate:((cs_ship_mode_sk BETWEEN DynamicValue(RS_55_ship_mode_sm_ship_mode_sk_min) AND DynamicValue(RS_55_ship_mode_sm_ship_mode_sk_max) and in_bloom_filter(cs_ship_mode_sk, DynamicValue(RS_55_ship_mode_sm_ship_mode_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_52_date_dim_d_date_sk_min) AND DynamicValue(RS_52_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_52_date_dim_d_date_sk_bloom_filter))) and (cs_sold_time_sk BETWEEN DynamicValue(RS_49_time_dim_t_time_sk_min) AND DynamicValue(RS_49_time_dim_t_time_sk_max) and in_bloom_filter(cs_sold_time_sk, DynamicValue(RS_49_time_dim_t_time_sk_bloom_filter))) and (cs_warehouse_sk BETWEEN DynamicValue(RS_58_warehouse_w_warehouse_sk_min) AND DynamicValue(RS_58_warehouse_w_warehouse_sk_max) and in_bloom_filter(cs_warehouse_sk, DynamicValue(RS_58_warehouse_w_warehouse_sk_bloom_filter))) and cs_ship_mode_sk is not null and cs_sold_date_sk is not null and cs_sold_time_sk is not null and cs_warehouse_sk is not null)
-                                                      TableScan [TS_33] (rows=287989836 width=135)
-                                                        default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_sold_time_sk","cs_ship_mode_sk","cs_warehouse_sk","cs_quantity","cs_ext_sales_price","cs_net_paid_inc_ship_tax"]
-                                                      <-Reducer 17 [BROADCAST_EDGE] vectorized
-                                                        BROADCAST [RS_276]
-                                                          Group By Operator [GBY_275] (rows=1 width=12)
-                                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                          <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                            SHUFFLE [RS_223]
-                                                              Group By Operator [GBY_221] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                Select Operator [SEL_219] (rows=9600 width=471)
-                                                                  Output:["_col0"]
-                                                                   Please refer to the previous Select Operator [SEL_215]
-                                                      <-Reducer 20 [BROADCAST_EDGE] vectorized
-                                                        BROADCAST [RS_278]
-                                                          Group By Operator [GBY_277] (rows=1 width=12)
-                                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                          <-Map 18 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                            SHUFFLE [RS_235]
-                                                              Group By Operator [GBY_233] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                Select Operator [SEL_231] (rows=36524 width=1119)
-                                                                  Output:["_col0"]
-                                                                   Please refer to the previous Select Operator [SEL_227]
-                                                      <-Reducer 23 [BROADCAST_EDGE] vectorized
-                                                        BROADCAST [RS_280]
-                                                          Group By Operator [GBY_279] (rows=1 width=12)
-                                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                          <-Map 21 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                            SHUFFLE [RS_247]
-                                                              Group By Operator [GBY_245] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                Select Operator [SEL_243] (rows=1 width=0)
-                                                                  Output:["_col0"]
-                                                                   Please refer to the previous Select Operator [SEL_239]
-                                                      <-Reducer 26 [BROADCAST_EDGE] vectorized
-                                                        BROADCAST [RS_282]
-                                                          Group By Operator [GBY_281] (rows=1 width=12)
-                                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                          <-Map 24 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                            SHUFFLE [RS_259]
-                                                              Group By Operator [GBY_257] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                Select Operator [SEL_255] (rows=27 width=1029)
-                                                                  Output:["_col0"]
-                                                                   Please refer to the previous Select Operator [SEL_251]
+                        Top N Key Operator [TNK_292] (rows=316240137 width=135)
+                          keys:_col0, _col1, _col2, _col3, _col4, _col5,sort order:++++++,top n:100
+                          Select Operator [SEL_291] (rows=316240137 width=135)
+                            Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41"]
+                            Group By Operator [GBY_290] (rows=210822976 width=135)
+                              Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","sum(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)","sum(VALUE._col12)","sum(VALUE._col13)","sum(VALUE._col14)","sum(VALUE._col15)","sum(VALUE._col16)","sum(VALUE._col17)","sum(VALUE._col18)","sum(VALUE._col19)","sum(VALUE._col20)","sum(VALUE._col21)","sum(VALUE._col22)","sum(VALUE._col23)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5
+                            <-Reducer 15 [SIMPLE_EDGE]
+                              SHUFFLE [RS_63]
+                                PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5
+                                Group By Operator [GBY_62] (rows=421645953 width=135)
+                                  Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"],aggregations:["sum(_col6)","sum(_col7)","sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)"],keys:_col0, _col1, _col2, _col3, _col4, _col5
+                                  Select Operator [SEL_60] (rows=421645953 width=135)
+                                    Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"]
+                                    Merge Join Operator [MERGEJOIN_204] (rows=421645953 width=135)
+                                      Conds:RS_57._col3=RS_257._col0(Inner),Output:["_col4","_col5","_col6","_col11","_col15","_col16","_col17","_col18","_col19","_col20"]
+                                    <-Map 24 [SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_257]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_254] (rows=27 width=1029)
+                                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+                                          Filter Operator [FIL_253] (rows=27 width=1029)
+                                            predicate:w_warehouse_sk is not null
+                                            TableScan [TS_12] (rows=27 width=1029)
+                                              default@warehouse,warehouse,Tbl:COMPLETE,Col:NONE,Output:["w_warehouse_sk","w_warehouse_name","w_warehouse_sq_ft","w_city","w_county","w_state","w_country"]
+                                    <-Reducer 14 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_57]
+                                        PartitionCols:_col3
+                                        Merge Join Operator [MERGEJOIN_203] (rows=383314495 width=135)
+                                          Conds:RS_54._col2=RS_245._col0(Inner),Output:["_col3","_col4","_col5","_col6","_col11"]
+                                        <-Map 21 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_245]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_242] (rows=1 width=0)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_241] (rows=1 width=0)
+                                                predicate:((sm_carrier) IN ('DIAMOND', 'AIRBORNE') and sm_ship_mode_sk is not null)
+                                                TableScan [TS_9] (rows=1 width=0)
+                                                  default@ship_mode,ship_mode,Tbl:PARTIAL,Col:NONE,Output:["sm_ship_mode_sk","sm_carrier"]
+                                        <-Reducer 13 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_54]
+                                            PartitionCols:_col2
+                                            Merge Join Operator [MERGEJOIN_202] (rows=348467716 width=135)
+                                              Conds:RS_51._col0=RS_233._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col6","_col11"]
+                                            <-Map 18 [SIMPLE_EDGE] vectorized
+                                              SHUFFLE [RS_233]
+                                                PartitionCols:_col0
+                                                Select Operator [SEL_230] (rows=36524 width=1119)
+                                                  Output:["_col0","_col2"]
+                                                  Filter Operator [FIL_229] (rows=36524 width=1119)
+                                                    predicate:((d_year = 2002) and d_date_sk is not null)
+                                                    TableScan [TS_6] (rows=73049 width=1119)
+                                                      default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
+                                            <-Reducer 12 [SIMPLE_EDGE]
+                                              SHUFFLE [RS_51]
+                                                PartitionCols:_col0
+                                                Merge Join Operator [MERGEJOIN_201] (rows=316788826 width=135)
+                                                  Conds:RS_289._col1=RS_221._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6"]
+                                                <-Map 10 [SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_221]
+                                                    PartitionCols:_col0
+                                                    Select Operator [SEL_218] (rows=9600 width=471)
+                                                      Output:["_col0"]
+                                                      Filter Operator [FIL_217] (rows=9600 width=471)
+                                                        predicate:(t_time BETWEEN 49530 AND 78330 and t_time_sk is not null)
+                                                        TableScan [TS_3] (rows=86400 width=471)
+                                                          default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_time"]
+                                                <-Map 27 [SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_289]
+                                                    PartitionCols:_col1
+                                                    Select Operator [SEL_288] (rows=287989836 width=135)
+                                                      Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+                                                      Filter Operator [FIL_287] (rows=287989836 width=135)
+                                                        predicate:((cs_ship_mode_sk BETWEEN DynamicValue(RS_55_ship_mode_sm_ship_mode_sk_min) AND DynamicValue(RS_55_ship_mode_sm_ship_mode_sk_max) and in_bloom_filter(cs_ship_mode_sk, DynamicValue(RS_55_ship_mode_sm_ship_mode_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_52_date_dim_d_date_sk_min) AND DynamicValue(RS_52_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_52_date_dim_d_date_sk_bloom_filter))) and (cs_sold_time_sk BETWEEN DynamicValue(RS_49_time_dim_t_time_sk_min) AND DynamicValue(RS_49_time_dim_t_time_sk_max) and in_bloom_filter(cs_sold_time_sk, DynamicValue(RS_49_time_dim_t_time_sk_bloom_filter))) and (cs_warehouse_sk BETWEEN DynamicValue(RS_58_warehouse_w_warehouse_sk_min) AND DynamicValue(RS_58_warehouse_w_warehouse_sk_max) and in_bloom_filter(cs_warehouse_sk, DynamicValue(RS_58_warehouse_w_warehouse_sk_bloom_filter))) and cs_ship_mode_sk is not null and cs_sold_date_sk is not null and cs_sold_time_sk is not null and cs_warehouse_sk is not null)
+                                                        TableScan [TS_33] (rows=287989836 width=135)
+                                                          default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_sold_time_sk","cs_ship_mode_sk","cs_warehouse_sk","cs_quantity","cs_ext_sales_price","cs_net_paid_inc_ship_tax"]
+                                                        <-Reducer 17 [BROADCAST_EDGE] vectorized
+                                                          BROADCAST [RS_280]
+                                                            Group By Operator [GBY_279] (rows=1 width=12)
+                                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                            <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                              SHUFFLE [RS_226]
+                                                                Group By Operator [GBY_224] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                  Select Operator [SEL_222] (rows=9600 width=471)
+                                                                    Output:["_col0"]
+                                                                     Please refer to the previous Select Operator [SEL_218]
+                                                        <-Reducer 20 [BROADCAST_EDGE] vectorized
+                                                          BROADCAST [RS_282]
+                                                            Group By Operator [GBY_281] (rows=1 width=12)
+                                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                            <-Map 18 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                              SHUFFLE [RS_238]
+                                                                Group By Operator [GBY_236] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                  Select Operator [SEL_234] (rows=36524 width=1119)
+                                                                    Output:["_col0"]
+                                                                     Please refer to the previous Select Operator [SEL_230]
+                                                        <-Reducer 23 [BROADCAST_EDGE] vectorized
+                                                          BROADCAST [RS_284]
+                                                            Group By Operator [GBY_283] (rows=1 width=12)
+                                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                            <-Map 21 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                              SHUFFLE [RS_250]
+                                                                Group By Operator [GBY_248] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                  Select Operator [SEL_246] (rows=1 width=0)
+                                                                    Output:["_col0"]
+                                                                     Please refer to the previous Select Operator [SEL_242]
+                                                        <-Reducer 26 [BROADCAST_EDGE] vectorized
+                                                          BROADCAST [RS_286]
+                                                            Group By Operator [GBY_285] (rows=1 width=12)
+                                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                            <-Map 24 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                              SHUFFLE [RS_262]
+                                                                Group By Operator [GBY_260] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                  Select Operator [SEL_258] (rows=27 width=1029)
+                                                                    Output:["_col0"]
+                                                                     Please refer to the previous Select Operator [SEL_254]
                   <-Reducer 6 [CONTAINS] vectorized
-                    Reduce Output Operator [RS_268]
+                    Reduce Output Operator [RS_272]
                       PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5
-                      Group By Operator [GBY_267] (rows=316240137 width=135)
+                      Group By Operator [GBY_271] (rows=316240137 width=135)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41"],aggregations:["sum(_col6)","sum(_col7)","sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)","sum(_col30)","sum(_col31)","sum(_col32)","sum(_col33)","sum(_col34)","sum(_col35)","sum(_col36)","sum(_col37)","sum(_col38)","sum(_col39)","sum(_col40)","sum(_col41)"],keys:_col0, _col1, _col2, _col3, _col4, _col5
-                        Select Operator [SEL_266] (rows=316240137 width=135)
-                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41"]
-                          Group By Operator [GBY_265] (rows=105417161 width=135)
-                            Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","sum(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)","sum(VALUE._col12)","sum(VALUE._col13)","sum(VALUE._col14)","sum(VALUE._col15)","sum(VALUE._col16)","sum(VALUE._col17)","sum(VALUE._col18)","sum(VALUE._col19)","sum(VALUE._col20)","sum(VALUE._col21)","sum(VALUE._col22)","sum(VALUE._col23)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5
-                          <-Reducer 5 [SIMPLE_EDGE]
-                            SHUFFLE [RS_30]
-                              PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5
-                              Group By Operator [GBY_29] (rows=210834322 width=135)
-                                Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"],aggregations:["sum(_col6)","sum(_col7)","sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)"],keys:_col0, _col1, _col2, _col3, _col4, _col5
-                                Select Operator [SEL_27] (rows=210834322 width=135)
-                                  Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"]
-                                  Merge Join Operator [MERGEJOIN_199] (rows=210834322 width=135)
-                                    Conds:RS_24._col3=RS_252._col0(Inner),Output:["_col4","_col5","_col6","_col11","_col15","_col16","_col17","_col18","_col19","_col20"]
-                                  <-Map 24 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_252]
-                                      PartitionCols:_col0
-                                       Please refer to the previous Select Operator [SEL_251]
-                                  <-Reducer 4 [SIMPLE_EDGE]
-                                    SHUFFLE [RS_24]
-                                      PartitionCols:_col3
-                                      Merge Join Operator [MERGEJOIN_198] (rows=191667562 width=135)
-                                        Conds:RS_21._col2=RS_240._col0(Inner),Output:["_col3","_col4","_col5","_col6","_col11"]
-                                      <-Map 21 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_240]
-                                          PartitionCols:_col0
-                                           Please refer to the previous Select Operator [SEL_239]
-                                      <-Reducer 3 [SIMPLE_EDGE]
-                                        SHUFFLE [RS_21]
-                                          PartitionCols:_col2
-                                          Merge Join Operator [MERGEJOIN_197] (rows=174243235 width=135)
-                                            Conds:RS_18._col0=RS_228._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col6","_col11"]
-                                          <-Map 18 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_228]
-                                              PartitionCols:_col0
-                                               Please refer to the previous Select Operator [SEL_227]
-                                          <-Reducer 2 [SIMPLE_EDGE]
-                                            SHUFFLE [RS_18]
-                                              PartitionCols:_col0
-                                              Merge Join Operator [MERGEJOIN_196] (rows=158402938 width=135)
-                                                Conds:RS_264._col1=RS_216._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6"]
-                                              <-Map 10 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_216]
-                                                  PartitionCols:_col0
-                                                   Please refer to the previous Select Operator [SEL_215]
-                                              <-Map 1 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_264]
-                                                  PartitionCols:_col1
-                                                  Select Operator [SEL_263] (rows=144002668 width=135)
-                                                    Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                                    Filter Operator [FIL_262] (rows=144002668 width=135)
-                                                      predicate:((ws_ship_mode_sk BETWEEN DynamicValue(RS_22_ship_mode_sm_ship_mode_sk_min) AND DynamicValue(RS_22_ship_mode_sm_ship_mode_sk_max) and in_bloom_filter(ws_ship_mode_sk, DynamicValue(RS_22_ship_mode_sm_ship_mode_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_19_date_dim_d_date_sk_min) AND DynamicValue(RS_19_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_19_date_dim_d_date_sk_bloom_filter))) and (ws_sold_time_sk BETWEEN DynamicValue(RS_16_time_dim_t_time_sk_min) AND DynamicValue(RS_16_time_dim_t_time_sk_max) and in_bloom_filter(ws_sold_time_sk, DynamicValue(RS_16_time_dim_t_time_sk_bloom_filter))) and (ws_warehouse_sk BETWEEN DynamicValue(RS_25_warehouse_w_warehouse_sk_min) AND DynamicValue(RS_25_warehouse_w_warehouse_sk_max) and in_bloom_filter(ws_warehouse_sk, DynamicValue(RS_25_warehouse_w_warehouse_sk_bloom_filter))) and ws_ship_mode_sk is not null and ws_sold_date_sk is not null and ws_sold_time_sk is not null and ws_warehouse_sk is not null)
-                                                      TableScan [TS_0] (rows=144002668 width=135)
-                                                        default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_sold_time_sk","ws_ship_mode_sk","ws_warehouse_sk","ws_quantity","ws_sales_price","ws_net_paid_inc_tax"]
-                                                      <-Reducer 11 [BROADCAST_EDGE] vectorized
-                                                        BROADCAST [RS_225]
-                                                          Group By Operator [GBY_224] (rows=1 width=12)
-                                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                          <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                            SHUFFLE [RS_222]
-                                                              Group By Operator [GBY_220] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                Select Operator [SEL_217] (rows=9600 width=471)
-                                                                  Output:["_col0"]
-                                                                   Please refer to the previous Select Operator [SEL_215]
-                                                      <-Reducer 19 [BROADCAST_EDGE] vectorized
-                                                        BROADCAST [RS_237]
-                                                          Group By Operator [GBY_236] (rows=1 width=12)
-                                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                          <-Map 18 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                            SHUFFLE [RS_234]
-                                                              Group By Operator [GBY_232] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                Select Operator [SEL_229] (rows=36524 width=1119)
-                                                                  Output:["_col0"]
-                                                                   Please refer to the previous Select Operator [SEL_227]
-                                                      <-Reducer 22 [BROADCAST_EDGE] vectorized
-                                                        BROADCAST [RS_249]
-                                                          Group By Operator [GBY_248] (rows=1 width=12)
-                                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                          <-Map 21 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                            SHUFFLE [RS_246]
-                                                              Group By Operator [GBY_244] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                Select Operator [SEL_241] (rows=1 width=0)
-                                                                  Output:["_col0"]
-                                                                   Please refer to the previous Select Operator [SEL_239]
-                                                      <-Reducer 25 [BROADCAST_EDGE] vectorized
-                                                        BROADCAST [RS_261]
-                                                          Group By Operator [GBY_260] (rows=1 width=12)
-                                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                          <-Map 24 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                            SHUFFLE [RS_258]
-                                                              Group By Operator [GBY_256] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                Select Operator [SEL_253] (rows=27 width=1029)
-                                                                  Output:["_col0"]
-                                                                   Please refer to the previous Select Operator [SEL_251]
+                        Top N Key Operator [TNK_270] (rows=316240137 width=135)
+                          keys:_col0, _col1, _col2, _col3, _col4, _col5,sort order:++++++,top n:100
+                          Select Operator [SEL_269] (rows=316240137 width=135)
+                            Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41"]
+                            Group By Operator [GBY_268] (rows=105417161 width=135)
+                              Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","sum(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)","sum(VALUE._col12)","sum(VALUE._col13)","sum(VALUE._col14)","sum(VALUE._col15)","sum(VALUE._col16)","sum(VALUE._col17)","sum(VALUE._col18)","sum(VALUE._col19)","sum(VALUE._col20)","sum(VALUE._col21)","sum(VALUE._col22)","sum(VALUE._col23)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5
+                            <-Reducer 5 [SIMPLE_EDGE]
+                              SHUFFLE [RS_30]
+                                PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5
+                                Group By Operator [GBY_29] (rows=210834322 width=135)
+                                  Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"],aggregations:["sum(_col6)","sum(_col7)","sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)"],keys:_col0, _col1, _col2, _col3, _col4, _col5
+                                  Select Operator [SEL_27] (rows=210834322 width=135)
+                                    Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"]
+                                    Merge Join Operator [MERGEJOIN_200] (rows=210834322 width=135)
+                                      Conds:RS_24._col3=RS_255._col0(Inner),Output:["_col4","_col5","_col6","_col11","_col15","_col16","_col17","_col18","_col19","_col20"]
+                                    <-Map 24 [SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_255]
+                                        PartitionCols:_col0
+                                         Please refer to the previous Select Operator [SEL_254]
+                                    <-Reducer 4 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_24]
+                                        PartitionCols:_col3
+                                        Merge Join Operator [MERGEJOIN_199] (rows=191667562 width=135)
+                                          Conds:RS_21._col2=RS_243._col0(Inner),Output:["_col3","_col4","_col5","_col6","_col11"]
+                                        <-Map 21 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_243]
+                                            PartitionCols:_col0
+                                             Please refer to the previous Select Operator [SEL_242]
+                                        <-Reducer 3 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_21]
+                                            PartitionCols:_col2
+                                            Merge Join Operator [MERGEJOIN_198] (rows=174243235 width=135)
+                                              Conds:RS_18._col0=RS_231._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col6","_col11"]
+                                            <-Map 18 [SIMPLE_EDGE] vectorized
+                                              SHUFFLE [RS_231]
+                                                PartitionCols:_col0
+                                                 Please refer to the previous Select Operator [SEL_230]
+                                            <-Reducer 2 [SIMPLE_EDGE]
+                                              SHUFFLE [RS_18]
+                                                PartitionCols:_col0
+                                                Merge Join Operator [MERGEJOIN_197] (rows=158402938 width=135)
+                                                  Conds:RS_267._col1=RS_219._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6"]
+                                                <-Map 10 [SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_219]
+                                                    PartitionCols:_col0
+                                                     Please refer to the previous Select Operator [SEL_218]
+                                                <-Map 1 [SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_267]
+                                                    PartitionCols:_col1
+                                                    Select Operator [SEL_266] (rows=144002668 width=135)
+                                                      Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+                                                      Filter Operator [FIL_265] (rows=144002668 width=135)
+                                                        predicate:((ws_ship_mode_sk BETWEEN DynamicValue(RS_22_ship_mode_sm_ship_mode_sk_min) AND DynamicValue(RS_22_ship_mode_sm_ship_mode_sk_max) and in_bloom_filter(ws_ship_mode_sk, DynamicValue(RS_22_ship_mode_sm_ship_mode_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_19_date_dim_d_date_sk_min) AND DynamicValue(RS_19_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_19_date_dim_d_date_sk_bloom_filter))) and (ws_sold_time_sk BETWEEN DynamicValue(RS_16_time_dim_t_time_sk_min) AND DynamicValue(RS_16_time_dim_t_time_sk_max) and in_bloom_filter(ws_sold_time_sk, DynamicValue(RS_16_time_dim_t_time_sk_bloom_filter))) and (ws_warehouse_sk BETWEEN DynamicValue(RS_25_warehouse_w_warehouse_sk_min) AND DynamicValue(RS_25_warehouse_w_warehouse_sk_max) and in_bloom_filter(ws_warehouse_sk, DynamicValue(RS_25_warehouse_w_warehouse_sk_bloom_filter))) and ws_ship_mode_sk is not null and ws_sold_date_sk is not null and ws_sold_time_sk is not null and ws_warehouse_sk is not null)
+                                                        TableScan [TS_0] (rows=144002668 width=135)
+                                                          default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_sold_time_sk","ws_ship_mode_sk","ws_warehouse_sk","ws_quantity","ws_sales_price","ws_net_paid_inc_tax"]
+                                                        <-Reducer 11 [BROADCAST_EDGE] vectorized
+                                                          BROADCAST [RS_228]
+                                                            Group By Operator [GBY_227] (rows=1 width=12)
+                                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                            <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                              SHUFFLE [RS_225]
+                                                                Group By Operator [GBY_223] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                  Select Operator [SEL_220] (rows=9600 width=471)
+                                                                    Output:["_col0"]
+                                                                     Please refer to the previous Select Operator [SEL_218]
+                                                        <-Reducer 19 [BROADCAST_EDGE] vectorized
+                                                          BROADCAST [RS_240]
+                                                            Group By Operator [GBY_239] (rows=1 width=12)
+                                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                            <-Map 18 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                              SHUFFLE [RS_237]
+                                                                Group By Operator [GBY_235] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                  Select Operator [SEL_232] (rows=36524 width=1119)
+                                                                    Output:["_col0"]
+                                                                     Please refer to the previous Select Operator [SEL_230]
+                                                        <-Reducer 22 [BROADCAST_EDGE] vectorized
+                                                          BROADCAST [RS_252]
+                                                            Group By Operator [GBY_251] (rows=1 width=12)
+                                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                            <-Map 21 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                              SHUFFLE [RS_249]
+                                                                Group By Operator [GBY_247] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                  Select Operator [SEL_244] (rows=1 width=0)
+                                                                    Output:["_col0"]
+                                                                     Please refer to the previous Select Operator [SEL_242]
+                                                        <-Reducer 25 [BROADCAST_EDGE] vectorized
+                                                          BROADCAST [RS_264]
+                                                            Group By Operator [GBY_263] (rows=1 width=12)
+                                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                            <-Map 24 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                              SHUFFLE [RS_261]
+                                                                Group By Operator [GBY_259] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                  Select Operator [SEL_256] (rows=27 width=1029)
+                                                                    Output:["_col0"]
+                                                                     Please refer to the previous Select Operator [SEL_254]
 


[43/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
index 0000000,4a97f89..267c9e8
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
@@@ -1,0 -1,155 +1,162 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore.model;
+ 
+ import java.util.List;
+ import java.util.Map;
+ 
+ public class MPartition {
+ 
+   private String partitionName; // partitionname ==>  (key=value/)*(key=value)
+   private MTable table; 
+   private List<String> values;
+   private int createTime;
+   private int lastAccessTime;
+   private MStorageDescriptor sd;
+   private Map<String, String> parameters;
 -  
++  private long writeId;
+   
+   public MPartition() {}
+   
+   /**
+    * @param partitionName
+    * @param table
+    * @param values
+    * @param createTime
+    * @param lastAccessTime
+    * @param sd
+    * @param parameters
+    */
+   public MPartition(String partitionName, MTable table, List<String> values, int createTime,
+       int lastAccessTime, MStorageDescriptor sd, Map<String, String> parameters) {
+     this.partitionName = partitionName;
+     this.table = table;
+     this.values = values;
+     this.createTime = createTime;
+     this.lastAccessTime = lastAccessTime;
+     this.sd = sd;
+     this.parameters = parameters;
+   }
+ 
+   /**
+    * @return the lastAccessTime
+    */
+   public int getLastAccessTime() {
+     return lastAccessTime;
+   }
+ 
+   /**
+    * @param lastAccessTime the lastAccessTime to set
+    */
+   public void setLastAccessTime(int lastAccessTime) {
+     this.lastAccessTime = lastAccessTime;
+   }
+ 
+   /**
+    * @return the values
+    */
+   public List<String> getValues() {
+     return values;
+   }
+ 
+   /**
+    * @param values the values to set
+    */
+   public void setValues(List<String> values) {
+     this.values = values;
+   }
+ 
+   /**
+    * @return the table
+    */
+   public MTable getTable() {
+     return table;
+   }
+ 
+   /**
+    * @param table the table to set
+    */
+   public void setTable(MTable table) {
+     this.table = table;
+   }
+ 
+   /**
+    * @return the sd
+    */
+   public MStorageDescriptor getSd() {
+     return sd;
+   }
+ 
+   /**
+    * @param sd the sd to set
+    */
+   public void setSd(MStorageDescriptor sd) {
+     this.sd = sd;
+   }
+ 
+   /**
+    * @return the parameters
+    */
+   public Map<String, String> getParameters() {
+     return parameters;
+   }
+ 
+   /**
+    * @param parameters the parameters to set
+    */
+   public void setParameters(Map<String, String> parameters) {
+     this.parameters = parameters;
+   }
+ 
+   /**
+    * @return the partitionName
+    */
+   public String getPartitionName() {
+     return partitionName;
+   }
+ 
+   /**
+    * @param partitionName the partitionName to set
+    */
+   public void setPartitionName(String partitionName) {
+     this.partitionName = partitionName;
+   }
+ 
+   /**
+    * @return the createTime
+    */
+   public int getCreateTime() {
+     return createTime;
+   }
+ 
+   /**
+    * @param createTime the createTime to set
+    */
+   public void setCreateTime(int createTime) {
+     this.createTime = createTime;
+   }
+ 
++  public long getWriteId() {
++    return writeId;
++  }
++
++  public void setWriteId(long writeId) {
++    this.writeId = writeId;
++  }
+ }

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
index 0000000,38ad479..deeb971
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
@@@ -1,0 -1,273 +1,283 @@@
++
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore.model;
+ 
+ import java.util.List;
+ import java.util.Map;
+ 
+ public class MTable {
+   
+   private String tableName;
+   private MDatabase database;
+   private MStorageDescriptor sd;
+   private String owner;
+   private String ownerType;
+   private int createTime;
+   private int lastAccessTime;
+   private int retention;
+   private List<MFieldSchema> partitionKeys;
+   private Map<String, String> parameters;
+   private String viewOriginalText;
+   private String viewExpandedText;
+   private boolean rewriteEnabled;
+   private String tableType;
++  private long writeId;
+ 
+   public MTable() {}
+ 
+   /**
+    * @param tableName
+    * @param database
+    * @param sd
+    * @param owner
+    * @param ownerType
+    * @param createTime
+    * @param lastAccessTime
+    * @param retention
+    * @param partitionKeys
+    * @param parameters
+    * @param viewOriginalText
+    * @param viewExpandedText
+    * @param tableType
+    */
+   public MTable(String tableName, MDatabase database, MStorageDescriptor sd, String owner, String ownerType,
+       int createTime, int lastAccessTime, int retention, List<MFieldSchema> partitionKeys,
+       Map<String, String> parameters, String viewOriginalText, String viewExpandedText,
+       boolean rewriteEnabled, String tableType) {
+     this.tableName = tableName;
+     this.database = database;
+     this.sd = sd;
+     this.owner = owner;
+     this.ownerType = ownerType;
+     this.createTime = createTime;
+     this.setLastAccessTime(lastAccessTime);
+     this.retention = retention;
+     this.partitionKeys = partitionKeys;
+     this.parameters = parameters;
+     this.viewOriginalText = viewOriginalText;
+     this.viewExpandedText = viewExpandedText;
+     this.rewriteEnabled = rewriteEnabled;
+     this.tableType = tableType;
+   }
+ 
+   /**
+    * @return the tableName
+    */
+   public String getTableName() {
+     return tableName;
+   }
+ 
+   /**
+    * @param tableName the tableName to set
+    */
+   public void setTableName(String tableName) {
+     this.tableName = tableName;
+   }
+ 
+   /**
+    * @return the sd
+    */
+   public MStorageDescriptor getSd() {
+     return sd;
+   }
+ 
+   /**
+    * @param sd the sd to set
+    */
+   public void setSd(MStorageDescriptor sd) {
+     this.sd = sd;
+   }
+ 
+   /**
+    * @return the partKeys
+    */
+   public List<MFieldSchema> getPartitionKeys() {
+     return partitionKeys;
+   }
+ 
+   /**
+    * @param partKeys the partKeys to set
+    */
+   public void setPartitionKeys(List<MFieldSchema> partKeys) {
+     this.partitionKeys = partKeys;
+   }
+ 
+   /**
+    * @return the parameters
+    */
+   public Map<String, String> getParameters() {
+     return parameters;
+   }
+ 
+   /**
+    * @param parameters the parameters to set
+    */
+   public void setParameters(Map<String, String> parameters) {
+     this.parameters = parameters;
+   }
+ 
+   /**
+    * @return the original view text, or null if this table is not a view
+    */
+   public String getViewOriginalText() {
+     return viewOriginalText;
+   }
+ 
+   /**
+    * @param viewOriginalText the original view text to set
+    */
+   public void setViewOriginalText(String viewOriginalText) {
+     this.viewOriginalText = viewOriginalText;
+   }
+ 
+   /**
+    * @return the expanded view text, or null if this table is not a view
+    */
+   public String getViewExpandedText() {
+     return viewExpandedText;
+   }
+ 
+   /**
+    * @param viewExpandedText the expanded view text to set
+    */
+   public void setViewExpandedText(String viewExpandedText) {
+     this.viewExpandedText = viewExpandedText;
+   }
+ 
+   /**
+    * @return whether the view can be used for rewriting queries
+    */
+   public boolean isRewriteEnabled() {
+     return rewriteEnabled;
+   }
+ 
+   /**
+    * @param rewriteEnabled whether the view can be used for rewriting queries
+    */
+   public void setRewriteEnabled(boolean rewriteEnabled) {
+     this.rewriteEnabled = rewriteEnabled;
+   }
+ 
+   /**
+    * @return the owner
+    */
+   public String getOwner() {
+     return owner;
+   }
+ 
+   /**
+    * @param owner the owner to set
+    */
+   public void setOwner(String owner) {
+     this.owner = owner;
+   }
+ 
+   /**
+    * @return the owner type
+    */
+   public String getOwnerType() {
+     return ownerType;
+   }
+ 
+   /**
+    * @param ownerType the owner type to set
+    */
+   public void setOwnerType(String ownerType) {
+     this.ownerType = ownerType;
+   }
+ 
+   /**
+    * @return the createTime
+    */
+   public int getCreateTime() {
+     return createTime;
+   }
+ 
+   /**
+    * @param createTime the createTime to set
+    */
+   public void setCreateTime(int createTime) {
+     this.createTime = createTime;
+   }
+ 
+   /**
+    * @return the database
+    */
+   public MDatabase getDatabase() {
+     return database;
+   }
+ 
+   /**
+    * @param database the database to set
+    */
+   public void setDatabase(MDatabase database) {
+     this.database = database;
+   }
+ 
+   /**
+    * @return the retention
+    */
+   public int getRetention() {
+     return retention;
+   }
+ 
+   /**
+    * @param retention the retention to set
+    */
+   public void setRetention(int retention) {
+     this.retention = retention;
+   }
+ 
+   /**
+    * @param lastAccessTime the lastAccessTime to set
+    */
+   public void setLastAccessTime(int lastAccessTime) {
+     this.lastAccessTime = lastAccessTime;
+   }
+ 
+   /**
+    * @return the lastAccessTime
+    */
+   public int getLastAccessTime() {
+     return lastAccessTime;
+   }
+ 
+   /**
+    * @param tableType the tableType to set
+    */
+   public void setTableType(String tableType) {
+     this.tableType = tableType;
+   }
+ 
+   /**
+    * @return the tableType
+    */
+   public String getTableType() {
+     return tableType;
+   }
++
++  public long getWriteId() {
++    return writeId;
++  }
++
++  public void setWriteId(long writeId) {
++    this.writeId = writeId;
++  }
+ }
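
The only functional change in MTable above is the new writeId bean property. A minimal usage
sketch (hypothetical caller code, not part of this patch; it assumes the MTable class above is
on the classpath and skips the other constructor arguments):

    // Hypothetical: record the table-level write id on the model object
    // before it is persisted by the metastore's object store.
    MTable tbl = new MTable();
    tbl.setTableName("acid_tbl");
    tbl.setWriteId(42L);              // value would normally come from the txn layer
    long persistedWriteId = tbl.getWriteId();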

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
index 0000000,4e3068d..1f559e9
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
@@@ -1,0 -1,1107 +1,1158 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.txn;
+ 
++import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.common.classification.RetrySemantics;
+ import org.apache.hadoop.hive.metastore.api.CompactionType;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.util.StringUtils;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import java.sql.Connection;
+ import java.sql.PreparedStatement;
+ import java.sql.ResultSet;
+ import java.sql.SQLException;
+ import java.sql.Statement;
+ import java.util.ArrayList;
+ import java.util.Collections;
+ import java.util.HashSet;
+ import java.util.List;
+ import java.util.Set;
+ 
+ /**
+  * Extends the transaction handler with methods needed only by the compactor threads.  These
+  * methods are not available through the thrift interface.
+  */
+ class CompactionTxnHandler extends TxnHandler {
+   static final private String CLASS_NAME = CompactionTxnHandler.class.getName();
+   static final private Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
+ 
+   public CompactionTxnHandler() {
+   }
+ 
+   /**
+    * This will look through the completed_txn_components table and look for partitions or tables
+    * that may be ready for compaction.  Also, look through txns and txn_components tables for
+    * aborted transactions that we should add to the list.
+    * @param maxAborted Maximum number of aborted transactions to allow before marking this as a
+    *                   potential compaction.
+    * @return list of CompactionInfo structs.  These will not have id, type,
+    * or runAs set since these are only potential compactions not actual ones.
+    */
+   @Override
+   @RetrySemantics.ReadOnly
+   public Set<CompactionInfo> findPotentialCompactions(int maxAborted) throws MetaException {
+     Connection dbConn = null;
+     Set<CompactionInfo> response = new HashSet<>();
+     Statement stmt = null;
+     ResultSet rs = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         // Check for completed transactions
+         String s = "select distinct ctc_database, ctc_table, " +
+           "ctc_partition from COMPLETED_TXN_COMPONENTS";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         while (rs.next()) {
+           CompactionInfo info = new CompactionInfo();
+           info.dbname = rs.getString(1);
+           info.tableName = rs.getString(2);
+           info.partName = rs.getString(3);
+           response.add(info);
+         }
+         rs.close();
+ 
+         // Check for aborted txns
+         s = "select tc_database, tc_table, tc_partition " +
+           "from TXNS, TXN_COMPONENTS " +
+           "where txn_id = tc_txnid and txn_state = '" + TXN_ABORTED + "' " +
+           "group by tc_database, tc_table, tc_partition " +
+           "having count(*) > " + maxAborted;
+ 
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         while (rs.next()) {
+           CompactionInfo info = new CompactionInfo();
+           info.dbname = rs.getString(1);
+           info.tableName = rs.getString(2);
+           info.partName = rs.getString(3);
+           info.tooManyAborts = true;
+           response.add(info);
+         }
+ 
+         LOG.debug("Going to rollback");
+         dbConn.rollback();
+       } catch (SQLException e) {
+         LOG.error("Unable to connect to transaction database " + e.getMessage());
+         checkRetryable(dbConn, e, "findPotentialCompactions(maxAborted:" + maxAborted + ")");
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+       return response;
+     }
+     catch (RetryException e) {
+       return findPotentialCompactions(maxAborted);
+     }
+   }
+ 
+   /**
+    * Sets the user to run as.  This is for the case
+    * where the request was generated by the user and so the worker must set this value later.
+    * @param cq_id id of this entry in the queue
+    * @param user user to run the jobs as
+    */
+   @Override
+   @RetrySemantics.Idempotent
+   public void setRunAs(long cq_id, String user) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "update COMPACTION_QUEUE set cq_run_as = '" + user + "' where cq_id = " + cq_id;
+         LOG.debug("Going to execute update <" + s + ">");
+         int updCnt = stmt.executeUpdate(s);
+         if (updCnt != 1) {
+           LOG.error("Unable to set cq_run_as=" + user + " for compaction record with cq_id=" + cq_id + ".  updCnt=" + updCnt);
+           LOG.debug("Going to rollback");
+           dbConn.rollback();
+         }
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to update compaction queue, " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "setRunAs(cq_id:" + cq_id + ",user:" + user +")");
+       } finally {
+         closeDbConn(dbConn);
+         closeStmt(stmt);
+       }
+     } catch (RetryException e) {
+       setRunAs(cq_id, user);
+     }
+   }
+ 
+   /**
+    * This will grab the next compaction request off of
+    * the queue, and assign it to the worker.
+    * @param workerId id of the worker calling this, will be recorded in the db
+    * @return an info element for this compaction request, or null if there is no work to do now.
+    */
+   @Override
+   @RetrySemantics.SafeToRetry
+   public CompactionInfo findNextToCompact(String workerId) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       //need a separate stmt for executeUpdate(), otherwise it will close the ResultSet (HIVE-12725)
+       Statement updStmt = null;
+       ResultSet rs = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "select cq_id, cq_database, cq_table, cq_partition, " +
+           "cq_type, cq_tblproperties from COMPACTION_QUEUE where cq_state = '" + INITIATED_STATE + "'";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (!rs.next()) {
+           LOG.debug("No compactions found ready to compact");
+           dbConn.rollback();
+           return null;
+         }
+         updStmt = dbConn.createStatement();
+         do {
+           CompactionInfo info = new CompactionInfo();
+           info.id = rs.getLong(1);
+           info.dbname = rs.getString(2);
+           info.tableName = rs.getString(3);
+           info.partName = rs.getString(4);
+           info.type = dbCompactionType2ThriftType(rs.getString(5).charAt(0));
+           info.properties = rs.getString(6);
+           // Now, update this record as being worked on by this worker.
+           long now = getDbTime(dbConn);
+           s = "update COMPACTION_QUEUE set cq_worker_id = '" + workerId + "', " +
+             "cq_start = " + now + ", cq_state = '" + WORKING_STATE + "' where cq_id = " + info.id +
+             " AND cq_state='" + INITIATED_STATE + "'";
+           LOG.debug("Going to execute update <" + s + ">");
+           int updCount = updStmt.executeUpdate(s);
+           if(updCount == 1) {
+             dbConn.commit();
+             return info;
+           }
+           if(updCount == 0) {
+             LOG.debug("Another Worker picked up " + info);
+             continue;
+           }
+           LOG.error("Unable to set to cq_state=" + WORKING_STATE + " for compaction record: " +
+             info + ". updCnt=" + updCount + ".");
+           dbConn.rollback();
+           return null;
+         } while( rs.next());
+         dbConn.rollback();
+         return null;
+       } catch (SQLException e) {
+         LOG.error("Unable to select next element for compaction, " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "findNextToCompact(workerId:" + workerId + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         closeStmt(updStmt);
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return findNextToCompact(workerId);
+     }
+   }
+ 
+   /**
+    * This will mark an entry in the queue as compacted
+    * and put it in the ready to clean state.
+    * @param info info on the compaction entry to mark as compacted.
+    */
+   @Override
+   @RetrySemantics.SafeToRetry
+   public void markCompacted(CompactionInfo info) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "update COMPACTION_QUEUE set cq_state = '" + READY_FOR_CLEANING + "', " +
+           "cq_worker_id = null where cq_id = " + info.id;
+         LOG.debug("Going to execute update <" + s + ">");
+         int updCnt = stmt.executeUpdate(s);
+         if (updCnt != 1) {
+           LOG.error("Unable to set cq_state=" + READY_FOR_CLEANING + " for compaction record: " + info + ". updCnt=" + updCnt);
+           LOG.debug("Going to rollback");
+           dbConn.rollback();
+         }
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to update compaction queue " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "markCompacted(" + info + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         closeStmt(stmt);
+         closeDbConn(dbConn);
+       }
+     } catch (RetryException e) {
+       markCompacted(info);
+     }
+   }
+ 
+   /**
+    * Find entries in the queue that are ready to
+    * be cleaned.
+    * @return information on the entry in the queue.
+    */
+   @Override
+   @RetrySemantics.ReadOnly
+   public List<CompactionInfo> findReadyToClean() throws MetaException {
+     Connection dbConn = null;
+     List<CompactionInfo> rc = new ArrayList<>();
+ 
+     Statement stmt = null;
+     ResultSet rs = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "select cq_id, cq_database, cq_table, cq_partition, "
+                 + "cq_type, cq_run_as, cq_highest_write_id from COMPACTION_QUEUE where cq_state = '"
+                 + READY_FOR_CLEANING + "'";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         while (rs.next()) {
+           CompactionInfo info = new CompactionInfo();
+           info.id = rs.getLong(1);
+           info.dbname = rs.getString(2);
+           info.tableName = rs.getString(3);
+           info.partName = rs.getString(4);
+           switch (rs.getString(5).charAt(0)) {
+             case MAJOR_TYPE: info.type = CompactionType.MAJOR; break;
+             case MINOR_TYPE: info.type = CompactionType.MINOR; break;
+             default: throw new MetaException("Unexpected compaction type " + rs.getString(5));
+           }
+           info.runAs = rs.getString(6);
+           info.highestWriteId = rs.getLong(7);
+           rc.add(info);
+         }
+         LOG.debug("Going to rollback");
+         dbConn.rollback();
+         return rc;
+       } catch (SQLException e) {
+         LOG.error("Unable to select next element for cleaning, " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "findReadyToClean");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return findReadyToClean();
+     }
+   }
+ 
+   /**
+    * This will remove an entry from the queue after
+    * it has been compacted.
+    * 
+    * @param info info on the compaction entry to remove
+    */
+   @Override
+   @RetrySemantics.CannotRetry
+   public void markCleaned(CompactionInfo info) throws MetaException {
+     try {
+       Connection dbConn = null;
+       PreparedStatement pStmt = null;
+       ResultSet rs = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?");
+         pStmt.setLong(1, info.id);
+         rs = pStmt.executeQuery();
+         if(rs.next()) {
+           info = CompactionInfo.loadFullFromCompactionQueue(rs);
+         }
+         else {
+           throw new IllegalStateException("No record with CQ_ID=" + info.id + " found in COMPACTION_QUEUE");
+         }
+         close(rs);
+         String s = "delete from COMPACTION_QUEUE where cq_id = ?";
+         pStmt = dbConn.prepareStatement(s);
+         pStmt.setLong(1, info.id);
+         LOG.debug("Going to execute update <" + s + ">");
+         int updCount = pStmt.executeUpdate();
+         if (updCount != 1) {
+           LOG.error("Unable to delete compaction record: " + info +  ".  Update count=" + updCount);
+           LOG.debug("Going to rollback");
+           dbConn.rollback();
+         }
+         pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)");
+         info.state = SUCCEEDED_STATE;
+         CompactionInfo.insertIntoCompletedCompactions(pStmt, info, getDbTime(dbConn));
+         updCount = pStmt.executeUpdate();
+ 
+         // Remove entries from completed_txn_components as well, so we don't start looking there
+         // again but only up to the highest write ID included in this compaction job.
+         //highestWriteId will be NULL in upgrade scenarios
+         s = "delete from COMPLETED_TXN_COMPONENTS where ctc_database = ? and " +
+             "ctc_table = ?";
+         if (info.partName != null) {
+           s += " and ctc_partition = ?";
+         }
+         if(info.highestWriteId != 0) {
+           s += " and ctc_writeid <= ?";
+         }
+         pStmt = dbConn.prepareStatement(s);
+         int paramCount = 1;
+         pStmt.setString(paramCount++, info.dbname);
+         pStmt.setString(paramCount++, info.tableName);
+         if (info.partName != null) {
+           pStmt.setString(paramCount++, info.partName);
+         }
+         if(info.highestWriteId != 0) {
+           pStmt.setLong(paramCount++, info.highestWriteId);
+         }
+         LOG.debug("Going to execute update <" + s + ">");
+         if (pStmt.executeUpdate() < 1) {
+           LOG.error("Expected to remove at least one row from completed_txn_components when " +
+             "marking compaction entry as clean!");
+         }
+ 
+         s = "select distinct txn_id from TXNS, TXN_COMPONENTS where txn_id = tc_txnid and txn_state = '" +
+           TXN_ABORTED + "' and tc_database = ? and tc_table = ?";
+         if (info.highestWriteId != 0) s += " and tc_writeid <= ?";
+         if (info.partName != null) s += " and tc_partition = ?";
+ 
+         pStmt = dbConn.prepareStatement(s);
+         paramCount = 1;
+         pStmt.setString(paramCount++, info.dbname);
+         pStmt.setString(paramCount++, info.tableName);
+         if(info.highestWriteId != 0) {
+           pStmt.setLong(paramCount++, info.highestWriteId);
+         }
+         if (info.partName != null) {
+           pStmt.setString(paramCount++, info.partName);
+         }
+ 
+         LOG.debug("Going to execute update <" + s + ">");
+         rs = pStmt.executeQuery();
+         List<Long> txnids = new ArrayList<>();
+         List<String> questions = new ArrayList<>();
+         while (rs.next()) {
+           long id = rs.getLong(1);
+           txnids.add(id);
+           questions.add("?");
+         }
+         // Remove entries from txn_components, as there may be aborted txn components
+         if (txnids.size() > 0) {
+           List<String> queries = new ArrayList<>();
+ 
+           // Prepare prefix and suffix
+           StringBuilder prefix = new StringBuilder();
+           StringBuilder suffix = new StringBuilder();
+ 
+           prefix.append("delete from TXN_COMPONENTS where ");
+ 
+           //because 1 txn may include different partitions/tables even in auto commit mode
+           suffix.append(" and tc_database = ?");
+           suffix.append(" and tc_table = ?");
+           if (info.partName != null) {
+             suffix.append(" and tc_partition = ?");
+           }
+ 
+           // Populate the complete query with provided prefix and suffix
+           List<Integer> counts = TxnUtils
+               .buildQueryWithINClauseStrings(conf, queries, prefix, suffix, questions, "tc_txnid",
+                   true, false);
+           int totalCount = 0;
+           for (int i = 0; i < queries.size(); i++) {
+             String query = queries.get(i);
+             int insertCount = counts.get(i);
+ 
+             LOG.debug("Going to execute update <" + query + ">");
+             pStmt = dbConn.prepareStatement(query);
+             for (int j = 0; j < insertCount; j++) {
+               pStmt.setLong(j + 1, txnids.get(totalCount + j));
+             }
+             totalCount += insertCount;
+             paramCount = insertCount + 1;
+             pStmt.setString(paramCount++, info.dbname);
+             pStmt.setString(paramCount++, info.tableName);
+             if (info.partName != null) {
+               pStmt.setString(paramCount++, info.partName);
+             }
+             int rc = pStmt.executeUpdate();
+             LOG.debug("Removed " + rc + " records from txn_components");
+ 
+             // Don't bother cleaning from the txns table.  A separate call will do that.  We don't
+             // know here which txns still have components from other tables or partitions in the
+             // table, so we don't know which ones we can and cannot clean.
+           }
+         }
+ 
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to delete from compaction queue " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "markCleaned(" + info + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(rs, pStmt, dbConn);
+       }
+     } catch (RetryException e) {
+       markCleaned(info);
+     }
+   }
+ 
+   /**
+    * Clean up entries from TXN_TO_WRITE_ID table less than min_uncommitted_txnid as found by
+    * min(NEXT_TXN_ID.ntxn_next, min(MIN_HISTORY_LEVEL.mhl_min_open_txnid), min(Aborted TXNS.txn_id)).
+    */
+   @Override
+   @RetrySemantics.SafeToRetry
+   public void cleanTxnToWriteIdTable() throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+ 
+       try {
+         // We query for minimum values in all the queries and they can only increase under concurrent
+         // operations. So, READ COMMITTED is sufficient.
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         // First need to find the min_uncommitted_txnid which is currently seen by any open transactions.
+         // If there are no txns which are currently open or aborted in the system, then current value of
+         // NEXT_TXN_ID.ntxn_next could be min_uncommitted_txnid.
+         String s = "select ntxn_next from NEXT_TXN_ID";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (!rs.next()) {
+           throw new MetaException("Transaction tables not properly " +
+                   "initialized, no record found in next_txn_id");
+         }
+         long minUncommittedTxnId = rs.getLong(1);
+ 
+         // If there are any open txns, then the minimum of min_open_txnid from MIN_HISTORY_LEVEL table
+         // could be the min_uncommitted_txnid if less than NEXT_TXN_ID.ntxn_next.
+         s = "select min(mhl_min_open_txnid) from MIN_HISTORY_LEVEL";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (rs.next()) {
+           long minOpenTxnId = rs.getLong(1);
+           if (minOpenTxnId > 0) {
+             minUncommittedTxnId = Math.min(minOpenTxnId, minUncommittedTxnId);
+           }
+         }
+ 
+         // If there are aborted txns, then the minimum aborted txnid could be the min_uncommitted_txnid
+         // if less than both NEXT_TXN_ID.ntxn_next and min(MIN_HISTORY_LEVEL.mhl_min_open_txnid).
+         s = "select min(txn_id) from TXNS where txn_state = " + quoteChar(TXN_ABORTED);
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (rs.next()) {
+           long minAbortedTxnId = rs.getLong(1);
+           if (minAbortedTxnId > 0) {
+             minUncommittedTxnId = Math.min(minAbortedTxnId, minUncommittedTxnId);
+           }
+         }
+ 
+         // As all txns below min_uncommitted_txnid are either committed or empty_aborted, we are allowed
+         // to cleanup the entries less than min_uncommitted_txnid from the TXN_TO_WRITE_ID table.
+         s = "delete from TXN_TO_WRITE_ID where t2w_txnid < " + minUncommittedTxnId;
+         LOG.debug("Going to execute delete <" + s + ">");
+         int rc = stmt.executeUpdate(s);
+         LOG.info("Removed " + rc + " rows from TXN_TO_WRITE_ID with Txn Low-Water-Mark: " + minUncommittedTxnId);
+ 
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to delete from txns table " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "cleanTxnToWriteIdTable");
+         throw new MetaException("Unable to connect to transaction database " +
+                 StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       cleanTxnToWriteIdTable();
+     }
+   }
+ 
+   /**
+    * Clean up aborted transactions from txns that have no components in txn_components. The reason such
+    * txns exist can be that no work was done in this txn (e.g. Streaming opened TransactionBatch and
+    * abandoned it w/o doing any work) or due to {@link #markCleaned(CompactionInfo)} being called.
+    */
+   @Override
+   @RetrySemantics.SafeToRetry
+   public void cleanEmptyAbortedTxns() throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       try {
+         //Aborted is a terminal state, so nothing about the txn can change
+         //after that, so READ COMMITTED is sufficient.
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "select txn_id from TXNS where " +
 -          "txn_id not in (select tc_txnid from TXN_COMPONENTS) and " +
 -          "txn_state = '" + TXN_ABORTED + "'";
++            "txn_id not in (select tc_txnid from TXN_COMPONENTS) and " +
++            "txn_state = '" + TXN_ABORTED + "'";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         List<Long> txnids = new ArrayList<>();
+         while (rs.next()) txnids.add(rs.getLong(1));
+         close(rs);
+         if(txnids.size() <= 0) {
+           return;
+         }
+         Collections.sort(txnids);//easier to read logs
++
+         List<String> queries = new ArrayList<>();
+         StringBuilder prefix = new StringBuilder();
+         StringBuilder suffix = new StringBuilder();
+ 
++        // Turn off COLUMN_STATS_ACCURATE for txnids' components in TBLS and PARTITIONS
++        prefix.append("select tbl_id from TBLS inner join DBS on TBLS.DB_ID = DBS.DB_ID "
++            + "inner join TXN_TO_WRITE_ID on t2w_database = DBS.NAME and t2w_table = TBLS.TBL_NAME"
++            + " and t2w_writeid = TBLS.WRITE_ID where ");
++        suffix.append("");
++        TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "t2w_txnid", true, false);
++
++        // Delete COLUMN_STATS_ACCURATE.BASIC_STATS rows from TABLE_PARAMS for the txnids.
++        List<StringBuilder> finalCommands = new ArrayList<>(queries.size());
++        for (int i = 0; i < queries.size(); i++) {
++          String query = queries.get(i);
++          finalCommands.add(i, new StringBuilder("delete from TABLE_PARAMS " +
++                  " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and tbl_id in ("));
++          finalCommands.get(i).append(query + ")");
++          LOG.debug("Going to execute update <" + finalCommands.get(i) + ">");
++          int rc = stmt.executeUpdate(finalCommands.get(i).toString());
++          LOG.info("Turned off " + rc + " COLUMN_STATE_ACCURATE.BASIC_STATS states from TBLS");
++        }
++
++        queries.clear();
++        prefix.setLength(0);
++        suffix.setLength(0);
++        finalCommands.clear();
++
++        // Delete COLUMN_STATS_ACCURATE.BASIC_STATS rows from PARTITION_PARAMS for the txnids.
++        prefix.append("select part_id from PARTITIONS "
++            + "inner join TBLS on PARTITIONS.TBL_ID = TBLS.TBL_ID "
++            + "inner join DBS on TBLS.DB_ID = DBS.DB_ID "
++            + "inner join TXN_TO_WRITE_ID on t2w_database = DBS.NAME and t2w_table = TBLS.TBL_NAME"
++            + " and t2w_writeid = TBLS.WRITE_ID where ");
++        suffix.append("");
++        TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "t2w_txnid", true, false);
++
++        for (int i = 0; i < queries.size(); i++) {
++          String query = queries.get(i);
++          finalCommands.add(i, new StringBuilder("delete from PARTITION_PARAMS " +
++                  " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and part_id in ("));
++          finalCommands.get(i).append(query + ")");
++          LOG.debug("Going to execute update <" + finalCommands.get(i) + ">");
++          int rc = stmt.executeUpdate(finalCommands.get(i).toString());
++          LOG.info("Turned off " + rc + " COLUMN_STATE_ACCURATE.BASIC_STATS states from PARTITIONS");
++        }
++
++        queries.clear();
++        prefix.setLength(0);
++        suffix.setLength(0);
++        finalCommands.clear();
++
++        // Delete from TXNS.
+         prefix.append("delete from TXNS where ");
+         suffix.append("");
+ 
+         TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "txn_id", false, false);
+ 
+         for (String query : queries) {
+           LOG.debug("Going to execute update <" + query + ">");
+           int rc = stmt.executeUpdate(query);
+           LOG.info("Removed " + rc + "  empty Aborted transactions from TXNS");
+         }
+         LOG.info("Aborted transactions removed from TXNS: " + txnids);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to delete from txns table " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "cleanEmptyAbortedTxns");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       cleanEmptyAbortedTxns();
+     }
+   }
+ 
+   /**
+    * This will take all entries assigned to workers
+    * on a host and return them to INITIATED state.  The initiator should use this at start up to
+    * clean entries from any workers that were in the middle of compacting when the metastore
+    * shutdown.  It does not reset entries from worker threads on other hosts as those may still
+    * be working.
+    * @param hostname Name of this host.  It is assumed this prefixes the thread's worker id,
+    *                 so that a LIKE pattern of hostname% will match the worker id.
+    */
+   @Override
+   @RetrySemantics.Idempotent
+   public void revokeFromLocalWorkers(String hostname) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "update COMPACTION_QUEUE set cq_worker_id = null, cq_start = null, cq_state = '"
+           + INITIATED_STATE+ "' where cq_state = '" + WORKING_STATE + "' and cq_worker_id like '"
+           +  hostname + "%'";
+         LOG.debug("Going to execute update <" + s + ">");
+         // It isn't an error if the following returns no rows, as the local workers could have died
+         // with  nothing assigned to them.
+         stmt.executeUpdate(s);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to change dead worker's records back to initiated state " +
+           e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "revokeFromLocalWorkers(hostname:" + hostname +")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         closeStmt(stmt);
+         closeDbConn(dbConn);
+       }
+     } catch (RetryException e) {
+       revokeFromLocalWorkers(hostname);
+     }
+   }
+ 
+   /**
+    * This call will return all compaction queue entries that are assigned to a worker
+    * but have exceeded the timeout back to the INITIATED state.
+    * This should be called by the initiator on start up and occasionally when running to clean up
+    * after dead threads.  At start up {@link #revokeFromLocalWorkers(String)} should be called
+    * first.
+    * @param timeout number of milliseconds since start time that should elapse before a worker is
+    *                declared dead.
+    */
+   @Override
+   @RetrySemantics.Idempotent
+   public void revokeTimedoutWorkers(long timeout) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         long latestValidStart = getDbTime(dbConn) - timeout;
+         stmt = dbConn.createStatement();
+         String s = "update COMPACTION_QUEUE set cq_worker_id = null, cq_start = null, cq_state = '"
+           + INITIATED_STATE+ "' where cq_state = '" + WORKING_STATE + "' and cq_start < "
+           +  latestValidStart;
+         LOG.debug("Going to execute update <" + s + ">");
+         // It isn't an error if the following returns no rows, as the local workers could have died
+         // with  nothing assigned to them.
+         stmt.executeUpdate(s);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to change dead worker's records back to initiated state " +
+           e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "revokeTimedoutWorkers(timeout:" + timeout + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         closeStmt(stmt);
+         closeDbConn(dbConn);
+       }
+     } catch (RetryException e) {
+       revokeTimedoutWorkers(timeout);
+     }
+   }
+ 
+   /**
+    * Queries metastore DB directly to find columns in the table which have statistics information.
+    * If {@code ci} includes partition info then per partition stats info is examined, otherwise
+    * table level stats are examined.
+    * @throws MetaException
+    */
+   @Override
+   @RetrySemantics.ReadOnly
+   public List<String> findColumnsWithStats(CompactionInfo ci) throws MetaException {
+     Connection dbConn = null;
+     PreparedStatement pStmt = null;
+     ResultSet rs = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         String quote = getIdentifierQuoteString(dbConn);
+         StringBuilder bldr = new StringBuilder();
+         bldr.append("SELECT ").append(quote).append("COLUMN_NAME").append(quote)
+           .append(" FROM ")
+           .append(quote).append((ci.partName == null ? "TAB_COL_STATS" : "PART_COL_STATS"))
+           .append(quote)
+           .append(" WHERE ")
+           .append(quote).append("DB_NAME").append(quote).append(" = ?")
+           .append(" AND ").append(quote).append("TABLE_NAME").append(quote)
+           .append(" = ?");
+         if (ci.partName != null) {
+           bldr.append(" AND ").append(quote).append("PARTITION_NAME").append(quote).append(" = ?");
+         }
+         String s = bldr.toString();
+         pStmt = dbConn.prepareStatement(s);
+         pStmt.setString(1, ci.dbname);
+         pStmt.setString(2, ci.tableName);
+         if (ci.partName != null) {
+           pStmt.setString(3, ci.partName);
+         }
+ 
+       /*String s = "SELECT COLUMN_NAME FROM " + (ci.partName == null ? "TAB_COL_STATS" :
+           "PART_COL_STATS")
+          + " WHERE DB_NAME='" + ci.dbname + "' AND TABLE_NAME='" + ci.tableName + "'"
+         + (ci.partName == null ? "" : " AND PARTITION_NAME='" + ci.partName + "'");*/
+         LOG.debug("Going to execute <" + s + ">");
+         rs = pStmt.executeQuery();
+         List<String> columns = new ArrayList<>();
+         while (rs.next()) {
+           columns.add(rs.getString(1));
+         }
+         LOG.debug("Found columns to update stats: " + columns + " on " + ci.tableName +
+           (ci.partName == null ? "" : "/" + ci.partName));
+         dbConn.commit();
+         return columns;
+       } catch (SQLException e) {
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "findColumnsWithStats(" + ci.tableName +
+           (ci.partName == null ? "" : "/" + ci.partName) + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(rs, pStmt, dbConn);
+       }
+     } catch (RetryException ex) {
+       return findColumnsWithStats(ci);
+     }
+   }
+ 
+   /**
+    * Record the highest write id that the {@code ci} compaction job will pay attention to.
+    * This is the highest resolved txn id, i.e. such that there are no open txns with lower ids.
+    */
+   @Override
+   @RetrySemantics.Idempotent
+   public void setCompactionHighestWriteId(CompactionInfo ci, long highestWriteId) throws MetaException {
+     Connection dbConn = null;
+     Statement stmt = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         int updCount = stmt.executeUpdate("UPDATE COMPACTION_QUEUE SET CQ_HIGHEST_WRITE_ID = " + highestWriteId +
+           " WHERE CQ_ID = " + ci.id);
+         if(updCount != 1) {
+           throw new IllegalStateException("Could not find record in COMPACTION_QUEUE for " + ci);
+         }
+         dbConn.commit();
+       } catch (SQLException e) {
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "setCompactionHighestWriteId(" + ci + "," + highestWriteId + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+       }
+     } catch (RetryException ex) {
+       setCompactionHighestWriteId(ci, highestWriteId);
+     }
+   }
+   private static class RetentionCounters {
+     int attemptedRetention = 0;
+     int failedRetention = 0;
+     int succeededRetention = 0;
+     RetentionCounters(int attemptedRetention, int failedRetention, int succeededRetention) {
+       this.attemptedRetention = attemptedRetention;
+       this.failedRetention = failedRetention;
+       this.succeededRetention = succeededRetention;
+     }
+   }
+   private void checkForDeletion(List<Long> deleteSet, CompactionInfo ci, RetentionCounters rc) {
+     switch (ci.state) {
+       case ATTEMPTED_STATE:
+         if(--rc.attemptedRetention < 0) {
+           deleteSet.add(ci.id);
+         }
+         break;
+       case FAILED_STATE:
+         if(--rc.failedRetention < 0) {
+           deleteSet.add(ci.id);
+         }
+         break;
+       case SUCCEEDED_STATE:
+         if(--rc.succeededRetention < 0) {
+           deleteSet.add(ci.id);
+         }
+         break;
+       default:
+         //do nothing to handle future RU/D where we may want to add new state types
+     }
+   }
+ 
+   /**
+    * For any given compactable entity (partition; table if not partitioned) the history of compactions
+    * may look like "sssfffaaasffss", for example.  The idea is to retain the tail (most recent) of the
+    * history such that a configurable number of each type of state is present.  Any other entries
+    * can be purged.  This scheme has the advantage of always retaining the last failure/success even if
+    * it's not recent.
+    * @throws MetaException
+    */
+   @Override
+   @RetrySemantics.SafeToRetry
+   public void purgeCompactionHistory() throws MetaException {
+     Connection dbConn = null;
+     Statement stmt = null;
+     PreparedStatement pStmt = null;
+     ResultSet rs = null;
+     List<Long> deleteSet = new ArrayList<>();
+     RetentionCounters rc = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         /*cc_id is monotonically increasing, so for any entity it sorts in the order of compaction history;
+         thus this query groups by entity and, within each group, sorts most recent first*/
+         rs = stmt.executeQuery("select cc_id, cc_database, cc_table, cc_partition, cc_state from " +
+           "COMPLETED_COMPACTIONS order by cc_database, cc_table, cc_partition, cc_id desc");
+         String lastCompactedEntity = null;
+         /*In each group, walk from most recent and count occurrences of each state type.  Once you
+         * have counted enough (for each state) to satisfy the retention policy, delete all other
+         * instances of that state.*/
+         while(rs.next()) {
+           CompactionInfo ci = new CompactionInfo(rs.getLong(1), rs.getString(2), rs.getString(3), rs.getString(4), rs.getString(5).charAt(0));
+           if(!ci.getFullPartitionName().equals(lastCompactedEntity)) {
+             lastCompactedEntity = ci.getFullPartitionName();
+             rc = new RetentionCounters(MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED),
+               getFailedCompactionRetention(),
+               MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_HISTORY_RETENTION_SUCCEEDED));
+           }
+           checkForDeletion(deleteSet, ci, rc);
+         }
+         close(rs);
+ 
+         if (deleteSet.size() <= 0) {
+           return;
+         }
+ 
+         List<String> queries = new ArrayList<>();
+ 
+         StringBuilder prefix = new StringBuilder();
+         StringBuilder suffix = new StringBuilder();
+ 
+         prefix.append("delete from COMPLETED_COMPACTIONS where ");
+         suffix.append("");
+ 
+         List<String> questions = new ArrayList<>(deleteSet.size());
+         for (int  i = 0; i < deleteSet.size(); i++) {
+           questions.add("?");
+         }
+         List<Integer> counts = TxnUtils.buildQueryWithINClauseStrings(conf, queries, prefix, suffix, questions, "cc_id", false, false);
+         int totalCount = 0;
+         for (int i = 0; i < queries.size(); i++) {
+           String query = queries.get(i);
+           long insertCount = counts.get(i);
+           LOG.debug("Going to execute update <" + query + ">");
+           pStmt = dbConn.prepareStatement(query);
+           for (int j = 0; j < insertCount; j++) {
+             pStmt.setLong(j + 1, deleteSet.get(totalCount + j));
+           }
+           totalCount += insertCount;
+           int count = pStmt.executeUpdate();
+           LOG.debug("Removed " + count + " records from COMPLETED_COMPACTIONS");
+         }
+         dbConn.commit();
+       } catch (SQLException e) {
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "purgeCompactionHistory()");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+         closeStmt(pStmt);
+       }
+     } catch (RetryException ex) {
+       purgeCompactionHistory();
+     }
+   }
+   /**
+    * This ensures that the number of failed compaction entries retained is at least the failed
+    * compaction threshold which prevents new compactions from being scheduled.
+    */
+   private int getFailedCompactionRetention() {
+     int failedThreshold = MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD);
+     int failedRetention = MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED);
+     if(failedRetention < failedThreshold) {
+       LOG.warn("Invalid configuration " + ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD.getVarname() +
+         "=" + failedRetention + " < " + ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED + "=" +
+         failedRetention + ".  Will use " + ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD.getVarname() +
+         "=" + failedRetention);
+       failedRetention = failedThreshold;
+     }
+     return failedRetention;
+   }
+   /**
+    * Returns {@code true} if there already exists a sufficient number of consecutive failures for
+    * this table/partition so that no new automatic compactions will be scheduled.
+    * User initiated compactions don't do this check.
+    *
+    * Do we allow compacting the whole table (when it's partitioned)?  No, though perhaps we should.
+    * That would be a meta operation, i.e. first find all partitions for this table (which have
+    * txn info) and schedule each compaction separately.  This avoids complications in this logic.
+    */
+   @Override
+   @RetrySemantics.ReadOnly
+   public boolean checkFailedCompactions(CompactionInfo ci) throws MetaException {
+     Connection dbConn = null;
+     PreparedStatement pStmt = null;
+     ResultSet rs = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         pStmt = dbConn.prepareStatement("select CC_STATE from COMPLETED_COMPACTIONS where " +
+           "CC_DATABASE = ? and " +
+           "CC_TABLE = ? " +
+           (ci.partName != null ? "and CC_PARTITION = ?" : "") +
+           " and CC_STATE != " + quoteChar(ATTEMPTED_STATE) + " order by CC_ID desc");
+         pStmt.setString(1, ci.dbname);
+         pStmt.setString(2, ci.tableName);
+         if (ci.partName != null) {
+           pStmt.setString(3, ci.partName);
+         }
+         rs = pStmt.executeQuery();
+         int numFailed = 0;
+         int numTotal = 0;
+         int failedThreshold = MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD);
+         while(rs.next() && ++numTotal <= failedThreshold) {
+           if(rs.getString(1).charAt(0) == FAILED_STATE) {
+             numFailed++;
+           }
+           else {
+             numFailed--;
+           }
+         }
+         return numFailed == failedThreshold;
+       }
+       catch (SQLException e) {
+         LOG.error("Unable to check for failed compactions " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "checkFailedCompactions(" + ci + ")");
+         LOG.error("Unable to connect to transaction database " + StringUtils.stringifyException(e));
+         return false;//weren't able to check
+       } finally {
+         close(rs, pStmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return checkFailedCompactions(ci);
+     }
+   }
+   /**
+    * If there is an entry in compaction_queue with ci.id, remove it
+    * Make entry in completed_compactions with status 'f'.
+    * If there is no entry in compaction_queue, it means Initiator failed to even schedule a compaction,
+    * which we record as ATTEMPTED_STATE entry in history.
+    */
+   @Override
+   @RetrySemantics.CannotRetry
+   public void markFailed(CompactionInfo ci) throws MetaException {//todo: this should not throw
+     //todo: this should take "comment" as parameter to set in CC_META_INFO to provide some context for the failure
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       PreparedStatement pStmt = null;
+       ResultSet rs = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?");
+         pStmt.setLong(1, ci.id);
+         rs = pStmt.executeQuery();
+         if(rs.next()) {
+           ci = CompactionInfo.loadFullFromCompactionQueue(rs);
+           String s = "delete from COMPACTION_QUEUE where cq_id = ?";
+           pStmt = dbConn.prepareStatement(s);
+           pStmt.setLong(1, ci.id);
+           LOG.debug("Going to execute update <" + s + ">");
+           int updCnt = pStmt.executeUpdate();
+         }
+         else {
+           if(ci.id > 0) {
+             //the record with valid CQ_ID has disappeared - this is a sign of something wrong
+             throw new IllegalStateException("No record with CQ_ID=" + ci.id + " found in COMPACTION_QUEUE");
+           }
+         }
+         if(ci.id == 0) {
+           //The failure occurred before we even made an entry in COMPACTION_QUEUE
+           //generate ID so that we can make an entry in COMPLETED_COMPACTIONS
+           ci.id = generateCompactionQueueId(stmt);
+           //mostly this indicates that the Initiator is paying attention to some table even though
+           //compactions are not happening.
+           ci.state = ATTEMPTED_STATE;
+           //this is not strictly accurate, but 'type' cannot be null.
+           if(ci.type == null) { ci.type = CompactionType.MINOR; }
+           ci.start = getDbTime(dbConn);
+         }
+         else {
+           ci.state = FAILED_STATE;
+         }
+         close(rs, stmt, null);
+         closeStmt(pStmt);
+ 
+         pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)");
+         CompactionInfo.insertIntoCompletedCompactions(pStmt, ci, getDbTime(dbConn));
+         int updCount = pStmt.executeUpdate();
+         LOG.debug("Going to commit");
+         closeStmt(pStmt);
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.warn("markFailed(" + ci.id + "):" + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         try {
+           checkRetryable(dbConn, e, "markFailed(" + ci + ")");
+         }
+         catch(MetaException ex) {
+           LOG.error("Unable to connect to transaction database " + StringUtils.stringifyException(ex));
+         }
+         LOG.error("markFailed(" + ci + ") failed: " + e.getMessage(), e);
+       } finally {
+         close(rs, stmt, null);
+         close(null, pStmt, dbConn);
+       }
+     } catch (RetryException e) {
+       markFailed(ci);
+     }
+   }
+   @Override
+   @RetrySemantics.Idempotent
+   public void setHadoopJobId(String hadoopJobId, long id) {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "update COMPACTION_QUEUE set CQ_HADOOP_JOB_ID = " + quoteString(hadoopJobId) + " WHERE CQ_ID = " + id;
+         LOG.debug("Going to execute <" + s + ">");
+         int updateCount = stmt.executeUpdate(s);
+         LOG.debug("Going to commit");
+         closeStmt(stmt);
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.warn("setHadoopJobId(" + hadoopJobId + "," + id + "):" + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         try {
+           checkRetryable(dbConn, e, "setHadoopJobId(" + hadoopJobId + "," + id + ")");
+         }
+         catch(MetaException ex) {
+           LOG.error("Unable to connect to transaction database " + StringUtils.stringifyException(ex));
+         }
+         LOG.error("setHadoopJobId(" + hadoopJobId + "," + id + ") failed: " + e.getMessage(), e);
+       } finally {
+         close(null, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       setHadoopJobId(hadoopJobId, id);
+     }
+   }
+ }
+ 
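markCleaned, cleanEmptyAbortedTxns and purgeCompactionHistory above all delegate to
TxnUtils.buildQueryWithINClause / buildQueryWithINClauseStrings to split a potentially huge id
list into several bounded "IN (...)" statements. A self-contained sketch of that batching idea
(the batch size and helper names here are illustrative, not the real TxnUtils API):

    import java.util.ArrayList;
    import java.util.List;

    public class InClauseBatchingSketch {
      // Hypothetical limit; the real limit is driven by metastore configuration
      // and differs per backing database.
      private static final int MAX_IDS_PER_QUERY = 1000;

      /** Splits "delete from TXNS where txn_id in (...)" into bounded statements. */
      static List<String> buildDeletes(String prefix, String column, List<Long> ids) {
        List<String> queries = new ArrayList<>();
        StringBuilder current = null;
        int inCurrent = 0;
        for (long id : ids) {
          if (current == null) {
            current = new StringBuilder(prefix).append(column).append(" in (");
            inCurrent = 0;
          }
          if (inCurrent++ > 0) {
            current.append(',');
          }
          current.append(id);
          if (inCurrent == MAX_IDS_PER_QUERY) {
            queries.add(current.append(')').toString());
            current = null;
          }
        }
        if (current != null) {
          queries.add(current.append(')').toString());
        }
        return queries;
      }

      public static void main(String[] args) {
        List<Long> txnIds = new ArrayList<>();
        for (long i = 1; i <= 2500; i++) {
          txnIds.add(i);
        }
        // 2500 ids with a limit of 1000 yields three statements (1000 + 1000 + 500 ids).
        for (String q : buildDeletes("delete from TXNS where ", "txn_id", txnIds)) {
          System.out.println(q.substring(0, 40) + "... [" + q.length() + " chars]");
        }
      }
    }

Each generated statement stays within the configured IN-clause limit, which is why the callers
above loop over the returned query list and bind/execute the statements one at a time.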
+ 
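purgeCompactionHistory above walks each entity's compaction history newest-first and keeps only
the first N entries per terminal state; anything older lands in the delete set. A compact sketch
of that counting rule (state codes and retention limits are hypothetical stand-ins for the
MetastoreConf values used by the real code):

    import java.util.ArrayList;
    import java.util.List;

    public class RetentionSketch {
      // Hypothetical per-state retention limits (the real values come from MetastoreConf).
      static final int KEEP_ATTEMPTED = 2, KEEP_FAILED = 3, KEEP_SUCCEEDED = 3;

      /** Returns the positions (newest first) that fall outside the retention window. */
      static List<Integer> entriesToPurge(char[] newestFirstStates) {
        int attempted = KEEP_ATTEMPTED, failed = KEEP_FAILED, succeeded = KEEP_SUCCEEDED;
        List<Integer> purge = new ArrayList<>();
        for (int i = 0; i < newestFirstStates.length; i++) {
          switch (newestFirstStates[i]) {
            case 'a': if (--attempted < 0) purge.add(i); break;
            case 'f': if (--failed < 0) purge.add(i); break;
            case 's': if (--succeeded < 0) purge.add(i); break;
            default:  break; // unknown state: always keep it
          }
        }
        return purge;
      }

      public static void main(String[] args) {
        // The javadoc example "sssfffaaasffss" reversed, i.e. newest entry first.
        System.out.println(entriesToPurge("ssffsaaafffsss".toCharArray()));
      }
    }

This mirrors why checkForDeletion decrements a counter per state and only starts adding ids to
deleteSet once that state's retention budget is exhausted.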


[40/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
index 0000000,33f24fb..080cc52
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
@@@ -1,0 -1,504 +1,509 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.txn;
+ 
+ import com.google.common.annotations.VisibleForTesting;
++
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configurable;
+ import org.apache.hadoop.hive.common.ValidTxnList;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.common.classification.RetrySemantics;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.events.AcidWriteEvent;
+ 
+ import java.sql.SQLException;
+ import java.util.Iterator;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Set;
+ 
+ /**
+  * A handler to answer transaction related calls that come into the metastore
+  * server.
+  */
+ @InterfaceAudience.Private
+ @InterfaceStability.Evolving
+ public interface TxnStore extends Configurable {
+ 
+   enum MUTEX_KEY {
+     Initiator, Cleaner, HouseKeeper, CompactionHistory, CheckLock,
+     WriteSetCleaner, CompactionScheduler, WriteIdAllocator, MaterializationRebuild
+   }
+   // Compactor states (Should really be enum)
+   String INITIATED_RESPONSE = "initiated";
+   String WORKING_RESPONSE = "working";
+   String CLEANING_RESPONSE = "ready for cleaning";
+   String FAILED_RESPONSE = "failed";
+   String SUCCEEDED_RESPONSE = "succeeded";
+   String ATTEMPTED_RESPONSE = "attempted";
+ 
+   int TIMED_OUT_TXN_ABORT_BATCH_SIZE = 50000;
+ 
+   /**
+    * Get information about open transactions.  This gives extensive information about the
+    * transactions rather than just the list of transactions.  This should be used when the need
+    * is to see information about the transactions (e.g. show transactions).
+    * @return information about open transactions
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException;
+ 
+   /**
+    * Get list of valid transactions.  This gives just the list of transactions that are open.
+    * @return list of open transactions, as well as a high water mark.
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   GetOpenTxnsResponse getOpenTxns() throws MetaException;
+ 
+   /**
+    * Get the count for open transactions.
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   void countOpenTxns() throws MetaException;
+ 
+   /**
+    * Open a set of transactions
+    * @param rqst request to open transactions
+    * @return information on opened transactions
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException;
+ 
+   @RetrySemantics.Idempotent
+   long getTargetTxnId(String replPolicy, long sourceTxnId) throws MetaException;
+ 
+   /**
+    * Abort (rollback) a transaction.
+    * @param rqst info on transaction to abort
+    * @throws NoSuchTxnException
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException;
+ 
+   /**
+    * Abort (rollback) a list of transactions in one request.
+    * @param rqst info on transactions to abort
+    * @throws NoSuchTxnException
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   void abortTxns(AbortTxnsRequest rqst) throws NoSuchTxnException, MetaException;
+ 
+   /**
+    * Commit a transaction
+    * @param rqst info on transaction to commit
+    * @throws NoSuchTxnException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   void commitTxn(CommitTxnRequest rqst)
+     throws NoSuchTxnException, TxnAbortedException,  MetaException;
+ 
+   /**
+    * Replicate Table Write Ids state to mark aborted write ids and writeid high water mark.
+    * @param rqst info on table/partitions and writeid snapshot to replicate.
+    * @throws MetaException in case of failure
+    */
+   @RetrySemantics.Idempotent
+   void replTableWriteIdState(ReplTblWriteIdStateRequest rqst) throws MetaException;
+ 
+   /**
+    * Get invalidation info for the materialization. Currently, the materialization information
+    * only contains information about whether there was update/delete operations on the source
+    * tables used by the materialization since it was created.
+    * @param cm creation metadata for the materialization
+    * @param validTxnList valid transaction list for snapshot taken for current query
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   Materialization getMaterializationInvalidationInfo(
+       final CreationMetadata cm, final String validTxnList)
+           throws MetaException;
+ 
++  @RetrySemantics.ReadOnly
++  long getTxnIdForWriteId(String dbName, String tblName, long writeId)
++      throws MetaException;
++
+   LockResponse lockMaterializationRebuild(String dbName, String tableName, long txnId)
+       throws MetaException;
+ 
+   boolean heartbeatLockMaterializationRebuild(String dbName, String tableName, long txnId)
+       throws MetaException;
+ 
+   long cleanupMaterializationRebuildLocks(ValidTxnList validTxnList, long timeout)
+       throws MetaException;
+ 
+   /**
+    * Gets the list of valid write ids for the given table with respect to the current txn.
+    * @param rqst info on transaction and list of table names associated with given transaction
+    * @throws NoSuchTxnException
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   GetValidWriteIdsResponse getValidWriteIds(GetValidWriteIdsRequest rqst)
+           throws NoSuchTxnException,  MetaException;
+ 
+   /**
+    * Allocate a write ID for the given table and associate it with a transaction
+    * @param rqst info on transaction and table to allocate write id
+    * @throws NoSuchTxnException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   AllocateTableWriteIdsResponse allocateTableWriteIds(AllocateTableWriteIdsRequest rqst)
+     throws NoSuchTxnException, TxnAbortedException, MetaException;
+ 
+   /**
+    * Called on conversion of existing table to full acid.  Sets initial write ID to a high
+    * enough value so that we can assign unique ROW__IDs to data in existing files.
+    */
+   void seedWriteIdOnAcidConversion(InitializeTableWriteIdsRequest rqst) throws MetaException;
+ 
+   /**
+    * Obtain a lock.
+    * @param rqst information on the lock to obtain.  If the requester is part of a transaction
+    *             the txn information must be included in the lock request.
+    * @return info on the lock, including whether it was obtained.
+    * @throws NoSuchTxnException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   @RetrySemantics.CannotRetry
+   LockResponse lock(LockRequest rqst)
+     throws NoSuchTxnException, TxnAbortedException, MetaException;
+ 
+   /**
+    * Check whether a lock has been obtained.  This is used after {@link #lock} returned a wait
+    * state.
+    * @param rqst info on the lock to check
+    * @return info on the state of the lock
+    * @throws NoSuchTxnException
+    * @throws NoSuchLockException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   @RetrySemantics.SafeToRetry
+   LockResponse checkLock(CheckLockRequest rqst)
+     throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException;
+ 
+   /**
+    * Unlock a lock.  It is not legal to call this if the caller is part of a txn.  In that case
+    * the txn should be committed or aborted instead.  (Note someday this will change since
+    * multi-statement transactions will allow unlocking in the transaction.)
+    * @param rqst lock to unlock
+    * @throws NoSuchLockException
+    * @throws TxnOpenException
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   void unlock(UnlockRequest rqst)
+     throws NoSuchLockException, TxnOpenException, MetaException;
+ 
+   /**
+    * Get information on current locks.
+    * @param rqst lock information to retrieve
+    * @return lock information.
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException;
+ 
+   /**
+    * Send a heartbeat for a lock or a transaction
+    * @param ids lock and/or txn id to heartbeat
+    * @throws NoSuchTxnException
+    * @throws NoSuchLockException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   @RetrySemantics.SafeToRetry
+   void heartbeat(HeartbeatRequest ids)
+     throws NoSuchTxnException,  NoSuchLockException, TxnAbortedException, MetaException;
+ 
+   /**
+    * Heartbeat a group of transactions together
+    * @param rqst set of transactions to heartbeat
+    * @return info on txns that were heartbeated
+    * @throws MetaException
+    */
+   @RetrySemantics.SafeToRetry
+   HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst)
+     throws MetaException;
+ 
+   /**
+    * Submit a compaction request into the queue.  This is called when a user manually requests a
+    * compaction.
+    * @param rqst information on what to compact
+    * @return id of the compaction that has been started or existing id if this resource is already scheduled
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   CompactionResponse compact(CompactionRequest rqst) throws MetaException;
+ 
+   /**
+    * Show list of current compactions.
+    * @param rqst info on which compactions to show
+    * @return compaction information
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaException;
+ 
+   /**
+    * Add information on a set of dynamic partitions that participated in a transaction.
+    * @param rqst dynamic partition info.
+    * @throws NoSuchTxnException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   @RetrySemantics.SafeToRetry
+   void addDynamicPartitions(AddDynamicPartitions rqst)
+       throws NoSuchTxnException,  TxnAbortedException, MetaException;
+ 
+   /**
+    * Clean up corresponding records in metastore tables.
+    * @param type Hive object type
+    * @param db database object
+    * @param table table object
+    * @param partitionIterator partition iterator
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   void cleanupRecords(HiveObjectType type, Database db, Table table,
+                              Iterator<Partition> partitionIterator) throws MetaException;
+ 
+   @RetrySemantics.Idempotent
+   void onRename(String oldCatName, String oldDbName, String oldTabName, String oldPartName,
+       String newCatName, String newDbName, String newTabName, String newPartName)
+       throws MetaException;
+ 
+   /**
+    * Timeout transactions and/or locks.  This should only be called by the compactor.
+    */
+   @RetrySemantics.Idempotent
+   void performTimeOuts();
+ 
+   /**
+    * This will look through the completed_txn_components table and look for partitions or tables
+    * that may be ready for compaction.  Also, look through txns and txn_components tables for
+    * aborted transactions that we should add to the list.
+    * @param maxAborted Maximum number of aborted queries to allow before marking this as a
+    *                   potential compaction.
+    * @return list of CompactionInfo structs.  These will not have id, type,
+    * or runAs set since these are only potential compactions not actual ones.
+    */
+   @RetrySemantics.ReadOnly
+   Set<CompactionInfo> findPotentialCompactions(int maxAborted) throws MetaException;
+ 
+   /**
+    * Sets the user to run as.  This is for the case
+    * where the request was generated by the user and so the worker must set this value later.
+    * @param cq_id id of this entry in the queue
+    * @param user user to run the jobs as
+    */
+   @RetrySemantics.Idempotent
+   void setRunAs(long cq_id, String user) throws MetaException;
+ 
+   /**
+    * This will grab the next compaction request off of
+    * the queue, and assign it to the worker.
+    * @param workerId id of the worker calling this, will be recorded in the db
+    * @return an info element for this compaction request, or null if there is no work to do now.
+    */
+   @RetrySemantics.ReadOnly
+   CompactionInfo findNextToCompact(String workerId) throws MetaException;
+ 
+   /**
+    * This will mark an entry in the queue as compacted
+    * and put it in the ready to clean state.
+    * @param info info on the compaction entry to mark as compacted.
+    */
+   @RetrySemantics.SafeToRetry
+   void markCompacted(CompactionInfo info) throws MetaException;
+ 
+   /**
+    * Find entries in the queue that are ready to
+    * be cleaned.
+    * @return information on the entry in the queue.
+    */
+   @RetrySemantics.ReadOnly
+   List<CompactionInfo> findReadyToClean() throws MetaException;
+ 
+   /**
+    * This will remove an entry from the queue after
+    * it has been compacted.
+    * 
+    * @param info info on the compaction entry to remove
+    */
+   @RetrySemantics.CannotRetry
+   void markCleaned(CompactionInfo info) throws MetaException;
+ 
+   /**
+    * Mark a compaction entry as failed.  This will move it to the compaction history queue with a
+    * failed status.  It will NOT clean up aborted transactions in the table/partition associated
+    * with this compaction.
+    * @param info information on the compaction that failed.
+    * @throws MetaException
+    */
+   @RetrySemantics.CannotRetry
+   void markFailed(CompactionInfo info) throws MetaException;
+ 
+   /**
+    * Clean up entries from TXN_TO_WRITE_ID table less than min_uncommited_txnid as found by
+    * min(NEXT_TXN_ID.ntxn_next, min(MIN_HISTORY_LEVEL.mhl_min_open_txnid), min(Aborted TXNS.txn_id)).
+    */
+   @RetrySemantics.SafeToRetry
+   void cleanTxnToWriteIdTable() throws MetaException;
+ 
+   /**
+    * Clean up aborted transactions from txns that have no components in txn_components.  The reason such
+    * txns exist can be that no work was done in this txn (e.g. Streaming opened TransactionBatch and
+    * abandoned it w/o doing any work) or due to {@link #markCleaned(CompactionInfo)} being called.
+    */
+   @RetrySemantics.SafeToRetry
+   void cleanEmptyAbortedTxns() throws MetaException;
+ 
+   /**
+    * This will take all entries assigned to workers
+    * on a host and return them to INITIATED state.  The initiator should use this at start up to
+    * clean entries from any workers that were in the middle of compacting when the metastore
+    * shutdown.  It does not reset entries from worker threads on other hosts as those may still
+    * be working.
+    * @param hostname Name of this host.  It is assumed this prefixes the thread's worker id,
+    *                 so that a LIKE 'hostname%' pattern will match the worker id.
+    */
+   @RetrySemantics.Idempotent
+   void revokeFromLocalWorkers(String hostname) throws MetaException;
+ 
+   /**
+    * This call will return all compaction queue
+    * entries that are assigned to a worker but have exceeded the timeout back to the INITIATED state.
+    * This should be called by the initiator on start up and occasionally when running to clean up
+    * after dead threads.  At start up {@link #revokeFromLocalWorkers(String)} should be called
+    * first.
+    * @param timeout number of milliseconds since start time that should elapse before a worker is
+    *                declared dead.
+    */
+   @RetrySemantics.Idempotent
+   void revokeTimedoutWorkers(long timeout) throws MetaException;
+ 
+   /**
+    * Queries metastore DB directly to find columns in the table which have statistics information.
+    * If {@code ci} includes partition info then per partition stats info is examined, otherwise
+    * table level stats are examined.
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   List<String> findColumnsWithStats(CompactionInfo ci) throws MetaException;
+ 
+   /**
+    * Record the highest write id that the {@code ci} compaction job will pay attention to.
+    */
+   @RetrySemantics.Idempotent
+   void setCompactionHighestWriteId(CompactionInfo ci, long highestWriteId) throws MetaException;
+ 
+   /**
+    * For any given compactable entity (partition, table if not partitioned) the history of compactions
+    * may look like "sssfffaaasffss", for example.  The idea is to retain the tail (most recent) of the
+    * history such that a configurable number of each type of state is present.  Any other entries
+    * can be purged.  This scheme has the advantage of always retaining the last failure/success even if
+    * it's not recent.
+    * @throws MetaException
+    */
+   @RetrySemantics.SafeToRetry
+   void purgeCompactionHistory() throws MetaException;
+ 
+   /**
+    * WriteSet tracking is used to ensure proper transaction isolation.  This method deletes the 
+    * transaction metadata once it becomes unnecessary.  
+    */
+   @RetrySemantics.SafeToRetry
+   void performWriteSetGC();
+ 
+   /**
+    * Determine if there are enough consecutive failures compacting a table or partition that no
+    * new automatic compactions should be scheduled.  User initiated compactions do not do this
+    * check.
+    * @param ci  Table or partition to check.
+    * @return true if it is ok to compact, false if there have been too many failures.
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   boolean checkFailedCompactions(CompactionInfo ci) throws MetaException;
+ 
+   @VisibleForTesting
+   int numLocksInLockTable() throws SQLException, MetaException;
+ 
+   @VisibleForTesting
+   long setTimeout(long milliseconds);
+ 
+   @RetrySemantics.Idempotent
+   MutexAPI getMutexAPI();
+ 
+   /**
+    * This is primarily designed to provide coarse grained mutex support to operations running
+    * inside the Metastore (of which there could be several instances).  The initial goal is to 
+    * ensure that various sub-processes of the Compactor don't step on each other.
+    * 
+    * In the RDBMS world each {@code LockHandle} uses a java.sql.Connection, so use it sparingly.
+    */
+   interface MutexAPI {
+     /**
+      * The {@code key} is the name of the lock. Will acquire an exclusive lock or block.  It returns
+      * a handle which must be used to release the lock.  Each invocation returns a new handle.
+      */
+     LockHandle acquireLock(String key) throws MetaException;
+ 
+     /**
+      * Same as {@link #acquireLock(String)} but takes an already existing handle as input.  This 
+      * will associate the lock on {@code key} with the same handle.  All locks associated with
+      * the same handle will be released together.
+      * @param handle not NULL
+      */
+     void acquireLock(String key, LockHandle handle) throws MetaException;
+     interface LockHandle {
+       /**
+        * Releases all locks associated with this handle.
+        */
+       void releaseLocks();
+     }
+   }
+ 
+   /**
+    * Once a {@link java.util.concurrent.ThreadPoolExecutor} Worker submits a job to the cluster,
+    * it calls this to update the metadata.
+    * @param id {@link CompactionInfo#id}
+    */
+   @RetrySemantics.Idempotent
+   void setHadoopJobId(String hadoopJobId, long id);
+ 
+   /**
+    * Add the ACID write event information to writeNotificationLog table.
+    * @param acidWriteEvent
+    */
+   @RetrySemantics.Idempotent
+   void addWriteNotificationLog(AcidWriteEvent acidWriteEvent) throws MetaException;
+ }
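
The interface above is the metastore-side entry point for transaction and compaction state. The
sketch below shows how a caller might obtain an implementation through TxnUtils.getTxnStore() and
serialize work across metastore instances with the MutexAPI; the plain Configuration and the body
of the critical section are assumptions made only for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;

public class TxnStoreUsageSketch {
  public static void main(String[] args) throws MetaException {
    Configuration conf = new Configuration();        // normally the metastore's own configuration
    TxnStore txnStore = TxnUtils.getTxnStore(conf);  // factory defined in TxnUtils

    // Read-only call: list the currently open transactions.
    GetOpenTxnsResponse openTxns = txnStore.getOpenTxns();
    System.out.println("txn high water mark = " + openTxns.getTxn_high_water_mark());

    // Coarse-grained mutex: make sure only one Initiator cycle runs at a time.
    TxnStore.MutexAPI.LockHandle handle =
        txnStore.getMutexAPI().acquireLock(TxnStore.MUTEX_KEY.Initiator.name());
    try {
      // ... work that must not run concurrently on another metastore instance ...
    } finally {
      handle.releaseLocks();                         // releases every lock tied to this handle
    }
  }
}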

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
index 0000000,fa291d5..aac5811
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
@@@ -1,0 -1,471 +1,481 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  * <p/>
+  * http://www.apache.org/licenses/LICENSE-2.0
+  * <p/>
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.txn;
+ 
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.ValidCompactorWriteIdList;
+ import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
+ import org.apache.hadoop.hive.common.ValidReadTxnList;
+ import org.apache.hadoop.hive.common.ValidTxnList;
+ import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
 -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
 -import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResponse;
 -import org.apache.hadoop.hive.metastore.api.Table;
 -import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
 -import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
++import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import java.util.Collections;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.BitSet;
+ import java.util.List;
+ import java.util.Map;
+ 
+ public class TxnUtils {
+   private static final Logger LOG = LoggerFactory.getLogger(TxnUtils.class);
+ 
++  // Transactional stats states
++  static final public char STAT_OPEN = 'o';
++  static final public char STAT_INVALID = 'i';
++  static final public char STAT_COMMITTED = 'c';
++  static final public char STAT_OBSOLETE = 's';
++
+   /**
+    * Transform a {@link org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse} to a
+    * {@link org.apache.hadoop.hive.common.ValidTxnList}.  This assumes that the caller intends to
+    * read the files, and thus treats both open and aborted transactions as invalid.
+    * @param txns txn list from the metastore
+    * @param currentTxn Current transaction that the user has open.  If this is greater than 0 it
+    *                   will be removed from the exceptions list so that the user sees his own
+    *                   transaction as valid.
+    * @return a valid txn list.
+    */
+   public static ValidTxnList createValidReadTxnList(GetOpenTxnsResponse txns, long currentTxn) {
+     /*
+      * The highWaterMark should be min(currentTxn,txns.getTxn_high_water_mark()) assuming currentTxn>0
+      * otherwise if currentTxn=7 and 8 commits before 7, then 7 will see result of 8 which
+      * doesn't make sense for Snapshot Isolation. Of course for Read Committed, the list should
+      * include the latest committed set.
+      */
+     long highWaterMark = (currentTxn > 0) ? Math.min(currentTxn, txns.getTxn_high_water_mark())
+                                           : txns.getTxn_high_water_mark();
+ 
+     // Open txns are already sorted in ascending order. This list may or may not include HWM
+     // but it is guaranteed that list won't have txn > HWM. But, if we overwrite the HWM with currentTxn
+     // then need to truncate the exceptions list accordingly.
+     List<Long> openTxns = txns.getOpen_txns();
+ 
+     // We care only about open/aborted txns below currentTxn and hence the size should be determined
+     // for the exceptions list. The currentTxn will be missing in openTxns list only in rare case like
+     // txn is aborted by AcidHouseKeeperService and compactor actually cleans up the aborted txns.
+     // So, for such cases, we get negative value for sizeToHwm with found position for currentTxn, and so,
+     // we just negate it to get the size.
+     int sizeToHwm = (currentTxn > 0) ? Collections.binarySearch(openTxns, currentTxn) : openTxns.size();
+     sizeToHwm = (sizeToHwm < 0) ? (-sizeToHwm) : sizeToHwm;
+     long[] exceptions = new long[sizeToHwm];
+     BitSet inAbortedBits = BitSet.valueOf(txns.getAbortedBits());
+     BitSet outAbortedBits = new BitSet();
+     long minOpenTxnId = Long.MAX_VALUE;
+     int i = 0;
+     for (long txn : openTxns) {
+       // For snapshot isolation, we don't care about txns greater than current txn and so stop here.
+       // Also, we need not include current txn to exceptions list.
+       if ((currentTxn > 0) && (txn >= currentTxn)) {
+         break;
+       }
+       if (inAbortedBits.get(i)) {
+         outAbortedBits.set(i);
+       } else if (minOpenTxnId == Long.MAX_VALUE) {
+         minOpenTxnId = txn;
+       }
+       exceptions[i++] = txn;
+     }
+     return new ValidReadTxnList(exceptions, outAbortedBits, highWaterMark, minOpenTxnId);
+   }
+ 
+   /**
+    * Transform a {@link org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResponse} to a
+    * {@link org.apache.hadoop.hive.common.ValidTxnWriteIdList}.  This assumes that the caller intends to
+    * read the files, and thus treats both open and aborted transactions as invalid.
+    * @param currentTxnId current txn ID for which we get the valid write ids list
+    * @param list valid write ids list from the metastore
+    * @return a valid write IDs list for the whole transaction.
+    */
+   public static ValidTxnWriteIdList createValidTxnWriteIdList(Long currentTxnId,
+                                                               List<TableValidWriteIds> validIds) {
+     ValidTxnWriteIdList validTxnWriteIdList = new ValidTxnWriteIdList(currentTxnId);
+     for (TableValidWriteIds tableWriteIds : validIds) {
+       validTxnWriteIdList.addTableValidWriteIdList(createValidReaderWriteIdList(tableWriteIds));
+     }
+     return validTxnWriteIdList;
+   }
+ 
+   /**
+    * Transform a {@link org.apache.hadoop.hive.metastore.api.TableValidWriteIds} to a
+    * {@link org.apache.hadoop.hive.common.ValidReaderWriteIdList}.  This assumes that the caller intends to
+    * read the files, and thus treats both open and aborted write ids as invalid.
+    * @param tableWriteIds valid write ids for the given table from the metastore
+    * @return a valid write IDs list for the input table
+    */
+   public static ValidReaderWriteIdList createValidReaderWriteIdList(TableValidWriteIds tableWriteIds) {
+     String fullTableName = tableWriteIds.getFullTableName();
+     long highWater = tableWriteIds.getWriteIdHighWaterMark();
+     List<Long> invalids = tableWriteIds.getInvalidWriteIds();
+     BitSet abortedBits = BitSet.valueOf(tableWriteIds.getAbortedBits());
+     long[] exceptions = new long[invalids.size()];
+     int i = 0;
+     for (long writeId : invalids) {
+       exceptions[i++] = writeId;
+     }
+     if (tableWriteIds.isSetMinOpenWriteId()) {
+       return new ValidReaderWriteIdList(fullTableName, exceptions, abortedBits, highWater,
+                                         tableWriteIds.getMinOpenWriteId());
+     } else {
+       return new ValidReaderWriteIdList(fullTableName, exceptions, abortedBits, highWater);
+     }
+   }
+ 
+   /**
+    * Transform a {@link org.apache.hadoop.hive.metastore.api.TableValidWriteIds} to a
+    * {@link org.apache.hadoop.hive.common.ValidCompactorWriteIdList}.  This assumes that the caller intends to
+    * compact the files, and thus treats only open transactions/write ids as invalid.  Additionally any
+    * writeId &gt; highestOpenWriteId is also invalid.  This is to avoid creating something like
+    * delta_17_120 where writeId 80, for example, is still open.
+    * @param tableValidWriteIds table write id list from the metastore
+    * @return a valid write id list.
+    */
+   public static ValidCompactorWriteIdList createValidCompactWriteIdList(TableValidWriteIds tableValidWriteIds) {
+     String fullTableName = tableValidWriteIds.getFullTableName();
+     long highWater = tableValidWriteIds.getWriteIdHighWaterMark();
+     long minOpenWriteId = Long.MAX_VALUE;
+     List<Long> invalids = tableValidWriteIds.getInvalidWriteIds();
+     BitSet abortedBits = BitSet.valueOf(tableValidWriteIds.getAbortedBits());
+     long[] exceptions = new long[invalids.size()];
+     int i = 0;
+     for (long writeId : invalids) {
+       if (abortedBits.get(i)) {
+         // Only need aborted since we don't consider anything above minOpenWriteId
+         exceptions[i++] = writeId;
+       } else {
+         minOpenWriteId = Math.min(minOpenWriteId, writeId);
+       }
+     }
+     if(i < exceptions.length) {
+       exceptions = Arrays.copyOf(exceptions, i);
+     }
+     highWater = minOpenWriteId == Long.MAX_VALUE ? highWater : minOpenWriteId - 1;
+     BitSet bitSet = new BitSet(exceptions.length);
+     bitSet.set(0, exceptions.length); // for ValidCompactorWriteIdList, everything in exceptions are aborted
+     if (minOpenWriteId == Long.MAX_VALUE) {
+       return new ValidCompactorWriteIdList(fullTableName, exceptions, bitSet, highWater);
+     } else {
+       return new ValidCompactorWriteIdList(fullTableName, exceptions, bitSet, highWater, minOpenWriteId);
+     }
+   }
+ 
+   public static ValidReaderWriteIdList updateForCompactionQuery(ValidReaderWriteIdList ids) {
+     // This is based on the existing valid write ID list that was built for a select query;
+     // therefore we assume all the aborted txns, etc. were already accounted for.
+     // All we do is adjust the high watermark to only include contiguous txns.
+     Long minOpenWriteId = ids.getMinOpenWriteId();
+     if (minOpenWriteId != null && minOpenWriteId != Long.MAX_VALUE) {
+       return ids.updateHighWatermark(ids.getMinOpenWriteId() - 1);
+     }
+     return ids;
+   }
+ 
+   /**
+    * Get an instance of the TxnStore that is appropriate for this store
+    * @param conf configuration
+    * @return txn store
+    */
+   public static TxnStore getTxnStore(Configuration conf) {
+     String className = MetastoreConf.getVar(conf, ConfVars.TXN_STORE_IMPL);
+     try {
+       TxnStore handler = JavaUtils.getClass(className, TxnStore.class).newInstance();
+       handler.setConf(conf);
+       return handler;
+     } catch (Exception e) {
+       LOG.error("Unable to instantiate raw store directly in fastpath mode", e);
+       throw new RuntimeException(e);
+     }
+   }
+ 
+   /**
+    * Note, users are responsible for using the correct TxnManager. We do not look at
+    * SessionState.get().getTxnMgr().supportsAcid() here
+    * Should produce the same result as
+    * {@link org.apache.hadoop.hive.ql.io.AcidUtils#isTransactionalTable(org.apache.hadoop.hive.ql.metadata.Table)}.
+    * @return true if table is a transactional table, false otherwise
+    */
+   public static boolean isTransactionalTable(Table table) {
+     if (table == null) {
+       return false;
+     }
+     Map<String, String> parameters = table.getParameters();
+     String tableIsTransactional = parameters.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
++    return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
++  }
++
++  public static boolean isTransactionalTable(Map<String, String> parameters) {
++    if (parameters == null) {
++      return false;
++    }
++    String tableIsTransactional = parameters.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
+     return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
+   }
+ 
+   /**
+    * Should produce the same result as
+    * {@link org.apache.hadoop.hive.ql.io.AcidUtils#isAcidTable(org.apache.hadoop.hive.ql.metadata.Table)}.
+    */
+   public static boolean isAcidTable(Table table) {
+     return TxnUtils.isTransactionalTable(table) &&
+       TransactionalValidationListener.DEFAULT_TRANSACTIONAL_PROPERTY.equals(table.getParameters()
+       .get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES));
+   }
+ 
+   /**
+    * Should produce the result as <dbName>.<tableName>.
+    */
+   public static String getFullTableName(String dbName, String tableName) {
+     return dbName.toLowerCase() + "." + tableName.toLowerCase();
+   }
+ 
+   public static String[] getDbTableName(String fullTableName) {
+     return fullTableName.split("\\.");
+   }
+ 
+ 
+ 
+   /**
+    * Build a query (or queries if one query is too big but only for the case of 'IN'
+    * composite clause. For the case of 'NOT IN' clauses, multiple queries change
+    * the semantics of the intended query.
+    * E.g., Let's assume that input "inList" parameter has [5, 6] and that
+    * _DIRECT_SQL_MAX_QUERY_LENGTH_ configuration parameter only allows one value in a 'NOT IN' clause,
+    * Then having two delete statements changes the semantics of the intended SQL statement.
+    * I.e. 'delete from T where a not in (5)' and 'delete from T where a not in (6)' sequence
+    * is not equal to 'delete from T where a not in (5, 6)'.)
+    * with one or multiple 'IN' or 'NOT IN' clauses with the given input parameters.
+    *
+    * Note that this method currently supports only a single column for
+    * IN/NOT IN clauses and that only covers OR-based composite 'IN' clause and
+    * AND-based composite 'NOT IN' clause.
+    * For example, for 'IN' clause case, the method will build a query with OR.
+    * E.g., "id in (1,2,3) OR id in (4,5,6)".
+    * For 'NOT IN' case, NOT IN list is broken into multiple 'NOT IN" clauses connected by AND.
+    *
+    * Note that, in this method, "a composite 'IN' clause" is defined as "a list of multiple 'IN'
+    * clauses in a query".
+    *
+    * @param queries   OUT: Array of query strings
+    * @param prefix    IN:  Part of the query that comes before IN list
+    * @param suffix    IN:  Part of the query that comes after IN list
+    * @param inList    IN:  the list with IN list values
+    * @param inColumn  IN:  single column name of IN list operator
+    * @param addParens IN:  add a pair of parenthesis outside the IN lists
+    *                       e.g. "(id in (1,2,3) OR id in (4,5,6))"
+    * @param notIn     IN:  is this for building a 'NOT IN' composite clause?
+    * @return          OUT: a list of the count of IN list values that are in each of the corresponding queries
+    */
+   public static List<Integer> buildQueryWithINClause(Configuration conf,
+                                             List<String> queries,
+                                             StringBuilder prefix,
+                                             StringBuilder suffix,
+                                             List<Long> inList,
+                                             String inColumn,
+                                             boolean addParens,
+                                             boolean notIn) {
+     List<String> inListStrings = new ArrayList<>(inList.size());
+     for (Long aLong : inList) {
+       inListStrings.add(aLong.toString());
+     }
+     return buildQueryWithINClauseStrings(conf, queries, prefix, suffix,
+         inListStrings, inColumn, addParens, notIn);
+ 
+   }
+   /**
+    * Build a query (or queries if one query is too big but only for the case of 'IN'
+    * composite clause. For the case of 'NOT IN' clauses, multiple queries change
+    * the semantics of the intended query.
+    * E.g., Let's assume that input "inList" parameter has [5, 6] and that
+    * _DIRECT_SQL_MAX_QUERY_LENGTH_ configuration parameter only allows one value in a 'NOT IN' clause,
+    * Then having two delete statements changes the semantics of the intended SQL statement.
+    * I.e. 'delete from T where a not in (5)' and 'delete from T where a not in (6)' sequence
+    * is not equal to 'delete from T where a not in (5, 6)'.)
+    * with one or multiple 'IN' or 'NOT IN' clauses with the given input parameters.
+    *
+    * Note that this method currently supports only a single column for
+    * IN/NOT IN clauses and that only covers OR-based composite 'IN' clause and
+    * AND-based composite 'NOT IN' clause.
+    * For example, for 'IN' clause case, the method will build a query with OR.
+    * E.g., "id in (1,2,3) OR id in (4,5,6)".
+    * For 'NOT IN' case, NOT IN list is broken into multiple 'NOT IN" clauses connected by AND.
+    *
+    * Note that, in this method, "a composite 'IN' clause" is defined as "a list of multiple 'IN'
+    * clauses in a query".
+    *
+    * @param queries   OUT: Array of query strings
+    * @param prefix    IN:  Part of the query that comes before IN list
+    * @param suffix    IN:  Part of the query that comes after IN list
+    * @param inList    IN:  the list with IN list values
+    * @param inColumn  IN:  single column name of IN list operator
+    * @param addParens IN:  add a pair of parenthesis outside the IN lists
+    *                       e.g. "(id in (1,2,3) OR id in (4,5,6))"
+    * @param notIn     IN:  is this for building a 'NOT IN' composite clause?
+    * @return          OUT: a list of the count of IN list values that are in each of the corresponding queries
+    */
+   public static List<Integer> buildQueryWithINClauseStrings(Configuration conf, List<String> queries, StringBuilder prefix,
+       StringBuilder suffix, List<String> inList, String inColumn, boolean addParens, boolean notIn) {
+     // Get configuration parameters
+     int maxQueryLength = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH);
+     int batchSize = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE);
+ 
+     // Check parameter set validity as a public method.
+     if (inList == null || inList.size() == 0 || maxQueryLength <= 0 || batchSize <= 0) {
+       throw new IllegalArgumentException("The IN list is empty!");
+     }
+ 
+     // Define constants and local variables.
+     int inListSize = inList.size();
+     StringBuilder buf = new StringBuilder();
+ 
+     int cursor4InListArray = 0,  // cursor for the "inList" array.
+         cursor4InClauseElements = 0,  // cursor for an element list per an 'IN'/'NOT IN'-clause.
+         cursor4queryOfInClauses = 0;  // cursor for in-clause lists per a query.
+     boolean nextItemNeeded = true;
+     boolean newInclausePrefixJustAppended = false;
+     StringBuilder nextValue = new StringBuilder("");
+     StringBuilder newInclausePrefix =
+       new StringBuilder(notIn ? " and " + inColumn + " not in (" :
+                                 " or " + inColumn + " in (");
+     List<Integer> ret = new ArrayList<>();
+     int currentCount = 0;
+ 
+     // Loop over the given inList elements.
+     while( cursor4InListArray < inListSize || !nextItemNeeded) {
+       if (cursor4queryOfInClauses == 0) {
+         // Append prefix
+         buf.append(prefix);
+         if (addParens) {
+           buf.append("(");
+         }
+         buf.append(inColumn);
+ 
+         if (notIn) {
+           buf.append(" not in (");
+         } else {
+           buf.append(" in (");
+         }
+         cursor4queryOfInClauses++;
+         newInclausePrefixJustAppended = false;
+       }
+ 
+       // Get the next "inList" value element if needed.
+       if (nextItemNeeded) {
+         nextValue.setLength(0);
+         nextValue.append(String.valueOf(inList.get(cursor4InListArray++)));
+         nextItemNeeded = false;
+       }
+ 
+       // Compute the size of a query when the 'nextValue' is added to the current query.
+       int querySize = querySizeExpected(buf.length(), nextValue.length(), suffix.length(), addParens);
+ 
+       if (querySize > maxQueryLength * 1024) {
+         // Check an edge case where the DIRECT_SQL_MAX_QUERY_LENGTH does not allow one 'IN' clause with single value.
+         if (cursor4queryOfInClauses == 1 && cursor4InClauseElements == 0) {
+           throw new IllegalArgumentException("The current " + ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH.getVarname() + " is set too small to have one IN clause with single value!");
+         }
+ 
+         // Check an edge case to throw an Exception if we cannot build a single query for 'NOT IN' clause cases, as mentioned in the method comments.
+         if (notIn) {
+           throw new IllegalArgumentException("The NOT IN list has too many elements for the current " + ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH.getVarname() + "!");
+         }
+ 
+         // Wrap up the current query string since we can not add another "inList" element value.
+         if (newInclausePrefixJustAppended) {
+           buf.delete(buf.length()-newInclausePrefix.length(), buf.length());
+         }
+ 
+         buf.setCharAt(buf.length() - 1, ')'); // replace the "commar" to finish a 'IN' clause string.
+ 
+         if (addParens) {
+           buf.append(")");
+         }
+ 
+         buf.append(suffix);
+         queries.add(buf.toString());
+         ret.add(currentCount);
+ 
+         // Prepare a new query string.
+         buf.setLength(0);
+         currentCount = 0;
+         cursor4queryOfInClauses = cursor4InClauseElements = 0;
+         querySize = 0;
+         newInclausePrefixJustAppended = false;
+         continue;
+       } else if (cursor4InClauseElements >= batchSize-1 && cursor4InClauseElements != 0) {
+         // Finish the current 'IN'/'NOT IN' clause and start a new clause.
+         buf.setCharAt(buf.length() - 1, ')'); // replace the "commar".
+         buf.append(newInclausePrefix.toString());
+ 
+         newInclausePrefixJustAppended = true;
+ 
+         // increment cursor for per-query IN-clause list
+         cursor4queryOfInClauses++;
+         cursor4InClauseElements = 0;
+       } else {
+         buf.append(nextValue.toString()).append(",");
+         currentCount++;
+         nextItemNeeded = true;
+         newInclausePrefixJustAppended = false;
+         // increment cursor for elements per 'IN'/'NOT IN' clause.
+         cursor4InClauseElements++;
+       }
+     }
+ 
+     // Finish the last query.
+     if (newInclausePrefixJustAppended) {
+       buf.delete(buf.length() - newInclausePrefix.length(), buf.length());
+     }
+     buf.setCharAt(buf.length() - 1, ')'); // replace the trailing comma.
+     if (addParens) {
+       buf.append(")");
+     }
+     buf.append(suffix);
+     queries.add(buf.toString());
+     ret.add(currentCount);
+     return ret;
+   }
+ 
+   /**
+    * Compute and return the size of a query statement with the given parameters as input variables.
+    *
+    * @param sizeSoFar     size of the current contents of the buf
+    * @param sizeNextItem      size of the next 'IN' clause element value.
+    * @param suffixSize    size of the suffix for a query statement
+    * @param addParens     Do we add an additional parenthesis?
+    */
+   private static int querySizeExpected(int sizeSoFar,
+                                        int sizeNextItem,
+                                        int suffixSize,
+                                        boolean addParens) {
+ 
+     int size = sizeSoFar + sizeNextItem + suffixSize;
+ 
+     if (addParens) {
+        size++;
+     }
+ 
+     return size;
+   }
+ }
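
The buildQueryWithINClause() helpers above split a long IN list across as many queries as the
DIRECT_SQL_MAX_QUERY_LENGTH and DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE limits require, returning how
many list values landed in each generated query. A hypothetical driver is sketched below; the
table and column names are made up for illustration and the default size limits are assumed.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;

public class InClauseSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();    // default DIRECT_SQL_* limits apply
    List<String> queries = new ArrayList<>();
    StringBuilder prefix = new StringBuilder("delete from TXN_COMPONENTS where ");
    StringBuilder suffix = new StringBuilder();  // nothing needed after the IN list here
    List<Long> txnIds = Arrays.asList(5L, 6L, 7L);

    // Builds "delete from TXN_COMPONENTS where tc_txnid in (5,6,7)" (split into more queries only
    // if the configured limits force it) and reports how many ids went into each query.
    List<Integer> counts = TxnUtils.buildQueryWithINClause(
        conf, queries, prefix, suffix, txnIds, "tc_txnid", false /* addParens */, false /* notIn */);

    for (int i = 0; i < queries.size(); i++) {
      System.out.println(counts.get(i) + " ids in: " + queries.get(i));
    }
  }
}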


[24/54] [abbrv] hive git commit: HIVE-19532 : fix tests for master-txnstats branch - more tests (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionResponse.java
new file mode 100644
index 0000000..7c756ab
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionResponse.java
@@ -0,0 +1,283 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class RenamePartitionResponse implements org.apache.thrift.TBase<RenamePartitionResponse, RenamePartitionResponse._Fields>, java.io.Serializable, Cloneable, Comparable<RenamePartitionResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("RenamePartitionResponse");
+
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new RenamePartitionResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new RenamePartitionResponseTupleSchemeFactory());
+  }
+
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+;
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(RenamePartitionResponse.class, metaDataMap);
+  }
+
+  public RenamePartitionResponse() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public RenamePartitionResponse(RenamePartitionResponse other) {
+  }
+
+  public RenamePartitionResponse deepCopy() {
+    return new RenamePartitionResponse(this);
+  }
+
+  @Override
+  public void clear() {
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof RenamePartitionResponse)
+      return this.equals((RenamePartitionResponse)that);
+    return false;
+  }
+
+  public boolean equals(RenamePartitionResponse that) {
+    if (that == null)
+      return false;
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(RenamePartitionResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("RenamePartitionResponse(");
+    boolean first = true;
+
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class RenamePartitionResponseStandardSchemeFactory implements SchemeFactory {
+    public RenamePartitionResponseStandardScheme getScheme() {
+      return new RenamePartitionResponseStandardScheme();
+    }
+  }
+
+  private static class RenamePartitionResponseStandardScheme extends StandardScheme<RenamePartitionResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, RenamePartitionResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, RenamePartitionResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class RenamePartitionResponseTupleSchemeFactory implements SchemeFactory {
+    public RenamePartitionResponseTupleScheme getScheme() {
+      return new RenamePartitionResponseTupleScheme();
+    }
+  }
+
+  private static class RenamePartitionResponseTupleScheme extends TupleScheme<RenamePartitionResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, RenamePartitionResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, RenamePartitionResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+    }
+  }
+
+}
+


[41/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 0000000,9dd3787..7fd0642
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@@ -1,0 -1,5051 +1,5094 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.txn;
+ 
+ import java.io.PrintWriter;
+ import java.nio.ByteBuffer;
+ import java.sql.Connection;
+ import java.sql.Driver;
+ import java.sql.ResultSet;
+ import java.sql.SQLException;
+ import java.sql.SQLFeatureNotSupportedException;
+ import java.sql.Savepoint;
+ import java.sql.Statement;
+ import java.time.Instant;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.BitSet;
+ import java.util.Collections;
+ import java.util.Comparator;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.Iterator;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Properties;
+ import java.util.Set;
+ import java.util.SortedSet;
+ import java.util.TreeSet;
+ import java.util.concurrent.ConcurrentHashMap;
+ import java.util.concurrent.Semaphore;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicInteger;
+ import java.util.concurrent.locks.ReentrantLock;
+ import java.util.regex.Pattern;
+ 
+ import javax.sql.DataSource;
+ 
+ import org.apache.commons.lang.ArrayUtils;
+ import org.apache.commons.lang.NotImplementedException;
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.ValidReadTxnList;
+ import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
+ import org.apache.hadoop.hive.common.ValidTxnList;
+ import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.common.classification.RetrySemantics;
+ import org.apache.hadoop.hive.metastore.DatabaseProduct;
+ import org.apache.hadoop.hive.metastore.Warehouse;
+ import org.apache.hadoop.hive.metastore.MetaStoreListenerNotifier;
+ import org.apache.hadoop.hive.metastore.TransactionalMetaStoreEventListener;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider;
+ import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory;
+ import org.apache.hadoop.hive.metastore.events.AbortTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.AllocWriteIdEvent;
+ import org.apache.hadoop.hive.metastore.events.CommitTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.OpenTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.AcidWriteEvent;
+ import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+ import org.apache.hadoop.hive.metastore.metrics.Metrics;
+ import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+ import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.StringableMap;
+ import org.apache.hadoop.util.StringUtils;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
++
+ import com.google.common.annotations.VisibleForTesting;
+ 
+ /**
+  * A handler to answer transaction related calls that come into the metastore
+  * server.
+  *
+  * Note on log messages:  Please include txnid:X and lockid info using
+  * {@link JavaUtils#txnIdToString(long)}
+  * and {@link JavaUtils#lockIdToString(long)} in all messages.
+  * The txnid:X and lockid:Y matches how Thrift object toString() methods are generated,
+  * so keeping the format consistent makes grep'ing the logs much easier.
+  *
+  * Note on HIVE_LOCKS.hl_last_heartbeat.
+  * For locks that are part of a transaction, we set this to 0 (we would rather set it to NULL but
+  * currently the DB schema has this NOT NULL) and only update/read the heartbeat from the corresponding
+  * transaction in TXNS.
+  *
+  * In general there can be multiple metastores where this logic can execute, thus the DB is
+  * used to ensure proper mutexing of operations.
+  * Select ... For Update (or equivalent: either MsSql with(updlock) or actual Update stmt) is
+  * used to properly sequence operations.  Most notably:
+  * 1. various sequence IDs are generated with aid of this mutex
+  * 2. ensuring that each (Hive) Transaction state is transitioned atomically.  Transaction state
+  *  includes its actual state (Open, Aborted) as well as its lock list/component list.  Thus all
+  *  per transaction ops, either start by update/delete of the relevant TXNS row or do S4U on that row.
+  *  This allows almost all operations to run at READ_COMMITTED and minimizes DB deadlocks.
+  * 3. checkLock() - this is mutexed entirely since we must ensure that while we check if some lock
+  *  can be granted, no other (strictly speaking "earlier") lock can change state.
+  *
+  * The exception to this is Derby, which doesn't support proper S4U.  Derby is always running embedded
+  * (this is the only supported configuration for Derby)
+  * in the same JVM as HiveMetaStoreHandler, thus we use a JVM-wide lock to properly sequence the operations.
+  *
+  * {@link #derbyLock}
+  *
+  * If we ever decide to run remote Derby server, according to
+  * https://db.apache.org/derby/docs/10.0/manuals/develop/develop78.html all transactions will be
+  * serialized, so that would also work, though it has not been tested.
+  *
+  * General design note:
+  * It's imperative that any operation on a txn (e.g. commit) ensures (atomically) that the txn is
+  * still valid and active.  In the code this is usually achieved at the same time the txn record
+  * is locked for some operation.
+  * 
+  * Note on retry logic:
+  * Metastore has retry logic in both {@link org.apache.hadoop.hive.metastore.RetryingMetaStoreClient}
+  * and {@link org.apache.hadoop.hive.metastore.RetryingHMSHandler}.  The retry logic there is very
+  * generic and is not aware whether the operations are idempotent or not.  (This is separate from
+  * retry logic here in TxnHandler which can/does retry DB errors intelligently).  The worst case is
+  * when an op here issues a successful commit against the RDBMS but the calling stack doesn't
+  * receive the ack and retries.  (If an op fails before commit, it's trivially idempotent)
+  * Thus the ops here need to be made idempotent as much as possible or
+  * the metastore call stack should have logic not to retry.  There are {@link RetrySemantics}
+  * annotations to document the behavior.
+  */
+ @InterfaceAudience.Private
+ @InterfaceStability.Evolving
+ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
+ 
+   static final protected char INITIATED_STATE = 'i';
+   static final protected char WORKING_STATE = 'w';
+   static final protected char READY_FOR_CLEANING = 'r';
+   static final char FAILED_STATE = 'f';
+   static final char SUCCEEDED_STATE = 's';
+   static final char ATTEMPTED_STATE = 'a';
+ 
+   // Compactor types
+   static final protected char MAJOR_TYPE = 'a';
+   static final protected char MINOR_TYPE = 'i';
+ 
+   // Transaction states
+   static final protected char TXN_ABORTED = 'a';
+   static final protected char TXN_OPEN = 'o';
+   //todo: make these like OperationType and remove above char constants
+   enum TxnStatus {OPEN, ABORTED, COMMITTED, UNKNOWN}
+ 
+   public enum TxnType {
+     DEFAULT(0), REPL_CREATED(1), READ_ONLY(2);
+ 
+     private final int value;
+     TxnType(int value) {
+       this.value = value;
+     }
+ 
+     public int getValue() {
+       return value;
+     }
+   }
+ 
+   // Lock states
+   static final protected char LOCK_ACQUIRED = 'a';
+   static final protected char LOCK_WAITING = 'w';
+ 
+   // Lock types
+   static final protected char LOCK_EXCLUSIVE = 'e';
+   static final protected char LOCK_SHARED = 'r';
+   static final protected char LOCK_SEMI_SHARED = 'w';
+ 
+   static final private int ALLOWED_REPEATED_DEADLOCKS = 10;
+   static final private Logger LOG = LoggerFactory.getLogger(TxnHandler.class.getName());
+ 
+   static private DataSource connPool;
+   private static DataSource connPoolMutex;
+   static private boolean doRetryOnConnPool = false;
+ 
+   private List<TransactionalMetaStoreEventListener> transactionalListeners;
+   
+   private enum OpertaionType {
+     SELECT('s'), INSERT('i'), UPDATE('u'), DELETE('d');
+     private final char sqlConst;
+     OpertaionType(char sqlConst) {
+       this.sqlConst = sqlConst;
+     }
+     public String toString() {
+       return Character.toString(sqlConst);
+     }
+     public static OpertaionType fromString(char sqlConst) {
+       switch (sqlConst) {
+         case 's':
+           return SELECT;
+         case 'i':
+           return INSERT;
+         case 'u':
+           return UPDATE;
+         case 'd':
+           return DELETE;
+         default:
+           throw new IllegalArgumentException(quoteChar(sqlConst));
+       }
+     }
+     public static OpertaionType fromDataOperationType(DataOperationType dop) {
+       switch (dop) {
+         case SELECT:
+           return OpertaionType.SELECT;
+         case INSERT:
+           return OpertaionType.INSERT;
+         case UPDATE:
+           return OpertaionType.UPDATE;
+         case DELETE:
+           return OpertaionType.DELETE;
+         default:
+           throw new IllegalArgumentException("Unexpected value: " + dop);
+       }
+     }
+   }
+ 
+   // Maximum number of open transactions that's allowed
+   private static volatile int maxOpenTxns = 0;
+   // Whether number of open transactions reaches the threshold
+   private static volatile boolean tooManyOpenTxns = false;
+ 
+   /**
+    * Number of consecutive deadlocks we have seen
+    */
+   private int deadlockCnt;
+   private long deadlockRetryInterval;
+   protected Configuration conf;
+   private static DatabaseProduct dbProduct;
+   private static SQLGenerator sqlGenerator;
+ 
+   // (End user) Transaction timeout, in milliseconds.
+   private long timeout;
+ 
+   private String identifierQuoteString; // quotes to use for quoting tables, where necessary
+   private long retryInterval;
+   private int retryLimit;
+   private int retryNum;
+   // Current number of open txns
+   private AtomicInteger numOpenTxns;
+ 
+   /**
+    * Derby specific concurrency control
+    */
+   private static final ReentrantLock derbyLock = new ReentrantLock(true);
+   /**
+    * must be static since even in UT there may be > 1 instance of TxnHandler
+    * (e.g. via Compactor services)
+    */
+   private final static ConcurrentHashMap<String, Semaphore> derbyKey2Lock = new ConcurrentHashMap<>();
+   private static final String hostname = JavaUtils.hostname();
+ 
+   // Private methods should never catch SQLException and then throw MetaException.  The public
+   // methods depend on SQLException coming back so they can detect and handle deadlocks.  Private
+   // methods should only throw MetaException when they explicitly know there's a logic error and
+   // they want to throw past the public methods.
+   //
+   // All public methods that write to the database have to check for deadlocks when a SQLException
+   // comes back and handle it if they see one.  This has to be done with the connection pooling
+   // in mind.  To do this they should call checkRetryable() AFTER rolling back the db transaction,
+   // and then they should catch RetryException and call themselves recursively. See commitTxn for an example.
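To make the comment above concrete, here is a condensed, self-contained sketch of that retry discipline. The class name RetryPatternSketch, the method someWriteOperation and the stub bodies are placeholders; the member names (getDbConn, rollbackDBConn, checkRetryable, closeDbConn, RetryException) mirror the ones this class uses, e.g. in getOpenTxnsInfo() and commitTxn() below.

    import java.sql.Connection;
    import java.sql.SQLException;

    // Illustrative only: the shape of every retryable public method in TxnHandler.
    class RetryPatternSketch {
      static class RetryException extends Exception {}

      // Stubs so the sketch compiles on its own; the real methods live in TxnHandler.
      Connection getDbConn(int isolation) throws SQLException { throw new SQLException("stub"); }
      void rollbackDBConn(Connection c) { }
      void closeDbConn(Connection c) { }
      void checkRetryable(Connection c, SQLException e, String op) throws RetryException {
        // The real method inspects the SQLException (deadlock, lost connection, ...) and
        // throws RetryException when the whole operation should simply be re-executed.
      }

      public void someWriteOperation() throws Exception {
        try {
          Connection dbConn = null;
          try {
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            // ... issue SQL against the transaction tables ...
            dbConn.commit();
          } catch (SQLException e) {
            rollbackDBConn(dbConn);                          // roll back BEFORE deciding on a retry
            checkRetryable(dbConn, e, "someWriteOperation");
            throw new Exception("Unable to update transaction database", e);
          } finally {
            closeDbConn(dbConn);
          }
        } catch (RetryException e) {
          someWriteOperation();                              // retry by calling ourselves again
        }
      }
    }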
+ 
+   public TxnHandler() {
+   }
+ 
+   /**
+    * This is logically part of c'tor and must be called prior to any other method.
+    * Not physically part of c'tor due to use of reflection
+    */
+   public void setConf(Configuration conf) {
+     this.conf = conf;
+ 
+     checkQFileTestHack();
+ 
+     synchronized (TxnHandler.class) {
+       if (connPool == null) {
+         Connection dbConn = null;
+         // Set up the JDBC connection pool
+         try {
+           int maxPoolSize = MetastoreConf.getIntVar(conf, ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS);
+           long getConnectionTimeoutMs = 30000;
+           connPool = setupJdbcConnectionPool(conf, maxPoolSize, getConnectionTimeoutMs);
+           /*the mutex pools should ideally be somewhat larger since some operations require 1
+            connection from each pool and we want to avoid taking a connection from primary pool
+            and then blocking because mutex pool is empty.  There is only 1 thread in any HMS trying
+            to mutex on each MUTEX_KEY except MUTEX_KEY.CheckLock.  The CheckLock operation gets a
+            connection from connPool first, then connPoolMutex.  All others, go in the opposite
+            order (not very elegant...).  So number of connection requests for connPoolMutex cannot
+            exceed (size of connPool + MUTEX_KEY.values().length - 1).*/
+           connPoolMutex = setupJdbcConnectionPool(conf, maxPoolSize + MUTEX_KEY.values().length, getConnectionTimeoutMs);
+           dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+           determineDatabaseProduct(dbConn);
+           sqlGenerator = new SQLGenerator(dbProduct, conf);
+         } catch (SQLException e) {
+           String msg = "Unable to instantiate JDBC connection pooling, " + e.getMessage();
+           LOG.error(msg);
+           throw new RuntimeException(e);
+         } finally {
+           closeDbConn(dbConn);
+         }
+       }
+     }
+ 
+     numOpenTxns = Metrics.getOrCreateGauge(MetricsConstants.NUM_OPEN_TXNS);
+ 
+     timeout = MetastoreConf.getTimeVar(conf, ConfVars.TXN_TIMEOUT, TimeUnit.MILLISECONDS);
+     buildJumpTable();
+     retryInterval = MetastoreConf.getTimeVar(conf, ConfVars.HMS_HANDLER_INTERVAL,
+         TimeUnit.MILLISECONDS);
+     retryLimit = MetastoreConf.getIntVar(conf, ConfVars.HMS_HANDLER_ATTEMPTS);
+     deadlockRetryInterval = retryInterval / 10;
+     maxOpenTxns = MetastoreConf.getIntVar(conf, ConfVars.MAX_OPEN_TXNS);
+ 
+     try {
+       transactionalListeners = MetaStoreUtils.getMetaStoreListeners(
+               TransactionalMetaStoreEventListener.class,
+                       conf, MetastoreConf.getVar(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS));
+     } catch(MetaException e) {
+       String msg = "Unable to get transaction listeners, " + e.getMessage();
+       LOG.error(msg);
+       throw new RuntimeException(e);
+     }
+   }
+ 
+   @Override
+   public Configuration getConf() {
+     return conf;
+   }
+ 
+   @Override
+   @RetrySemantics.ReadOnly
+   public GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException {
+     try {
+       // We need to figure out the current transaction number and the list of
+       // open transactions.  To avoid needing a transaction on the underlying
+       // database we'll look at the current transaction number first.  If it
+       // subsequently shows up in the open list that's ok.
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       try {
+         /**
+          * This method can run at READ_COMMITTED as long as
+          * {@link #openTxns(org.apache.hadoop.hive.metastore.api.OpenTxnRequest)} is atomic.
+          * More specifically, as long as advancing TransactionID in NEXT_TXN_ID is atomic with
+          * adding corresponding entries into TXNS.  The reason is that any txnid below HWM
+          * is either in TXNS and thus considered open (Open/Aborted) or it's considered Committed.
+          */
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "select ntxn_next - 1 from NEXT_TXN_ID";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (!rs.next()) {
+           throw new MetaException("Transaction tables not properly " +
+             "initialized, no record found in next_txn_id");
+         }
+         long hwm = rs.getLong(1);
+         if (rs.wasNull()) {
+           throw new MetaException("Transaction tables not properly " +
+             "initialized, null record found in next_txn_id");
+         }
+         close(rs);
+         List<TxnInfo> txnInfos = new ArrayList<>();
+         //need the WHERE clause below to ensure consistent results with READ_COMMITTED
+         s = "select txn_id, txn_state, txn_user, txn_host, txn_started, txn_last_heartbeat from " +
+             "TXNS where txn_id <= " + hwm;
+         LOG.debug("Going to execute query<" + s + ">");
+         rs = stmt.executeQuery(s);
+         while (rs.next()) {
+           char c = rs.getString(2).charAt(0);
+           TxnState state;
+           switch (c) {
+             case TXN_ABORTED:
+               state = TxnState.ABORTED;
+               break;
+ 
+             case TXN_OPEN:
+               state = TxnState.OPEN;
+               break;
+ 
+             default:
+               throw new MetaException("Unexpected transaction state " + c +
+                 " found in txns table");
+           }
+           TxnInfo txnInfo = new TxnInfo(rs.getLong(1), state, rs.getString(3), rs.getString(4));
+           txnInfo.setStartedTime(rs.getLong(5));
+           txnInfo.setLastHeartbeatTime(rs.getLong(6));
+           txnInfos.add(txnInfo);
+         }
+         LOG.debug("Going to rollback");
+         dbConn.rollback();
+         return new GetOpenTxnsInfoResponse(hwm, txnInfos);
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "getOpenTxnsInfo");
+         throw new MetaException("Unable to select from transaction database: " + getMessage(e)
+           + StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return getOpenTxnsInfo();
+     }
+   }
++
+   @Override
+   @RetrySemantics.ReadOnly
+   public GetOpenTxnsResponse getOpenTxns() throws MetaException {
+     try {
+       // We need to figure out the current transaction number and the list of
+       // open transactions.  To avoid needing a transaction on the underlying
+       // database we'll look at the current transaction number first.  If it
+       // subsequently shows up in the open list that's ok.
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       try {
+         /**
+          * This runs at READ_COMMITTED for exactly the same reason as {@link #getOpenTxnsInfo()}
+          */
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "select ntxn_next - 1 from NEXT_TXN_ID";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (!rs.next()) {
+           throw new MetaException("Transaction tables not properly " +
+             "initialized, no record found in next_txn_id");
+         }
+         long hwm = rs.getLong(1);
+         if (rs.wasNull()) {
+           throw new MetaException("Transaction tables not properly " +
+             "initialized, null record found in next_txn_id");
+         }
+         close(rs);
+         List<Long> openList = new ArrayList<>();
+         //need the WHERE clause below to ensure consistent results with READ_COMMITTED
+         s = "select txn_id, txn_state from TXNS where txn_id <= " + hwm + " order by txn_id";
+         LOG.debug("Going to execute query<" + s + ">");
+         rs = stmt.executeQuery(s);
+         long minOpenTxn = Long.MAX_VALUE;
+         BitSet abortedBits = new BitSet();
+         while (rs.next()) {
+           long txnId = rs.getLong(1);
+           openList.add(txnId);
+           char c = rs.getString(2).charAt(0);
+           if(c == TXN_OPEN) {
+             minOpenTxn = Math.min(minOpenTxn, txnId);
+           } else if (c == TXN_ABORTED) {
+             abortedBits.set(openList.size() - 1);
+           }
+         }
+         LOG.debug("Going to rollback");
+         dbConn.rollback();
+         ByteBuffer byteBuffer = ByteBuffer.wrap(abortedBits.toByteArray());
+         GetOpenTxnsResponse otr = new GetOpenTxnsResponse(hwm, openList, byteBuffer);
+         if(minOpenTxn < Long.MAX_VALUE) {
+           otr.setMin_open_txn(minOpenTxn);
+         }
+         return otr;
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "getOpenTxns");
+         throw new MetaException("Unable to select from transaction database, "
+           + StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return getOpenTxns();
+     }
+   }
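The aborted-transaction bitmap built above maps bit i to the i-th entry of the open-transaction list before it is wrapped in a ByteBuffer for GetOpenTxnsResponse. A small stand-alone round trip of that encoding, using only the JDK (the txn ids are made up):

    import java.nio.ByteBuffer;
    import java.util.BitSet;

    class AbortedBitsExample {
      public static void main(String[] args) {
        // openList = [5, 7, 9]; txn 7 (index 1) is aborted, 5 and 9 are open.
        BitSet abortedBits = new BitSet();
        abortedBits.set(1);
        ByteBuffer wire = ByteBuffer.wrap(abortedBits.toByteArray());  // what the response carries

        // A reader reverses the encoding the same way.
        byte[] bytes = new byte[wire.remaining()];
        wire.get(bytes);
        BitSet decoded = BitSet.valueOf(bytes);
        System.out.println(decoded.get(1));  // true  -> the 2nd open txn (7) is aborted
        System.out.println(decoded.get(0));  // false -> txn 5 is still open
      }
    }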
+ 
+   /**
+    * Retry-by-caller note:
+    * Worst case, it will leave an open txn which will time out.
+    */
+   @Override
+   @RetrySemantics.Idempotent
+   public OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException {
+     if (!tooManyOpenTxns && numOpenTxns.get() >= maxOpenTxns) {
+       tooManyOpenTxns = true;
+     }
+     if (tooManyOpenTxns) {
+       if (numOpenTxns.get() < maxOpenTxns * 0.9) {
+         tooManyOpenTxns = false;
+       } else {
+         LOG.warn("Maximum allowed number of open transactions (" + maxOpenTxns + ") has been " +
+             "reached. Current number of open transactions: " + numOpenTxns);
+         throw new MetaException("Maximum allowed number of open transactions has been reached. " +
+             "See hive.max.open.txns.");
+       }
+     }
+ 
+     int numTxns = rqst.getNum_txns();
+     if (numTxns <= 0) {
+       throw new MetaException("Invalid input for number of txns: " + numTxns);
+     }
+ 
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         lockInternal();
+         /**
+          * To make {@link #getOpenTxns()}/{@link #getOpenTxnsInfo()} work correctly, this operation must ensure
+          * that advancing the counter in NEXT_TXN_ID and adding appropriate entries to TXNS is atomic.
+          * Also, advancing the counter must work when multiple metastores are running.
+          * SELECT ... FOR UPDATE is used to prevent
+          * concurrent DB transactions being rolled back due to Write-Write conflict on NEXT_TXN_ID.
+          *
+          * In the current design, there can be several metastore instances running in a given Warehouse.
+          * This makes ideas like reserving a range of IDs to save trips to DB impossible.  For example,
+          * a client may go to MS1 and start a transaction with ID 500 to update a particular row.
+          * Now the same client will start another transaction, except it ends up on MS2 and may get
+          * transaction ID 400 and update the same row.  Now the merge that happens to materialize the snapshot
+          * on read will think the version of the row from transaction ID 500 is the latest one.
+          *
+          * Longer term we can consider running Active-Passive MS (at least wrt ACID operations).  This
+          * set could support a write-through cache for added performance.
+          */
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         // Make sure the user has not requested an insane amount of txns.
+         int maxTxns = MetastoreConf.getIntVar(conf, ConfVars.TXN_MAX_OPEN_BATCH);
+         if (numTxns > maxTxns) numTxns = maxTxns;
+ 
+         stmt = dbConn.createStatement();
+         List<Long> txnIds = openTxns(dbConn, stmt, rqst);
+ 
+         LOG.debug("Going to commit");
+         dbConn.commit();
+         return new OpenTxnsResponse(txnIds);
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "openTxns(" + rqst + ")");
+         throw new MetaException("Unable to select from transaction database "
+           + StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       return openTxns(rqst);
+     }
+   }
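The throttle at the top of the method above has simple hysteresis: it trips once the open-txn count reaches the maximum and only resets after the count falls below 90% of it. A stand-alone sketch of just that behavior (max = 100 and the observed counts are example values; the real method throws MetaException instead of printing):

    class OpenTxnThrottleExample {
      public static void main(String[] args) {
        int max = 100;            // stands in for maxOpenTxns
        boolean tooMany = false;  // stands in for tooManyOpenTxns
        for (int open : new int[] {99, 100, 95, 91, 89, 50}) {
          if (!tooMany && open >= max) {
            tooMany = true;
          }
          if (tooMany && open < max * 0.9) {
            tooMany = false;
          }
          // 99 -> accept, 100/95/91 -> reject, 89 and 50 -> accept again
          System.out.println(open + " open txns, reject new ones: " + tooMany);
        }
      }
    }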
+ 
+   private List<Long> openTxns(Connection dbConn, Statement stmt, OpenTxnRequest rqst)
+           throws SQLException, MetaException {
+     int numTxns = rqst.getNum_txns();
+     ResultSet rs = null;
+     TxnType txnType = TxnType.DEFAULT;
+     try {
+       if (rqst.isSetReplPolicy()) {
+         List<Long> targetTxnIdList = getTargetTxnIdList(rqst.getReplPolicy(), rqst.getReplSrcTxnIds(), stmt);
+ 
+         if (!targetTxnIdList.isEmpty()) {
+           if (targetTxnIdList.size() != rqst.getReplSrcTxnIds().size()) {
+             LOG.warn("target txn id number " + targetTxnIdList.toString() +
+                     " is not matching with source txn id number " + rqst.getReplSrcTxnIds().toString());
+           }
+           LOG.info("Target transactions " + targetTxnIdList.toString() + " are present for repl policy :" +
+                   rqst.getReplPolicy() + " and Source transaction id : " + rqst.getReplSrcTxnIds().toString());
+           return targetTxnIdList;
+         }
+         txnType = TxnType.REPL_CREATED;
+       }
+ 
+       String s = sqlGenerator.addForUpdateClause("select ntxn_next from NEXT_TXN_ID");
+       LOG.debug("Going to execute query <" + s + ">");
+       rs = stmt.executeQuery(s);
+       if (!rs.next()) {
+         throw new MetaException("Transaction database not properly " +
+                 "configured, can't find next transaction id.");
+       }
+       long first = rs.getLong(1);
+       s = "update NEXT_TXN_ID set ntxn_next = " + (first + numTxns);
+       LOG.debug("Going to execute update <" + s + ">");
+       stmt.executeUpdate(s);
+ 
+       long now = getDbTime(dbConn);
+       List<Long> txnIds = new ArrayList<>(numTxns);
+ 
+       List<String> rows = new ArrayList<>();
+       for (long i = first; i < first + numTxns; i++) {
+         txnIds.add(i);
+         rows.add(i + "," + quoteChar(TXN_OPEN) + "," + now + "," + now + ","
+                 + quoteString(rqst.getUser()) + "," + quoteString(rqst.getHostname()) + "," + txnType.getValue());
+       }
+       List<String> queries = sqlGenerator.createInsertValuesStmt(
+             "TXNS (txn_id, txn_state, txn_started, txn_last_heartbeat, txn_user, txn_host, txn_type)", rows);
+       for (String q : queries) {
+         LOG.debug("Going to execute update <" + q + ">");
+         stmt.execute(q);
+       }
+ 
+       // Need to register minimum open txnid for current transactions into the MIN_HISTORY_LEVEL table.
+       s = "select min(txn_id) from TXNS where txn_state = " + quoteChar(TXN_OPEN);
+       LOG.debug("Going to execute query <" + s + ">");
+       rs = stmt.executeQuery(s);
+       if (!rs.next()) {
+         throw new IllegalStateException("Scalar query returned no rows?!?!!");
+       }
+ 
+       // TXNS table should have at least one entry because we just inserted the newly opened txns.
+       // So, min(txn_id) would be a non-zero txnid.
+       long minOpenTxnId = rs.getLong(1);
+       assert (minOpenTxnId > 0);
+       rows.clear();
+       for (long txnId = first; txnId < first + numTxns; txnId++) {
+         rows.add(txnId + ", " + minOpenTxnId);
+       }
+ 
+       // Insert transaction entries into MIN_HISTORY_LEVEL.
+       List<String> inserts = sqlGenerator.createInsertValuesStmt(
+               "MIN_HISTORY_LEVEL (mhl_txnid, mhl_min_open_txnid)", rows);
+       for (String insert : inserts) {
+         LOG.debug("Going to execute insert <" + insert + ">");
+         stmt.execute(insert);
+       }
+       LOG.info("Added entries to MIN_HISTORY_LEVEL for current txns: (" + txnIds
+               + ") with min_open_txn: " + minOpenTxnId);
+ 
+       if (rqst.isSetReplPolicy()) {
+         List<String> rowsRepl = new ArrayList<>();
+ 
+         for (int i = 0; i < numTxns; i++) {
+           rowsRepl.add(
+                   quoteString(rqst.getReplPolicy()) + "," + rqst.getReplSrcTxnIds().get(i) + "," + txnIds.get(i));
+         }
+ 
+         List<String> queriesRepl = sqlGenerator.createInsertValuesStmt(
+                 "REPL_TXN_MAP (RTM_REPL_POLICY, RTM_SRC_TXN_ID, RTM_TARGET_TXN_ID)", rowsRepl);
+ 
+         for (String query : queriesRepl) {
+           LOG.info("Going to execute insert <" + query + ">");
+           stmt.execute(query);
+         }
+       }
+ 
+       if (transactionalListeners != null) {
+         MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                 EventMessage.EventType.OPEN_TXN, new OpenTxnEvent(txnIds, null), dbConn, sqlGenerator);
+       }
+       return txnIds;
+     } finally {
+       close(rs);
+     }
+   }
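The id allocation above is plain interval arithmetic on the ntxn_next value read from NEXT_TXN_ID under select-for-update; a tiny stand-alone illustration (first = 500 and numTxns = 3 are example values):

    import java.util.ArrayList;
    import java.util.List;

    class TxnIdRangeExample {
      public static void main(String[] args) {
        long first = 500;  // NEXT_TXN_ID.ntxn_next as read under the S4U mutex
        int numTxns = 3;   // requested batch size, already capped by TXN_MAX_OPEN_BATCH

        List<Long> txnIds = new ArrayList<>();
        for (long i = first; i < first + numTxns; i++) {
          txnIds.add(i);   // one TXNS row is inserted per id
        }
        long newNtxnNext = first + numTxns;

        System.out.println(txnIds);       // [500, 501, 502]
        System.out.println(newNtxnNext);  // 503 -- written back to NEXT_TXN_ID
      }
    }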
+ 
+   private List<Long> getTargetTxnIdList(String replPolicy, List<Long> sourceTxnIdList, Statement stmt)
+           throws SQLException {
+     ResultSet rs = null;
+     try {
+       List<String> inQueries = new ArrayList<>();
+       StringBuilder prefix = new StringBuilder();
+       StringBuilder suffix = new StringBuilder();
+       List<Long> targetTxnIdList = new ArrayList<>();
+       prefix.append("select RTM_TARGET_TXN_ID from REPL_TXN_MAP where ");
+       suffix.append(" and RTM_REPL_POLICY = " + quoteString(replPolicy));
+       TxnUtils.buildQueryWithINClause(conf, inQueries, prefix, suffix, sourceTxnIdList,
+               "RTM_SRC_TXN_ID", false, false);
+       for (String query : inQueries) {
+         LOG.debug("Going to execute select <" + query + ">");
+         rs = stmt.executeQuery(query);
+         while (rs.next()) {
+           targetTxnIdList.add(rs.getLong(1));
+         }
+       }
+       LOG.debug("targetTxnid for srcTxnId " + sourceTxnIdList.toString() + " is " + targetTxnIdList.toString());
+       return targetTxnIdList;
+     }  catch (SQLException e) {
+       LOG.warn("failed to get target txn ids " + e.getMessage());
+       throw e;
+     } finally {
+       close(rs);
+     }
+   }
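For readability, this is roughly the shape of the statements the method above ends up running. The exact batching and quoting are delegated to TxnUtils.buildQueryWithINClause and driven by configuration, and the policy string and ids below are made up:

    class ReplTxnMapQueryExample {
      public static void main(String[] args) {
        // Hypothetical inputs: replPolicy = "default.*", sourceTxnIdList = [11, 12, 13]
        String query = "select RTM_TARGET_TXN_ID from REPL_TXN_MAP"
            + " where RTM_SRC_TXN_ID in (11,12,13)"
            + " and RTM_REPL_POLICY = 'default.*'";
        System.out.println(query);
        // With a long id list the IN clause is split across several such statements,
        // and their result sets are concatenated into targetTxnIdList.
      }
    }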
+ 
+   @Override
+   @RetrySemantics.Idempotent
+   public long getTargetTxnId(String replPolicy, long sourceTxnId) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         List<Long> targetTxnIds = getTargetTxnIdList(replPolicy, Collections.singletonList(sourceTxnId), stmt);
+         if (targetTxnIds.isEmpty()) {
+           LOG.info("Txn {} not present for repl policy {}", sourceTxnId, replPolicy);
+           return -1;
+         }
+         assert (targetTxnIds.size() == 1);
+         return targetTxnIds.get(0);
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "getTargetTxnId(" + replPolicy + sourceTxnId + ")");
+         throw new MetaException("Unable to get target transaction id "
+                 + StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       return getTargetTxnId(replPolicy, sourceTxnId);
+     }
+   }
+ 
+   @Override
+   @RetrySemantics.Idempotent
+   public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException {
+     long txnid = rqst.getTxnid();
+     long sourceTxnId = -1;
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         if (rqst.isSetReplPolicy()) {
+           sourceTxnId = rqst.getTxnid();
+           List<Long> targetTxnIds = getTargetTxnIdList(rqst.getReplPolicy(),
+                   Collections.singletonList(sourceTxnId), stmt);
+           if (targetTxnIds.isEmpty()) {
+             LOG.info("Target txn id is missing for source txn id : " + sourceTxnId +
+                     " and repl policy " + rqst.getReplPolicy());
+             return;
+           }
+           assert targetTxnIds.size() == 1;
+           txnid = targetTxnIds.get(0);
+         }
+ 
+         if (abortTxns(dbConn, Collections.singletonList(txnid), true) != 1) {
+           TxnStatus status = findTxnState(txnid,stmt);
+           if(status == TxnStatus.ABORTED) {
+             if (rqst.isSetReplPolicy()) {
+               // in case of replication, idempotency is taken care of by getTargetTxnId
+               LOG.warn("Invalid state ABORTED for transactions started using replication replay task");
+               String s = "delete from REPL_TXN_MAP where RTM_SRC_TXN_ID = " + sourceTxnId +
+                       " and RTM_REPL_POLICY = " + quoteString(rqst.getReplPolicy());
+               LOG.info("Going to execute  <" + s + ">");
+               stmt.executeUpdate(s);
+             }
+             LOG.info("abortTxn(" + JavaUtils.txnIdToString(txnid) +
+               ") requested by it is already " + TxnStatus.ABORTED);
+             return;
+           }
+           raiseTxnUnexpectedState(status, txnid);
+         }
+ 
+         if (rqst.isSetReplPolicy()) {
+           String s = "delete from REPL_TXN_MAP where RTM_SRC_TXN_ID = " + sourceTxnId +
+               " and RTM_REPL_POLICY = " + quoteString(rqst.getReplPolicy());
+           LOG.info("Going to execute  <" + s + ">");
+           stmt.executeUpdate(s);
+         }
+ 
+         if (transactionalListeners != null) {
+           MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                   EventMessage.EventType.ABORT_TXN, new AbortTxnEvent(txnid, null), dbConn, sqlGenerator);
+         }
+ 
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "abortTxn(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+           + StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       abortTxn(rqst);
+     }
+   }
+ 
+   @Override
+   @RetrySemantics.Idempotent
+   public void abortTxns(AbortTxnsRequest rqst) throws NoSuchTxnException, MetaException {
+     List<Long> txnids = rqst.getTxn_ids();
+     try {
+       Connection dbConn = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         int numAborted = abortTxns(dbConn, txnids, false);
+         if (numAborted != txnids.size()) {
+           LOG.warn("Abort Transactions command only aborted " + numAborted + " out of " +
+               txnids.size() + " transactions. It's possible that the other " +
+               (txnids.size() - numAborted) +
+               " transactions have been aborted or committed, or the transaction ids are invalid.");
+         }
+ 
+         for (Long txnId : txnids) {
+           if (transactionalListeners != null) {
+             MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                     EventMessage.EventType.ABORT_TXN, new AbortTxnEvent(txnId, null), dbConn, sqlGenerator);
+           }
+         }
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "abortTxns(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+             + StringUtils.stringifyException(e));
+       } finally {
+         closeDbConn(dbConn);
+       }
+     } catch (RetryException e) {
+       abortTxns(rqst);
+     }
+   }
+ 
+   /**
+    * Concurrency/isolation notes:
+    * This is mutexed with {@link #openTxns(OpenTxnRequest)} and other {@link #commitTxn(CommitTxnRequest)}
+    * operations using select4update on NEXT_TXN_ID.  Also, mutexes on the TXNS table for a specific txnid:X
+    * see more notes below.
+    * In order to prevent lost updates, we need to determine if any 2 transactions overlap.  Each txn
+    * is viewed as an interval [M,N]. M is the txnid and N is taken from the same NEXT_TXN_ID sequence
+    * so that we can compare commit time of txn T with start time of txn S.  This sequence can be thought of
+    * as a logical time counter.  If S.commitTime < T.startTime, T and S do NOT overlap.
+    *
+    * Motivating example:
+    * Suppose we have multi-statement transactions T and S both of which are attempting x = x + 1
+    * In order to prevent the lost update problem, the non-overlapping txns must lock in the snapshot
+    * that they read appropriately.  In particular, if txns do not overlap, then one follows the other
+    * (assuming they write the same entity), and thus the 2nd must see changes of the 1st.  We ensure
+    * this by locking in the snapshot after 
+    * {@link #openTxns(OpenTxnRequest)} call is made (see org.apache.hadoop.hive.ql.Driver.acquireLocksAndOpenTxn)
+    * and mutexing openTxn() with commit().  In other words, once a S.commit() starts we must ensure
+    * that txn T which will be considered a later txn, locks in a snapshot that includes the result
+    * of S's commit (assuming no other txns).
+    * As a counter example, suppose we have S[3,3] and T[4,4] (commitId=txnid means no other transactions
+    * were running in parallel).  If T and S both locked in the same snapshot (for example commit of
+    * txnid:2, which is possible if commitTxn() and openTxns() are not mutexed)
+    * 'x' would be updated to the same value by both, i.e. lost update. 
+    */
+   @Override
+   @RetrySemantics.Idempotent("No-op if already committed")
+   public void commitTxn(CommitTxnRequest rqst)
+     throws NoSuchTxnException, TxnAbortedException, MetaException {
+     char isUpdateDelete = 'N';
+     long txnid = rqst.getTxnid();
+     long sourceTxnId = -1;
+ 
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet lockHandle = null;
+       ResultSet commitIdRs = null, rs;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         if (rqst.isSetReplPolicy()) {
+           sourceTxnId = rqst.getTxnid();
+           List<Long> targetTxnIds = getTargetTxnIdList(rqst.getReplPolicy(),
+                   Collections.singletonList(sourceTxnId), stmt);
+           if (targetTxnIds.isEmpty()) {
+             LOG.info("Target txn id is missing for source txn id : " + sourceTxnId +
+                     " and repl policy " + rqst.getReplPolicy());
+             return;
+           }
+           assert targetTxnIds.size() == 1;
+           txnid = targetTxnIds.get(0);
+         }
+ 
+         /**
+          * Runs at READ_COMMITTED with S4U on TXNS row for "txnid".  S4U ensures that no other
+          * operation can change this txn (such as acquiring locks). While lock() and commitTxn()
+          * should not normally run concurrently (for the same txn), they could due to bugs in the client,
+          * which could then corrupt internal transaction manager state.  Also competes with abortTxn().
+          */
+         lockHandle = lockTransactionRecord(stmt, txnid, TXN_OPEN);
+         if (lockHandle == null) {
+           //if here, txn was not found (in expected state)
+           TxnStatus actualTxnStatus = findTxnState(txnid, stmt);
+           if(actualTxnStatus == TxnStatus.COMMITTED) {
+             if (rqst.isSetReplPolicy()) {
+               // in case of replication, idempotency is taken care of by getTargetTxnId
+               LOG.warn("Invalid state COMMITTED for transactions started using replication replay task");
+             }
+             /**
+              * This makes the operation idempotent
+              * (assume that this is most likely due to retry logic)
+              */
+             LOG.info("Nth commitTxn(" + JavaUtils.txnIdToString(txnid) + ") msg");
+             return;
+           }
+           raiseTxnUnexpectedState(actualTxnStatus, txnid);
+           shouldNeverHappen(txnid);
+           //dbConn is rolled back in finally{}
+         }
+ 
+         String conflictSQLSuffix = null;
+         if (rqst.isSetReplPolicy()) {
+           rs = null;
+         } else {
+           conflictSQLSuffix = "from TXN_COMPONENTS where tc_txnid=" + txnid + " and tc_operation_type IN(" +
+                   quoteChar(OpertaionType.UPDATE.sqlConst) + "," + quoteChar(OpertaionType.DELETE.sqlConst) + ")";
+           rs = stmt.executeQuery(sqlGenerator.addLimitClause(1,
+                   "tc_operation_type " + conflictSQLSuffix));
+         }
+         if (rs != null && rs.next()) {
+           isUpdateDelete = 'Y';
+           close(rs);
+           //if here it means currently committing txn performed update/delete and we should check WW conflict
+           /**
+            * This S4U will mutex with other commitTxn() and openTxns(). 
+            * -1 below makes txn intervals look like [3,3] [4,4] if all txns are serial
+            * Note: it's possible to have several txns have the same commit id.  Suppose 3 txns start
+            * at the same time and no new txns start until all 3 commit.
+            * We could've incremented the sequence for commitId as well, but it doesn't add anything functionally.
+            */
+           commitIdRs = stmt.executeQuery(sqlGenerator.addForUpdateClause("select ntxn_next - 1 from NEXT_TXN_ID"));
+           if (!commitIdRs.next()) {
+             throw new IllegalStateException("No rows found in NEXT_TXN_ID");
+           }
+           long commitId = commitIdRs.getLong(1);
+           Savepoint undoWriteSetForCurrentTxn = dbConn.setSavepoint();
+           /**
+            * "select distinct" is used below because
+            * 1. once we get to multi-statement txns, we only care to record that something was updated once
+            * 2. if {@link #addDynamicPartitions(AddDynamicPartitions)} is retried by the caller it may create
+            *  duplicate entries in TXN_COMPONENTS
+            * but we want to add a PK on WRITE_SET which won't have unique rows w/o this distinct
+            * even if it includes all of its columns
+            */
+           int numCompsWritten = stmt.executeUpdate(
+             "insert into WRITE_SET (ws_database, ws_table, ws_partition, ws_txnid, ws_commit_id, ws_operation_type)" +
+             " select distinct tc_database, tc_table, tc_partition, tc_txnid, " + commitId + ", tc_operation_type " + conflictSQLSuffix);
+           /**
+            * see if there are any overlapping txns that wrote the same element, i.e. have a conflict
+            * Since entire commit operation is mutexed wrt other start/commit ops,
+            * committed.ws_commit_id <= current.ws_commit_id for all txns
+            * thus if committed.ws_commit_id < current.ws_txnid, transactions do NOT overlap
+            * For example, [17,20] is committed, [6,80] is being committed right now - these overlap
+            * [17,20] committed and [21,21] committing now - these do not overlap.
+            * [17,18] committed and [18,19] committing now - these overlap  (here 18 started while 17 was still running)
+            */
+           rs = stmt.executeQuery
+             (sqlGenerator.addLimitClause(1, "committed.ws_txnid, committed.ws_commit_id, committed.ws_database," +
+               "committed.ws_table, committed.ws_partition, cur.ws_commit_id cur_ws_commit_id, " +
+               "cur.ws_operation_type cur_op, committed.ws_operation_type committed_op " +
+               "from WRITE_SET committed INNER JOIN WRITE_SET cur " +
+               "ON committed.ws_database=cur.ws_database and committed.ws_table=cur.ws_table " +
+               //For partitioned table we always track writes at partition level (never at table)
+               //and for non partitioned - always at table level, thus the same table should never
+               //have entries with partition key and w/o
+               "and (committed.ws_partition=cur.ws_partition or (committed.ws_partition is null and cur.ws_partition is null)) " +
+               "where cur.ws_txnid <= committed.ws_commit_id" + //txns overlap; could replace ws_txnid
+               // with txnid, though any decent DB should infer this
+               " and cur.ws_txnid=" + txnid + //make sure RHS of join only has rows we just inserted as
+               // part of this commitTxn() op
+               " and committed.ws_txnid <> " + txnid + //and LHS only has committed txns
+               //U+U and U+D is a conflict but D+D is not and we don't currently track I in WRITE_SET at all
+               " and (committed.ws_operation_type=" + quoteChar(OpertaionType.UPDATE.sqlConst) +
+               " OR cur.ws_operation_type=" + quoteChar(OpertaionType.UPDATE.sqlConst) + ")"));
+           if (rs.next()) {
+             //found a conflict
+             String committedTxn = "[" + JavaUtils.txnIdToString(rs.getLong(1)) + "," + rs.getLong(2) + "]";
+             StringBuilder resource = new StringBuilder(rs.getString(3)).append("/").append(rs.getString(4));
+             String partitionName = rs.getString(5);
+             if (partitionName != null) {
+               resource.append('/').append(partitionName);
+             }
+             String msg = "Aborting [" + JavaUtils.txnIdToString(txnid) + "," + rs.getLong(6) + "]" + " due to a write conflict on " + resource +
+               " committed by " + committedTxn + " " + rs.getString(7) + "/" + rs.getString(8);
+             close(rs);
+             //remove WRITE_SET info for current txn since it's about to abort
+             dbConn.rollback(undoWriteSetForCurrentTxn);
+             LOG.info(msg);
+             //todo: should make abortTxns() write something into TXNS.TXN_META_INFO about this
+             if (abortTxns(dbConn, Collections.singletonList(txnid), true) != 1) {
+               throw new IllegalStateException(msg + " FAILED!");
+             }
+             dbConn.commit();
+             close(null, stmt, dbConn);
+             throw new TxnAbortedException(msg);
+           } else {
+             //no conflicting operations, proceed with the rest of commit sequence
+           }
+         }
+         else {
+           /**
+            * current txn didn't update/delete anything (may have inserted), so just proceed with commit
+            *
+            * We only care about commit id for write txns, so for RO (when supported) txns we don't
+            * have to mutex on NEXT_TXN_ID.
+            * Consider: if RO txn is after a W txn, then RO's openTxns() will be mutexed with W's
+            * commitTxn() because both do S4U on NEXT_TXN_ID and thus RO will see result of W txn.
+            * If RO < W, then there is no reads-from relationship.
+            * In the replication flow we don't expect any write-write conflict as it should have been handled at the source.
+            */
+         }
+ 
+         String s;
+         if (!rqst.isSetReplPolicy()) {
+           // Move the record from txn_components into completed_txn_components so that the compactor
+           // knows where to look to compact.
+           s = "insert into COMPLETED_TXN_COMPONENTS (ctc_txnid, ctc_database, " +
+                   "ctc_table, ctc_partition, ctc_writeid, ctc_update_delete) select tc_txnid, tc_database, tc_table, " +
+                   "tc_partition, tc_writeid, '" + isUpdateDelete + "' from TXN_COMPONENTS where tc_txnid = " + txnid;
+           LOG.debug("Going to execute insert <" + s + ">");
+ 
+           if ((stmt.executeUpdate(s)) < 1) {
+             //this can be reasonable for an empty txn START/COMMIT or read-only txn
+             //also an IUD with DP that didn't match any rows.
+             LOG.info("Expected to move at least one record from txn_components to " +
+                     "completed_txn_components when committing txn! " + JavaUtils.txnIdToString(txnid));
+           }
+         } else {
+           if (rqst.isSetWriteEventInfos()) {
+             List<String> rows = new ArrayList<>();
+             for (WriteEventInfo writeEventInfo : rqst.getWriteEventInfos()) {
+               rows.add(txnid + "," + quoteString(writeEventInfo.getDatabase()) + "," +
+                       quoteString(writeEventInfo.getTable()) + "," +
+                       quoteString(writeEventInfo.getPartition()) + "," +
+                       writeEventInfo.getWriteId() + "," +
+                       "'" + isUpdateDelete + "'");
+             }
+             List<String> queries = sqlGenerator.createInsertValuesStmt("COMPLETED_TXN_COMPONENTS " +
+                     "(ctc_txnid," + " ctc_database, ctc_table, ctc_partition, ctc_writeid, ctc_update_delete)", rows);
+             for (String q : queries) {
+               LOG.debug("Going to execute insert  <" + q + "> ");
+               stmt.execute(q);
+             }
+           }
+ 
+           s = "delete from REPL_TXN_MAP where RTM_SRC_TXN_ID = " + sourceTxnId +
+                   " and RTM_REPL_POLICY = " + quoteString(rqst.getReplPolicy());
+           LOG.info("Repl going to execute  <" + s + ">");
+           stmt.executeUpdate(s);
+         }
+ 
+         // cleanup all txn related metadata
+         s = "delete from TXN_COMPONENTS where tc_txnid = " + txnid;
+         LOG.debug("Going to execute update <" + s + ">");
+         stmt.executeUpdate(s);
+         s = "delete from HIVE_LOCKS where hl_txnid = " + txnid;
+         LOG.debug("Going to execute update <" + s + ">");
+         stmt.executeUpdate(s);
+         s = "delete from TXNS where txn_id = " + txnid;
+         LOG.debug("Going to execute update <" + s + ">");
+         stmt.executeUpdate(s);
+         s = "delete from MIN_HISTORY_LEVEL where mhl_txnid = " + txnid;
+         LOG.debug("Going to execute update <" + s + ">");
+         stmt.executeUpdate(s);
+         LOG.info("Removed committed transaction: (" + txnid + ") from MIN_HISTORY_LEVEL");
+ 
+         s = "delete from MATERIALIZATION_REBUILD_LOCKS where mrl_txn_id = " + txnid;
+         LOG.debug("Going to execute update <" + s + ">");
+         stmt.executeUpdate(s);
+ 
+         if (transactionalListeners != null) {
+           MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                   EventMessage.EventType.COMMIT_TXN, new CommitTxnEvent(txnid, null), dbConn, sqlGenerator);
+         }
+ 
+         LOG.debug("Going to commit");
+         close(rs);
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "commitTxn(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+           + StringUtils.stringifyException(e));
+       } finally {
+         close(commitIdRs);
+         close(lockHandle, stmt, dbConn);
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       commitTxn(rqst);
+     }
+   }
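The write-conflict test in the big WHERE clause above boils down to comparing transaction intervals [txnid, commitId], as the method's javadoc describes. A stand-alone sketch of that rule using the intervals quoted in the comments; note that an actual abort additionally requires the same db/table/partition and an update on at least one side (U+U or U+D, but not D+D):

    class WriteSetOverlapExample {
      // Committed txn C = [cTxnId, cCommitId]; currently committing txn T started at tTxnId.
      // They overlap iff T started before C committed, i.e. tTxnId <= cCommitId
      // (the "cur.ws_txnid <= committed.ws_commit_id" predicate above).
      static boolean overlaps(long cCommitId, long tTxnId) {
        return tTxnId <= cCommitId;
      }

      public static void main(String[] args) {
        System.out.println(overlaps(20, 6));   // true:  [17,20] committed vs [6,80] committing
        System.out.println(overlaps(20, 21));  // false: [17,20] vs [21,21] do not overlap
        System.out.println(overlaps(18, 18));  // true:  [17,18] vs [18,19]; 18 started while 17 ran
      }
    }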
+ 
+   /**
+    * Replicate Table Write Ids state to mark aborted write ids and writeid high water mark.
+    * @param rqst info on table/partitions and writeid snapshot to replicate.
+    * @throws MetaException
+    */
+   @Override
+   @RetrySemantics.Idempotent("No-op if already replicated the writeid state")
+   public void replTableWriteIdState(ReplTblWriteIdStateRequest rqst) throws MetaException {
+     String dbName = rqst.getDbName().toLowerCase();
+     String tblName = rqst.getTableName().toLowerCase();
+     ValidWriteIdList validWriteIdList = new ValidReaderWriteIdList(rqst.getValidWriteIdlist());
+ 
+     // Get the abortedWriteIds which are already sorted in ascending order.
+     List<Long> abortedWriteIds = getAbortedWriteIds(validWriteIdList);
+     int numAbortedWrites = abortedWriteIds.size();
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       TxnStore.MutexAPI.LockHandle handle = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         // Check if this txn state is already replicated for this given table. If yes, then it is
+         // an idempotent case and we just return.
+         String sql = "select nwi_next from NEXT_WRITE_ID where nwi_database = " + quoteString(dbName)
+                         + " and nwi_table = " + quoteString(tblName);
+         LOG.debug("Going to execute query <" + sql + ">");
+ 
+         rs = stmt.executeQuery(sql);
+         if (rs.next()) {
+           LOG.info("Idempotent flow: WriteId state <" + validWriteIdList + "> is already applied for the table: "
+                   + dbName + "." + tblName);
+           rollbackDBConn(dbConn);
+           return;
+         }
+ 
+         if (numAbortedWrites > 0) {
+           // Allocate/Map one txn per aborted writeId and abort the txn to mark writeid as aborted.
+           List<Long> txnIds = openTxns(dbConn, stmt,
+                   new OpenTxnRequest(numAbortedWrites, rqst.getUser(), rqst.getHostName()));
+           assert(numAbortedWrites == txnIds.size());
+ 
+           // Map each aborted write id with each allocated txn.
+           List<String> rows = new ArrayList<>();
+           int i = 0;
+           for (long txn : txnIds) {
+             long writeId = abortedWriteIds.get(i++);
+             rows.add(txn + ", " + quoteString(dbName) + ", " + quoteString(tblName) + ", " + writeId);
+             LOG.info("Allocated writeID: " + writeId + " for txnId: " + txn);
+           }
+ 
+           // Insert entries to TXN_TO_WRITE_ID for aborted write ids
+           List<String> inserts = sqlGenerator.createInsertValuesStmt(
+                   "TXN_TO_WRITE_ID (t2w_txnid, t2w_database, t2w_table, t2w_writeid)", rows);
+           for (String insert : inserts) {
+             LOG.debug("Going to execute insert <" + insert + ">");
+             stmt.execute(insert);
+           }
+ 
+           // Abort all the allocated txns so that the mapped write ids are referred as aborted ones.
+           int numAborts = abortTxns(dbConn, txnIds, true);
+           assert(numAborts == numAbortedWrites);
+         }
+         handle = getMutexAPI().acquireLock(MUTEX_KEY.WriteIdAllocator.name());
+ 
+         // There are some txns in the list which have no write id allocated, and hence go ahead and do it.
+         // Get the next write id for the given table and update it with the new next write id.
+         // It is expected that NEXT_WRITE_ID doesn't have an entry for this table, so directly insert it.
+         long nextWriteId = validWriteIdList.getHighWatermark() + 1;
+ 
+         // First allocation of write id (hwm+1) should add the table to the next_write_id meta table.
+         sql = "insert into NEXT_WRITE_ID (nwi_database, nwi_table, nwi_next) values ("
+                 + quoteString(dbName) + "," + quoteString(tblName) + ","
+                 + Long.toString(nextWriteId) + ")";
+         LOG.debug("Going to execute insert <" + sql + ">");
+         stmt.execute(sql);
+ 
+         LOG.info("WriteId state <" + validWriteIdList + "> is applied for the table: " + dbName + "." + tblName);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "replTableWriteIdState(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+                 + StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+         if(handle != null) {
+           handle.releaseLocks();
+         }
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       replTableWriteIdState(rqst);
+     }
+ 
+     // Schedule Major compaction on all the partitions/table to clean aborted data
+     if (numAbortedWrites > 0) {
+       CompactionRequest compactRqst = new CompactionRequest(rqst.getDbName(), rqst.getTableName(),
+               CompactionType.MAJOR);
+       if (rqst.isSetPartNames()) {
+         for (String partName : rqst.getPartNames()) {
+           compactRqst.setPartitionname(partName);
+           compact(compactRqst);
+         }
+       } else {
+         compact(compactRqst);
+       }
+     }
+   }
+ 
+   private List<Long> getAbortedWriteIds(ValidWriteIdList validWriteIdList) {
+     List<Long> abortedWriteIds = new ArrayList<>();
+     for (long writeId : validWriteIdList.getInvalidWriteIds()) {
+       if (validWriteIdList.isWriteIdAborted(writeId)) {
+         abortedWriteIds.add(writeId);
+       }
+     }
+     return abortedWriteIds;
+   }
+ 
+   @Override
+   @RetrySemantics.ReadOnly
+   public GetValidWriteIdsResponse getValidWriteIds(GetValidWriteIdsRequest rqst)
+           throws NoSuchTxnException, MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ValidTxnList validTxnList;
+ 
+       // We should prepare the valid write ids list based on validTxnList of current txn.
+       // If no txn exists in the caller, then they would pass null for validTxnList and so it is
+       // required to get the current state of txns to make validTxnList
+       if (rqst.isSetValidTxnList()) {
+         validTxnList = new ValidReadTxnList(rqst.getValidTxnList());
+       } else {
+         // Passing 0 for currentTxn means this validTxnList is not wrt any particular txn
+         validTxnList = TxnUtils.createValidReadTxnList(getOpenTxns(), 0);
+       }
+       try {
+         /**
+          * This runs at READ_COMMITTED for exactly the same reason as {@link #getOpenTxnsInfo()}
+          */
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         // Get the valid write id list for all the tables read by the current txn
+         List<TableValidWriteIds> tblValidWriteIdsList = new ArrayList<>();
+         for (String fullTableName : rqst.getFullTableNames()) {
+           tblValidWriteIdsList.add(getValidWriteIdsForTable(stmt, fullTableName, validTxnList));
+         }
+ 
+         LOG.debug("Going to rollback");
+         dbConn.rollback();
+         GetValidWriteIdsResponse owr = new GetValidWriteIdsResponse(tblValidWriteIdsList);
+         return owr;
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "getValidWriteIds");
+         throw new MetaException("Unable to select from transaction database, "
+                 + StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return getValidWriteIds(rqst);
+     }
+   }
+ 
+   // Method to get the Valid write ids list for the given table
+   // Input fullTableName is expected to be of format <db_name>.<table_name>
+   private TableValidWriteIds getValidWriteIdsForTable(Statement stmt, String fullTableName,
+                                                ValidTxnList validTxnList) throws SQLException {
+     ResultSet rs = null;
+     String[] names = TxnUtils.getDbTableName(fullTableName);
+     try {
+       // Need to initialize to 0 to make sure if nobody modified this table, then current txn
+       // shouldn't read any data.
+       // If there is a conversion from non-acid to acid table, then by default 0 would be assigned as
+       // writeId for data from the non-acid table and so writeIdHwm=0 would ensure that data is readable by any txn.
+       long writeIdHwm = 0;
+       List<Long> invalidWriteIdList = new ArrayList<>();
+       long minOpenWriteId = Long.MAX_VALUE;
+       BitSet abortedBits = new BitSet();
+       long txnHwm = validTxnList.getHighWatermark();
+ 
+       // Find the writeId high water mark based upon the txnId high water mark. If found, then we need to
+       // traverse through all write ids less than the writeId HWM to make the exceptions list.
+       // The writeHWM = min(NEXT_WRITE_ID.nwi_next-1, max(TXN_TO_WRITE_ID.t2w_writeid under txnHwm))
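+       // For example (illustrative values only): if txnHwm = 100 and write ids 1..7 were allocated
+       // by txns <= 100 for this table, writeIdHwm becomes 7. If no txn <= txnHwm allocated a write
+       // id for the table, the fallback below uses NEXT_WRITE_ID (e.g. nwi_next = 8 gives writeIdHwm = 7).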
+       String s = "select max(t2w_writeid) from TXN_TO_WRITE_ID where t2w_txnid <= " + txnHwm
+               + " and t2w_database = " + quoteString(names[0])
+               + " and t2w_table = " + quoteString(names[1]);
+       LOG.debug("Going to execute query<" + s + ">");
+       rs = stmt.executeQuery(s);
+       if (rs.next()) {
+         writeIdHwm = rs.getLong(1);
+       }
+ 
+       // If no writeIds allocated by txns under txnHwm, then find writeHwm from NEXT_WRITE_ID.
+       if (writeIdHwm <= 0) {
+         // Need to subtract 1 as nwi_next would be the next write id to be allocated but we need highest
+         // allocated write id.
+         s = "select nwi_next-1 from NEXT_WRITE_ID where nwi_database = " + quoteString(names[0])
+                 + " and nwi_table = " + quoteString(names[1]);
+         LOG.debug("Going to execute query<" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (rs.next()) {
+           long maxWriteId = rs.getLong(1);
+           if (maxWriteId > 0) {
+             writeIdHwm = (writeIdHwm > 0) ? Math.min(maxWriteId, writeIdHwm) : maxWriteId;
+           }
+         }
+       }
+ 
+       // As writeIdHwm is known, query all writeIds under the writeId HWM.
+       // If any writeId under the HWM is allocated by a txn > txnId HWM or belongs to an open/aborted txn,
+       // then it will be added to the invalid list. The results should be sorted in ascending order based
+       // on write id. The sorting is needed as the exceptions list in ValidWriteIdList would be looked up
+       // using binary search.
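+       // For example (illustrative values only): with writeIdHwm = 7, a write id 3 allocated by a
+       // still-open txn and a write id 5 allocated by an aborted txn end up in the exceptions list
+       // as [3, 5], with bit 1 set in abortedBits and minOpenWriteId = 3.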
+       s = "select t2w_txnid, t2w_writeid from TXN_TO_WRITE_ID where t2w_writeid <= " + writeIdHwm
+               + " and t2w_database = " + quoteString(names[0])
+               + " and t2w_table = " + quoteString(names[1])
+               + " order by t2w_writeid asc";
+ 
+       LOG.debug("Going to execute query<" + s + ">");
+       rs = stmt.executeQuery(s);
+       while (rs.next()) {
+         long txnId = rs.getLong(1);
+         long writeId = rs.getLong(2);
+         if (validTxnList.isTxnValid(txnId)) {
+           // Skip if the transaction under evaluation is already committed.
+           continue;
+         }
+ 
+         // The current txn is either in open or aborted state.
+         // Mark the write ids state as per the txn state.
+         invalidWriteIdList.add(writeId);
+         if (validTxnList.isTxnAborted(txnId)) {
+           abortedBits.set(invalidWriteIdList.size() - 1);
+         } else {
+           minOpenWriteId = Math.min(minOpenWriteId, writeId);
+         }
+       }
+ 
+       ByteBuffer byteBuffer = ByteBuffer.wrap(abortedBits.toByteArray());
+       TableValidWriteIds owi = new TableValidWriteIds(fullTableName, writeIdHwm, invalidWriteIdList, byteBuffer);
+       if (minOpenWriteId < Long.MAX_VALUE) {
+         owi.setMinOpenWriteId(minOpenWriteId);
+       }
+       return owi;
+     } finally {
+       close(rs);
+     }
+   }
+ 
+   @Override
+   @RetrySemantics.Idempotent
+   public AllocateTableWriteIdsResponse allocateTableWriteIds(AllocateTableWriteIdsRequest rqst)
+           throws NoSuchTxnException, TxnAbortedException, MetaException {
+     List<Long> txnIds;
+     String dbName = rqst.getDbName().toLowerCase();
+     String tblName = rqst.getTableName().toLowerCase();
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       TxnStore.MutexAPI.LockHandle handle = null;
+       List<TxnToWriteId> txnToWriteIds = new ArrayList<>();
+       List<TxnToWriteId> srcTxnToWriteIds = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         if (rqst.isSetReplPolicy()) {
+           srcTxnToWriteIds = rqst.getSrcTxnToWriteIdList();
+           List<Long> srcTxnIds = new ArrayList<>();
+           assert (rqst.isSetSrcTxnToWriteIdList());
+           assert (!rqst.isSetTxnIds());
+           assert (!srcTxnToWriteIds.isEmpty());
+ 
+           for (TxnToWriteId txnToWriteId :  srcTxnToWriteIds) {
+             srcTxnIds.add(txnToWriteId.getTxnId());
+           }
+           txnIds = getTargetTxnIdList(rqst.getReplPolicy(), srcTxnIds, stmt);
+           if (srcTxnIds.size() != txnIds.size()) {
+             LOG.warn("Target txn id is missing for source txn id : " + srcTxnIds.toString() +
+                     " and repl policy " + rqst.getReplPolicy());
+             throw new RuntimeException("This should never happen for txnIds: " + txnIds);
+           }
+         } else {
+           assert (!rqst.isSetSrcTxnToWriteIdList());
+           assert (rqst.isSetTxnIds());
+           txnIds = rqst.getTxnIds();
+         }
+ 
+         Collections.sort(txnIds); //easier to read logs and for assumption done in replication flow
+ 
+         // Check if all the input txns are in open state. Write ID should be allocated only for open transactions.
+         if (!isTxnsInOpenState(txnIds, stmt)) {
+           ensureAllTxnsValid(dbName, tblName, txnIds, stmt);
+           throw new RuntimeException("This should never happen for txnIds: " + txnIds);
+         }
+ 
+         long writeId;
+         String s;
+         long allocatedTxnsCount = 0;
+         long txnId;
+         List<String> queries = new ArrayList<>();
+         StringBuilder prefix = new StringBuilder();
+         StringBuilder suffix = new StringBuilder();
+ 
+         // Traverse TXN_TO_WRITE_ID to see if any of the input txns already have a write id allocated
+         // for the same db.table. If yes, then reuse it; else allocate a new one.
+         // The write id would already have been allocated in case of multi-statement txns, where the
+         // first write on a table allocates a write id and the rest of the writes re-use it.
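+         // For example (illustrative values only): if txns [21, 22, 23] already map to write ids
+         // [5, 6, 7] for this table, those mappings are returned as-is; otherwise none of them
+         // should have a mapping and fresh write ids are allocated for all of them below.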
+         prefix.append("select t2w_txnid, t2w_writeid from TXN_TO_WRITE_ID where"
+                         + " t2w_database = " + quoteString(dbName)
+                         + " and t2w_table = " + quoteString(tblName) + " and ");
+         suffix.append("");
+         TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix,
+                 txnIds, "t2w_txnid", false, false);
+         for (String query : queries) {
+           LOG.debug("Going to execute query <" + query + ">");
+           rs = stmt.executeQuery(query);
+           while (rs.next()) {
+             // If table write ID is already allocated for the given transaction, then just use it
+             txnId = rs.getLong(1);
+             writeId = rs.getLong(2);
+             txnToWriteIds.add(new TxnToWriteId(txnId, writeId));
+             allocatedTxnsCount++;
+             LOG.info("Reused already allocated writeID: " + writeId + " for txnId: " + txnId);
+           }
+         }
+ 
+         // Batch allocation should always happen atomically. Either write ids for all txns are allocated or none are.
+         long numOfWriteIds = txnIds.size();
+         assert ((allocatedTxnsCount == 0) || (numOfWriteIds == allocatedTxnsCount));
+         if (allocatedTxnsCount == numOfWriteIds) {
+           // If all the txns in the list have pre-allocated write ids for the given table, then just return.
+           // This is for idempotent case.
+           return new AllocateTableWriteIdsResponse(txnToWriteIds);
+         }
+ 
+         handle = getMutexAPI().acquireLock(MUTEX_KEY.WriteIdAllocator.name());
+ 
+         // There are some txns in the list which do not have a write id allocated, so go ahead and do it.
+         // Get the next write id for the given table and update it with new next write id.
+         // This is select for update query which takes a lock if the table entry is already there in NEXT_WRITE_ID
+         s = sqlGenerator.addForUpdateClause(
+                 "select nwi_next from NEXT_WRITE_ID where nwi_database = " + quoteString(dbName)
+                         + " and nwi_table = " + quoteString(tblName));
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (!rs.next()) {
+           // First allocation of write id should add the table to the next_write_id meta table
+           // The initial value for write id should be 1 and hence we add 1 to the number of write ids allocated here
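+           // For example (illustrative values only): allocating write ids for 3 txns on a table with
+           // no NEXT_WRITE_ID row yet assigns write ids 1, 2 and 3 and stores nwi_next = 4.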
+           writeId = 1;
+           s = "insert into NEXT_WRITE_ID (nwi_database, nwi_table, nwi_next) values ("
+                   + quoteString(dbName) + "," + quoteString(tblName) + "," + Long.toString(numOfWriteIds + 1) + ")";
+           LOG.debug("Going to execute insert <" + s + ">");
+           stmt.execute(s);
+         } else {
+           writeId = rs.getLong(1);
+           // Update the NEXT_WRITE_ID for the given table after incrementing by number of write ids allocated
+           s = "update NEXT_WRITE_ID set nwi_next = " + (writeId + numOfWriteIds)
+                   + " where nwi_database = " + quoteString(dbName)
+                   + " and nwi_table = " + quoteString(tblName);
+           LOG.debug("Going to execute update <" + s + ">");
+           stmt.executeUpdate(s);
+         }
+ 
+         // Map the newly allocated write ids against the list of txns which don't have pre-allocated
+         // write ids
+         List<String> rows = new ArrayList<>();
+         for (long txn : txnIds) {
+           rows.add(txn + ", " + quoteString(dbName) + ", " + quoteString(tblName) + ", " + writeId);
+           txnToWriteIds.add(new TxnToWriteId(txn, writeId));
+           LOG.info("Allocated writeID: " + writeId + " for txnId: " + txn);
+           writeId++;
+         }
+ 
+         if (rqst.isSetReplPolicy()) {
+           int lastIdx = txnToWriteIds.size()-1;
+           if ((txnToWriteIds.get(0).getWriteId() != srcTxnToWriteIds.get(0).getWriteId()) ||
+               (txnToWriteIds.get(lastIdx).getWriteId() != srcTxnToWriteIds.get(lastIdx).getWriteId())) {
+             LOG.error("Allocated write id range {} is not matching with the input write id range {}.",
+                     txnToWriteIds, srcTxnToWriteIds);
+             throw new IllegalStateException("Write id allocation failed for: " + srcTxnToWriteIds);
+           }
+         }
+ 
+         // Insert entries to TXN_TO_WRITE_ID for newly allocated write ids
+         List<String> inserts = sqlGenerator.createInsertValuesStmt(
+                 "TXN_TO_WRITE_ID (t2w_txnid, t2w_database, t2w_table, t2w_writeid)", rows);
+         for (String insert : inserts) {
+           LOG.debug("Going to execute insert <" + insert + ">");
+           stmt.execute(insert);
+         }
+ 
+         if (transactionalListeners != null) {
+           MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                   EventMessage.EventType.ALLOC_WRITE_ID,
+                   new AllocWriteIdEvent(txnToWriteIds, rqst.getDbName(), rqst.getTableName(), null),
+                   dbConn, sqlGenerator);
+         }
+ 
+         LOG.debug("Going to commit");
+         dbConn.commit();
+         return new AllocateTableWriteIdsResponse(txnToWriteIds);
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "allocateTableWriteIds(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+                 + StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+         if(handle != null) {
+           handle.releaseLocks();
+         }
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       return allocateTableWriteIds(rqst);
+     }
+   }
+   @Override
+   public void seedWriteIdOnAcidConversion(InitializeTableWriteIdsRequest rqst)
+       throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       TxnStore.MutexAPI.LockHandle handle = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         handle = getMutexAPI().acquireLock(MUTEX_KEY.WriteIdAllocator.name());
+         //since this is on conversion from non-acid to acid, NEXT_WRITE_ID should not have an entry
+         //for this table.  It also has a unique index in case 'should not' is violated
+ 
+         // First allocation of write id should add the table to the next_write_id meta table.
+         // The next write id to hand out is one past the seed write id assigned to the data that
+         // existed before the conversion.
+         String s = "insert into NEXT_WRITE_ID (nwi_database, nwi_table, nwi_next) values ("
+             + quoteString(rqst.getDbName()) + "," + quoteString(rqst.getTblName()) + "," +
+             Long.toString(rqst.getSeeWriteId() + 1) + ")";
+         LOG.debug("Going to execute insert <" + s + ">");
+         stmt.execute(s);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "seedWriteIdOnAcidConversion(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+             + StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+         if(handle != null) {
+           handle.releaseLocks();
+         }
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       seedWriteIdOnAcidConversion(rqst);
+     }
+ 
+   }
+   @Override
+   @RetrySemantics.Idempotent
+   public void addWriteNotificationLog(AcidWriteEvent acidWriteEvent)
+           throws MetaException {
+     Connection dbConn = null;
+     try {
+       try {
+         //Idempotent case is handled by notify Event
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                 EventMessage.EventType.ACID_WRITE, acidWriteEvent, dbConn, sqlGenerator);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+         return;
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         if (isDuplicateKeyError(e)) {
+           // in case of a duplicate key error, retry as it might be due to a race condition
+           if (waitForRetry("addWriteNotificationLog(" + acidWriteEvent + ")", e.getMessage())) {
+             throw new RetryException();
+           }
+           retryNum = 0;
+           throw new MetaException(e.getMessage());
+         }
+         checkRetryable(dbConn, e, "addWriteNotificationLog(" + acidWriteEvent + ")");
+         throw new MetaException("Unable to add write notification event " + StringUtils.stringifyException(e));
+       } finally{
+         closeDbConn(dbConn);
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       addWriteNotificationLog(acidWriteEvent);
+     }
+   }
+ 
+   @Override
+   @RetrySemantics.SafeToRetry
+   public void performWriteSetGC() {
+     Connection dbConn = null;
+     Statement stmt = null;
+     ResultSet rs = null;
+     try {
+       dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+       stmt = dbConn.createStatement();
+       rs = stmt.executeQuery("select ntxn_next - 1 from NEXT_TXN_ID");
+       if(!rs.next()) {
+         throw new IllegalStateException("NEXT_TXN_ID is empty: DB is corrupted");
+       }
+       long highestAllocatedTxnId = rs.getLong(1);
+       close(rs);
+       rs = stmt.executeQuery("select min(txn_id) from TXNS where txn_state=" + quoteChar(TXN_OPEN));
+       if(!rs.next()) {
+         throw new IllegalStateException("Scalar query returned no rows?!?!!");
+       }
+       long commitHighWaterMark;//all currently open txns (if any) have txnid >= commitHighWaterMark
+       long lowestOpenTxnId = rs.getLong(1);
+       if(rs.wasNull()) {
+         //if here then there are no Open txns and  highestAllocatedTxnId must be
+         //resolved (i.e. committed or aborted), either way
+         //there are no open txns with id <= highestAllocatedTxnId
+         //the +1 is there because "delete ..." below has < (which is correct for the case when
+         //there is an open txn)
+         //Concurrency: even if new txn starts (or starts + commits) it is still true that
+         //there are no currently open txns that overlap with any committed txn with 
+         //commitId <= commitHighWaterMark (as set on next line).  So plain READ_COMMITTED is enough.
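+         // For example (illustrative values only): if highestAllocatedTxnId = 120 and there are no
+         // open txns, WRITE_SET rows with ws_commit_id < 121 are deleted below; if the lowest open
+         // txn id were 115 instead, only rows with ws_commit_id < 115 would be deleted.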
+         commitHighWaterMark = highestAllocatedTxnId + 1;
+       }
+       else {
+         commitHighWaterMark = lowestOpenTxnId;
+       }
+       int delCnt = stmt.executeUpdate("delete from WRITE_SET where ws_commit_id < " + commitHighWaterMark);
+       LOG.info("Deleted " + delCnt + " obsolete rows from WRITE_SET");
+       dbConn.commit();
+     } catch (SQLException ex) {
+       LOG.warn("WriteSet GC failed due to " + getMessage(ex), ex);
+     }
+     finally {
+       close(rs, stmt, dbConn);
+     }
+   }
+ 
+   /**
+    * Get invalidation info for the materialization. Currently, the materialization information
+    * only contains information about whether there were update/delete operations on the source
+    * tables used by the materialization since it was created.
+    */
+   @Override
+   @RetrySemantics.ReadOnly
+   public Materialization getMaterializationInvalidationInfo(
+       CreationMetadata creationMetadata, String validTxnListStr) throws MetaException {
+     if (creationMetadata.getTablesUsed().isEmpty()) {
+       // Bail out
+       LOG.warn("Materialization creation metadata does not contain any table");
+       return null;
+     }
+ 
+     // Parse validTxnList
+     final ValidReadTxnList validTxnList =
+         new ValidReadTxnList(validTxnListStr);
+ 
+     // Parse validReaderWriteIdList from creation metadata
+     final ValidTxnWriteIdList validReaderWriteIdList =
+         new ValidTxnWriteIdList(creationMetadata.getValidTxnList());
+ 
+     // We are composing a query that returns a single row if an update happened after
+     // the materialization was created. Otherwise, the query returns 0 rows.
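+     // For example (illustrative names only): for a materialization over db1.t1 whose snapshot has a
+     // write id high water mark of 10, the query returns a row only if COMPLETED_TXN_COMPONENTS has a
+     // ctc_update_delete='Y' entry for db1.t1 with ctc_writeid > 10 (or in the invalid write id list),
+     // committed within the txn snapshot used to run the current query.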
+     Connection dbConn = null;
+     Statement stmt = null;
+     ResultSet rs = null;
+     try {
+       dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+       stmt = dbConn.createStatement();
+       stmt.setMaxRows(1);
+       StringBuilder query = new StringBuilder();
+       // compose a query that select transactions containing an update...
+       query.append("select ctc_update_delete from COMPLETED_TXN_COMPONENTS where ctc_update_delete='Y' AND (");
+       int i = 0;
+       for (String fullyQualifiedName : creationMetadata.getTablesUsed()) {
+         // ...for each of the tables that are part of the materialized view,
+         // where the transaction had to be committed after the materialization was created...
+         if (i != 0) {
+           query.append("OR");
+         }
+         String[] names = TxnUtils.getDbTableName(fullyQualifiedName);
+         query.append(" (ctc_database=" + quoteString(names[0]) + " AND ctc_table=" + quoteString(names[1]));
+         ValidWriteIdList tblValidWriteIdList =
+             validReaderWriteIdList.getTableValidWriteIdList(fullyQualifiedName);
+         if (tblValidWriteIdList == null) {
+           LOG.warn("ValidWriteIdList for table {} not present in creation metadata, this should not happen", fullyQualifiedName);
+           return null;
+         }
+         query.append(" AND (ctc_writeid > " + tblValidWriteIdList.getHighWatermark());
+         query.append(tblValidWriteIdList.getInvalidWriteIds().length == 0 ? ") " :
+             " OR ctc_writeid IN(" + StringUtils.join(",",
+                 Arrays.asList(ArrayUtils.toObject(tblValidWriteIdList.getInvalidWriteIds()))) + ") ");
+         query.append(") ");
+         i++;
+       }
+       // ... and where the transaction has already been committed as per snapshot taken
+       // when we are running current query
+       query.append(") AND ctc_txnid <= " + validTxnList.getHighWatermark());
+       query.append(validTxnList.getInvalidTransactions().length == 0 ? " " :
+           " AND ctc_txnid NOT IN(" + StringUtils.join(",",
+               Arrays.asList(ArrayUtils.toObject(validTxnList.getInvalidTransactions()))) + ") ");
+ 
+       // Execute query
+       String s = query.toString();
+       if (LOG.isDebugEnabled()) {
+         LOG.debug("Going to execute query <" + s + ">");
+       }
+       rs = stmt.executeQuery(s);
+ 
+       return new Materialization(rs.next());
+     } catch (SQLException ex) {
+       LOG.warn("getMaterializationInvalidationInfo failed due to " + getMessage(ex), ex);
+       throw new MetaException("Unable to retrieve materialization invalidation information due to " +
+           StringUtils.stringifyException(ex));
+     } finally {
+       close(rs, stmt, dbConn);
+     }
+   }
+ 
+   @Override
+   public LockResponse lockMaterializationRebuild(String dbName, String tableName, long txnId)
+       throws MetaException {
+ 
+     if (LOG.isDebugEnabled()) {
+       LOG.debug("Acquiring lock for materialization rebuild with txnId={} for {}", txnId, Warehouse.getQualifiedName(dbName,tableName));
+     }
+ 
+     TxnStore.MutexAPI.LockHandle handle = null;
+     Connection dbConn = null;
+     Statement stmt = null;
+     ResultSet rs = null;
+     try {
+       lockInternal();
+       /**
+        * MUTEX_KEY.MaterializationRebuild lock ensures that there is only 1 entry in
+        * Initiated/Working state for any resource. This ensures we do not run concurrent
+        * rebuild operations on any materialization.
+        */
+       handle = getMutexAPI().acquireLock(MUTEX_KEY.MaterializationRebuild.name());
+       dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+       stmt = dbConn.createStatement();
+ 
+       String selectQ = "select mrl_txn_id from MATERIALIZATION_REBUILD_LOCKS where" +
+           " mrl_db_name =" + quoteString(dbName) +
+           " AND mrl_tbl_name=" + quoteString(tableName);
+       LOG.debug("Going to execute query <" + selectQ + ">");
+       rs = stmt.executeQuery(selectQ);
+       if(rs.next()) {
+         LOG.info("Ignoring request to rebuild " + dbName + "/" + tableName +
+             " since it is already being rebuilt");
+         return new LockResponse(txnId, LockState.NOT_ACQUIRED);
+       }
+       String insertQ = "insert into MATERIALIZATION_REBUILD_LOCKS " +
+           "(mrl_txn_id, mrl_db_name, mrl_tbl_name, mrl_last_heartbeat) values (" + txnId +
+           ", '" + dbName + "', '" + tableName + "', " + Instant.now().toEpochMilli() + ")";
+       LOG.debug("Going to execute update <" + insertQ + ">");
+       stmt.executeUpdate(insertQ);
+       LOG.debug("Going to commit");
+       dbConn.commit();
+       return new LockResponse(txnId, LockState.ACQUIRED);
+     } catch (SQLException ex) {
+       LOG.warn("lockMaterializationRebuild failed due to " + getMessage(ex), ex);
+       throw new MetaException("Unable to retrieve materialization invalidation information due to " +
+           StringUtils.stringifyException(ex));
+     } finally {
+       close(rs, stmt, dbConn);
+       if(handle != null) {
+         handle.releaseLocks();
+       }
+       unlockInternal();
+     }
+   }
+ 
+   @Override
+   public boolean heartbeatLockMaterializationRebuild(String dbName, String tableName, long txnId)
+       throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "update MATERIALIZATION_REBUILD_LOCKS" +
+             " set mrl_last_heartbeat = " + Instant.now().toEpochMilli() +
+             " where mrl_txn_id = " + txnId +
+             " AND mrl_db_name =" + quoteString(dbName) +
+             " AND mrl_tbl_name=" + quoteString(tableName);
+         LOG.debug("Going to execute update <" + s + ">");
+         int rc = stmt.executeUpdate(s);
+         if (rc < 1) {
+           LOG.debug("Going to rollback");
+           dbConn.rollback();
+           LOG.info("No lock found for rebuild of " + Warehouse.getQualifiedName(dbName, tableName) +
+               " when trying to heartbeat");
+           // It could not be renewed, return that information
+           return false;
+         }
+         LOG.debug("Going to commit");
+         dbConn.commit();
+         // It could be renewed, return that information
+         return true;
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e,
+             "heartbeatLockMaterializationRebuild(" + Warehouse.getQualifiedName(dbName, tableName) + ", " + txnId + ")");
+         throw new MetaException("Unable to heartbeat rebuild lock due to " +
+             StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       return heartbeatLockMaterializationRebuild(dbName, tableName ,txnId);
+     }
+   }
+ 
+   @Override
+   public long cleanupMaterializationRebuildLocks(ValidTxnList validTxnList, long timeout) throws MetaException {
+     try {
+       // Aux values
+       long cnt = 0L;
+       List<Long> txnIds = new ArrayList<>();
+       long timeoutTime = Instant.now().toEpochMilli() - timeout;
+ 
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         String selectQ = "select mrl_txn_id, mrl_last_heartbeat from MATERIALIZATION_REBUILD_LOCKS";
+         LOG.debug("Going to execute query <" + selectQ + ">");
+         rs = stmt.executeQuery(selectQ);
+         while(rs.next()) {
+           long lastHeartbeat = rs.getLong(2);
+           if (lastHeartbeat < timeoutTime) {
+             // The heartbeat has timeout, double check whether we can remove it
+             long txnId = rs.getLong(1);
+             if (validTxnList.isTxnValid(txnId) || validTxnList.isTxnAborted(txnId)) {
+               // Txn was committed (but notification was not received) or it was aborted.
+               // Either case, we can clean it up
+               txnIds.add(txnId);
+             }
+           }
+         }
+         if (!txnIds.isEmpty()) {
+           String deleteQ = "delete from MATERIALIZATION_REBUILD_LOCKS where" +
+               " mrl_txn_id IN(" + StringUtils.join(",", txnIds) + ") ";
+           LOG.debug("Going to execute update <" + deleteQ + ">");
+           cnt = stmt.executeUpdate(deleteQ);
+         }
+         LOG.debug("Going to commit");
+         dbConn.commit();
+         return cnt;
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "cleanupMaterializationRebuildLocks");
+    

<TRUNCATED>

[10/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query40.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query40.q.out b/ql/src/test/results/clientpositive/perf/tez/query40.q.out
index 9920ad3..a3b6c03 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query40.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query40.q.out
@@ -71,115 +71,117 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_135]
-        Limit [LIM_134] (rows=100 width=135)
+      File Output Operator [FS_136]
+        Limit [LIM_135] (rows=100 width=135)
           Number of rows:100
-          Select Operator [SEL_133] (rows=210822976 width=135)
+          Select Operator [SEL_134] (rows=210822976 width=135)
             Output:["_col0","_col1","_col2","_col3"]
           <-Reducer 6 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_132]
-              Group By Operator [GBY_131] (rows=210822976 width=135)
+            SHUFFLE [RS_133]
+              Group By Operator [GBY_132] (rows=210822976 width=135)
                 Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0, KEY._col1
               <-Reducer 5 [SIMPLE_EDGE]
                 SHUFFLE [RS_30]
                   PartitionCols:_col0, _col1
                   Group By Operator [GBY_29] (rows=421645953 width=135)
                     Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col0, _col1
-                    Select Operator [SEL_27] (rows=421645953 width=135)
-                      Output:["_col0","_col1","_col2","_col3"]
-                      Merge Join Operator [MERGEJOIN_100] (rows=421645953 width=135)
-                        Conds:RS_24._col1=RS_119._col0(Inner),Output:["_col4","_col7","_col9","_col11","_col14"]
-                      <-Map 13 [SIMPLE_EDGE] vectorized
-                        SHUFFLE [RS_119]
-                          PartitionCols:_col0
-                          Select Operator [SEL_118] (rows=27 width=1029)
-                            Output:["_col0","_col1"]
-                            Filter Operator [FIL_117] (rows=27 width=1029)
-                              predicate:w_warehouse_sk is not null
-                              TableScan [TS_12] (rows=27 width=1029)
-                                default@warehouse,warehouse,Tbl:COMPLETE,Col:NONE,Output:["w_warehouse_sk","w_state"]
-                      <-Reducer 4 [SIMPLE_EDGE]
-                        SHUFFLE [RS_24]
-                          PartitionCols:_col1
-                          Merge Join Operator [MERGEJOIN_99] (rows=383314495 width=135)
-                            Conds:RS_21._col2=RS_111._col0(Inner),Output:["_col1","_col4","_col7","_col9","_col11"]
-                          <-Map 11 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_111]
-                              PartitionCols:_col0
-                              Select Operator [SEL_110] (rows=51333 width=1436)
-                                Output:["_col0","_col1"]
-                                Filter Operator [FIL_109] (rows=51333 width=1436)
-                                  predicate:(i_current_price BETWEEN 0.99 AND 1.49 and i_item_sk is not null)
-                                  TableScan [TS_9] (rows=462000 width=1436)
-                                    default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_current_price"]
-                          <-Reducer 3 [SIMPLE_EDGE]
-                            SHUFFLE [RS_21]
-                              PartitionCols:_col2
-                              Merge Join Operator [MERGEJOIN_98] (rows=348467716 width=135)
-                                Conds:RS_18._col0=RS_103._col0(Inner),Output:["_col1","_col2","_col4","_col7","_col9"]
-                              <-Map 9 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_103]
-                                  PartitionCols:_col0
-                                  Select Operator [SEL_102] (rows=8116 width=1119)
-                                    Output:["_col0","_col1"]
-                                    Filter Operator [FIL_101] (rows=8116 width=1119)
-                                      predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-03-09 00:00:00' AND TIMESTAMP'1998-05-08 00:00:00' and d_date_sk is not null)
-                                      TableScan [TS_6] (rows=73049 width=1119)
-                                        default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
-                              <-Reducer 2 [SIMPLE_EDGE]
-                                SHUFFLE [RS_18]
-                                  PartitionCols:_col0
-                                  Merge Join Operator [MERGEJOIN_97] (rows=316788826 width=135)
-                                    Conds:RS_127._col2, _col3=RS_130._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col4","_col7"]
-                                  <-Map 1 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_127]
-                                      PartitionCols:_col2, _col3
-                                      Select Operator [SEL_126] (rows=287989836 width=135)
-                                        Output:["_col0","_col1","_col2","_col3","_col4"]
-                                        Filter Operator [FIL_125] (rows=287989836 width=135)
-                                          predicate:((cs_item_sk BETWEEN DynamicValue(RS_22_item_i_item_sk_min) AND DynamicValue(RS_22_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_22_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_19_date_dim_d_date_sk_min) AND DynamicValue(RS_19_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_19_date_dim_d_date_sk_bloom_filter))) and (cs_warehouse_sk BETWEEN DynamicValue(RS_25_warehouse_w_warehouse_sk_min) AND DynamicValue(RS_25_warehouse_w_warehouse_sk_max) and in_bloom_filter(cs_warehouse_sk, DynamicValue(RS_25_warehouse_w_warehouse_sk_bloom_filter))) and cs_item_sk is not null and cs_sold_date_sk is not null and cs_warehouse_sk is not null)
-                                          TableScan [TS_0] (rows=287989836 width=135)
-                                            default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_warehouse_sk","cs_item_sk","cs_order_number","cs_sales_price"]
-                                          <-Reducer 10 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_108]
-                                              Group By Operator [GBY_107] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                              <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_106]
-                                                  Group By Operator [GBY_105] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                    Select Operator [SEL_104] (rows=8116 width=1119)
-                                                      Output:["_col0"]
-                                                       Please refer to the previous Select Operator [SEL_102]
-                                          <-Reducer 12 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_116]
-                                              Group By Operator [GBY_115] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                              <-Map 11 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_114]
-                                                  Group By Operator [GBY_113] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                    Select Operator [SEL_112] (rows=51333 width=1436)
-                                                      Output:["_col0"]
-                                                       Please refer to the previous Select Operator [SEL_110]
-                                          <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_124]
-                                              Group By Operator [GBY_123] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                              <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_122]
-                                                  Group By Operator [GBY_121] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                    Select Operator [SEL_120] (rows=27 width=1029)
-                                                      Output:["_col0"]
-                                                       Please refer to the previous Select Operator [SEL_118]
-                                  <-Map 8 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_130]
-                                      PartitionCols:_col0, _col1
-                                      Select Operator [SEL_129] (rows=28798881 width=106)
-                                        Output:["_col0","_col1","_col2"]
-                                        Filter Operator [FIL_128] (rows=28798881 width=106)
-                                          predicate:cr_item_sk is not null
-                                          TableScan [TS_3] (rows=28798881 width=106)
-                                            default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_item_sk","cr_order_number","cr_refunded_cash"]
+                    Top N Key Operator [TNK_55] (rows=421645953 width=135)
+                      keys:_col0, _col1,sort order:++,top n:100
+                      Select Operator [SEL_27] (rows=421645953 width=135)
+                        Output:["_col0","_col1","_col2","_col3"]
+                        Merge Join Operator [MERGEJOIN_101] (rows=421645953 width=135)
+                          Conds:RS_24._col1=RS_120._col0(Inner),Output:["_col4","_col7","_col9","_col11","_col14"]
+                        <-Map 13 [SIMPLE_EDGE] vectorized
+                          SHUFFLE [RS_120]
+                            PartitionCols:_col0
+                            Select Operator [SEL_119] (rows=27 width=1029)
+                              Output:["_col0","_col1"]
+                              Filter Operator [FIL_118] (rows=27 width=1029)
+                                predicate:w_warehouse_sk is not null
+                                TableScan [TS_12] (rows=27 width=1029)
+                                  default@warehouse,warehouse,Tbl:COMPLETE,Col:NONE,Output:["w_warehouse_sk","w_state"]
+                        <-Reducer 4 [SIMPLE_EDGE]
+                          SHUFFLE [RS_24]
+                            PartitionCols:_col1
+                            Merge Join Operator [MERGEJOIN_100] (rows=383314495 width=135)
+                              Conds:RS_21._col2=RS_112._col0(Inner),Output:["_col1","_col4","_col7","_col9","_col11"]
+                            <-Map 11 [SIMPLE_EDGE] vectorized
+                              SHUFFLE [RS_112]
+                                PartitionCols:_col0
+                                Select Operator [SEL_111] (rows=51333 width=1436)
+                                  Output:["_col0","_col1"]
+                                  Filter Operator [FIL_110] (rows=51333 width=1436)
+                                    predicate:(i_current_price BETWEEN 0.99 AND 1.49 and i_item_sk is not null)
+                                    TableScan [TS_9] (rows=462000 width=1436)
+                                      default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_current_price"]
+                            <-Reducer 3 [SIMPLE_EDGE]
+                              SHUFFLE [RS_21]
+                                PartitionCols:_col2
+                                Merge Join Operator [MERGEJOIN_99] (rows=348467716 width=135)
+                                  Conds:RS_18._col0=RS_104._col0(Inner),Output:["_col1","_col2","_col4","_col7","_col9"]
+                                <-Map 9 [SIMPLE_EDGE] vectorized
+                                  SHUFFLE [RS_104]
+                                    PartitionCols:_col0
+                                    Select Operator [SEL_103] (rows=8116 width=1119)
+                                      Output:["_col0","_col1"]
+                                      Filter Operator [FIL_102] (rows=8116 width=1119)
+                                        predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-03-09 00:00:00' AND TIMESTAMP'1998-05-08 00:00:00' and d_date_sk is not null)
+                                        TableScan [TS_6] (rows=73049 width=1119)
+                                          default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
+                                <-Reducer 2 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_18]
+                                    PartitionCols:_col0
+                                    Merge Join Operator [MERGEJOIN_98] (rows=316788826 width=135)
+                                      Conds:RS_128._col2, _col3=RS_131._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col4","_col7"]
+                                    <-Map 1 [SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_128]
+                                        PartitionCols:_col2, _col3
+                                        Select Operator [SEL_127] (rows=287989836 width=135)
+                                          Output:["_col0","_col1","_col2","_col3","_col4"]
+                                          Filter Operator [FIL_126] (rows=287989836 width=135)
+                                            predicate:((cs_item_sk BETWEEN DynamicValue(RS_22_item_i_item_sk_min) AND DynamicValue(RS_22_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_22_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_19_date_dim_d_date_sk_min) AND DynamicValue(RS_19_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_19_date_dim_d_date_sk_bloom_filter))) and (cs_warehouse_sk BETWEEN DynamicValue(RS_25_warehouse_w_warehouse_sk_min) AND DynamicValue(RS_25_warehouse_w_warehouse_sk_max) and in_bloom_filter(cs_warehouse_sk, DynamicValue(RS_25_warehouse_w_warehouse_sk_bloom_filter))) and cs_item_sk is not null and cs_sold_date_sk is not null and cs_warehouse_sk is not null)
+                                            TableScan [TS_0] (rows=287989836 width=135)
+                                              default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_warehouse_sk","cs_item_sk","cs_order_number","cs_sales_price"]
+                                            <-Reducer 10 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_109]
+                                                Group By Operator [GBY_108] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_107]
+                                                    Group By Operator [GBY_106] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                      Select Operator [SEL_105] (rows=8116 width=1119)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Select Operator [SEL_103]
+                                            <-Reducer 12 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_117]
+                                                Group By Operator [GBY_116] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                <-Map 11 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_115]
+                                                    Group By Operator [GBY_114] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                      Select Operator [SEL_113] (rows=51333 width=1436)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Select Operator [SEL_111]
+                                            <-Reducer 14 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_125]
+                                                Group By Operator [GBY_124] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_123]
+                                                    Group By Operator [GBY_122] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                      Select Operator [SEL_121] (rows=27 width=1029)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Select Operator [SEL_119]
+                                    <-Map 8 [SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_131]
+                                        PartitionCols:_col0, _col1
+                                        Select Operator [SEL_130] (rows=28798881 width=106)
+                                          Output:["_col0","_col1","_col2"]
+                                          Filter Operator [FIL_129] (rows=28798881 width=106)
+                                            predicate:cr_item_sk is not null
+                                            TableScan [TS_3] (rows=28798881 width=106)
+                                              default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_item_sk","cr_order_number","cr_refunded_cash"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query43.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query43.q.out b/ql/src/test/results/clientpositive/perf/tez/query43.q.out
index 394f728..afa3363 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query43.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query43.q.out
@@ -50,76 +50,78 @@ Stage-0
     limit:100
     Stage-1
       Reducer 5 vectorized
-      File Output Operator [FS_78]
-        Limit [LIM_77] (rows=100 width=88)
+      File Output Operator [FS_79]
+        Limit [LIM_78] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_76] (rows=348477374 width=88)
+          Select Operator [SEL_77] (rows=348477374 width=88)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
           <-Reducer 4 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_75]
-              Group By Operator [GBY_74] (rows=348477374 width=88)
+            SHUFFLE [RS_76]
+              Group By Operator [GBY_75] (rows=348477374 width=88)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)"],keys:KEY._col0, KEY._col1
               <-Reducer 3 [SIMPLE_EDGE]
                 SHUFFLE [RS_18]
                   PartitionCols:_col0, _col1
                   Group By Operator [GBY_17] (rows=696954748 width=88)
                     Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)","sum(_col5)","sum(_col6)","sum(_col7)","sum(_col8)"],keys:_col0, _col1
-                    Select Operator [SEL_15] (rows=696954748 width=88)
-                      Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
-                      Merge Join Operator [MERGEJOIN_54] (rows=696954748 width=88)
-                        Conds:RS_12._col1=RS_65._col0(Inner),Output:["_col2","_col5","_col7","_col8"]
-                      <-Map 8 [SIMPLE_EDGE] vectorized
-                        PARTITION_ONLY_SHUFFLE [RS_65]
-                          PartitionCols:_col0
-                          Select Operator [SEL_64] (rows=852 width=1910)
-                            Output:["_col0","_col1","_col2"]
-                            Filter Operator [FIL_63] (rows=852 width=1910)
-                              predicate:((s_gmt_offset = -6) and s_store_sk is not null)
-                              TableScan [TS_6] (rows=1704 width=1910)
-                                default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_id","s_store_name","s_gmt_offset"]
-                      <-Reducer 2 [SIMPLE_EDGE]
-                        SHUFFLE [RS_12]
-                          PartitionCols:_col1
-                          Merge Join Operator [MERGEJOIN_53] (rows=633595212 width=88)
-                            Conds:RS_73._col0=RS_57._col0(Inner),Output:["_col1","_col2","_col5"]
-                          <-Map 6 [SIMPLE_EDGE] vectorized
-                            PARTITION_ONLY_SHUFFLE [RS_57]
-                              PartitionCols:_col0
-                              Select Operator [SEL_56] (rows=36524 width=1119)
-                                Output:["_col0","_col2"]
-                                Filter Operator [FIL_55] (rows=36524 width=1119)
-                                  predicate:((d_year = 1998) and d_date_sk is not null)
-                                  TableScan [TS_3] (rows=73049 width=1119)
-                                    default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_day_name"]
-                          <-Map 1 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_73]
-                              PartitionCols:_col0
-                              Select Operator [SEL_72] (rows=575995635 width=88)
-                                Output:["_col0","_col1","_col2"]
-                                Filter Operator [FIL_71] (rows=575995635 width=88)
-                                  predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_10_date_dim_d_date_sk_min) AND DynamicValue(RS_10_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_10_date_dim_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_13_store_s_store_sk_min) AND DynamicValue(RS_13_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_13_store_s_store_sk_bloom_filter))) and ss_sold_date_sk is not null and ss_store_sk is not null)
-                                  TableScan [TS_0] (rows=575995635 width=88)
-                                    default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_store_sk","ss_sales_price"]
-                                  <-Reducer 7 [BROADCAST_EDGE] vectorized
-                                    BROADCAST [RS_62]
-                                      Group By Operator [GBY_61] (rows=1 width=12)
-                                        Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                      <-Map 6 [CUSTOM_SIMPLE_EDGE] vectorized
-                                        PARTITION_ONLY_SHUFFLE [RS_60]
-                                          Group By Operator [GBY_59] (rows=1 width=12)
-                                            Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                            Select Operator [SEL_58] (rows=36524 width=1119)
-                                              Output:["_col0"]
-                                               Please refer to the previous Select Operator [SEL_56]
-                                  <-Reducer 9 [BROADCAST_EDGE] vectorized
-                                    BROADCAST [RS_70]
-                                      Group By Operator [GBY_69] (rows=1 width=12)
-                                        Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                      <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
-                                        PARTITION_ONLY_SHUFFLE [RS_68]
-                                          Group By Operator [GBY_67] (rows=1 width=12)
-                                            Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                            Select Operator [SEL_66] (rows=852 width=1910)
-                                              Output:["_col0"]
-                                               Please refer to the previous Select Operator [SEL_64]
+                    Top N Key Operator [TNK_33] (rows=696954748 width=88)
+                      keys:_col0, _col1,sort order:++,top n:100
+                      Select Operator [SEL_15] (rows=696954748 width=88)
+                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
+                        Merge Join Operator [MERGEJOIN_55] (rows=696954748 width=88)
+                          Conds:RS_12._col1=RS_66._col0(Inner),Output:["_col2","_col5","_col7","_col8"]
+                        <-Map 8 [SIMPLE_EDGE] vectorized
+                          PARTITION_ONLY_SHUFFLE [RS_66]
+                            PartitionCols:_col0
+                            Select Operator [SEL_65] (rows=852 width=1910)
+                              Output:["_col0","_col1","_col2"]
+                              Filter Operator [FIL_64] (rows=852 width=1910)
+                                predicate:((s_gmt_offset = -6) and s_store_sk is not null)
+                                TableScan [TS_6] (rows=1704 width=1910)
+                                  default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_id","s_store_name","s_gmt_offset"]
+                        <-Reducer 2 [SIMPLE_EDGE]
+                          SHUFFLE [RS_12]
+                            PartitionCols:_col1
+                            Merge Join Operator [MERGEJOIN_54] (rows=633595212 width=88)
+                              Conds:RS_74._col0=RS_58._col0(Inner),Output:["_col1","_col2","_col5"]
+                            <-Map 6 [SIMPLE_EDGE] vectorized
+                              PARTITION_ONLY_SHUFFLE [RS_58]
+                                PartitionCols:_col0
+                                Select Operator [SEL_57] (rows=36524 width=1119)
+                                  Output:["_col0","_col2"]
+                                  Filter Operator [FIL_56] (rows=36524 width=1119)
+                                    predicate:((d_year = 1998) and d_date_sk is not null)
+                                    TableScan [TS_3] (rows=73049 width=1119)
+                                      default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_day_name"]
+                            <-Map 1 [SIMPLE_EDGE] vectorized
+                              SHUFFLE [RS_74]
+                                PartitionCols:_col0
+                                Select Operator [SEL_73] (rows=575995635 width=88)
+                                  Output:["_col0","_col1","_col2"]
+                                  Filter Operator [FIL_72] (rows=575995635 width=88)
+                                    predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_10_date_dim_d_date_sk_min) AND DynamicValue(RS_10_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_10_date_dim_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_13_store_s_store_sk_min) AND DynamicValue(RS_13_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_13_store_s_store_sk_bloom_filter))) and ss_sold_date_sk is not null and ss_store_sk is not null)
+                                    TableScan [TS_0] (rows=575995635 width=88)
+                                      default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_store_sk","ss_sales_price"]
+                                    <-Reducer 7 [BROADCAST_EDGE] vectorized
+                                      BROADCAST [RS_63]
+                                        Group By Operator [GBY_62] (rows=1 width=12)
+                                          Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                        <-Map 6 [CUSTOM_SIMPLE_EDGE] vectorized
+                                          PARTITION_ONLY_SHUFFLE [RS_61]
+                                            Group By Operator [GBY_60] (rows=1 width=12)
+                                              Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                              Select Operator [SEL_59] (rows=36524 width=1119)
+                                                Output:["_col0"]
+                                                 Please refer to the previous Select Operator [SEL_57]
+                                    <-Reducer 9 [BROADCAST_EDGE] vectorized
+                                      BROADCAST [RS_71]
+                                        Group By Operator [GBY_70] (rows=1 width=12)
+                                          Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                        <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
+                                          PARTITION_ONLY_SHUFFLE [RS_69]
+                                            Group By Operator [GBY_68] (rows=1 width=12)
+                                              Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                              Select Operator [SEL_67] (rows=852 width=1910)
+                                                Output:["_col0"]
+                                                 Please refer to the previous Select Operator [SEL_65]
 

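A note for readers comparing these plan diffs: the new "Top N Key Operator" entries (for example TNK_33 above, with keys:_col0, _col1, sort order:++, top n:100) sit just below the final limited aggregation and drop rows whose keys can no longer reach the top n before they are shuffled. The following is a minimal, hypothetical Java sketch of that filtering idea under simplified assumptions (a single in-memory comparator, no vectorization, no memory-pressure handling); it is not the operator added by this patch, and the class and method names are illustrative only.

import java.util.Comparator;
import java.util.List;
import java.util.TreeSet;

// Illustrative only: forwards a row when its grouping key is still among the
// n smallest distinct keys seen so far, and drops it otherwise. False
// positives (keys forwarded early and evicted later) are harmless, because a
// downstream group-by plus limit still computes the exact result.
public class TopNKeyFilter<K> {
  private final int topN;
  private final Comparator<K> cmp;
  private final TreeSet<K> keptKeys;   // running set of the top-n distinct keys

  public TopNKeyFilter(int topN, Comparator<K> cmp) {
    this.topN = topN;
    this.cmp = cmp;
    this.keptKeys = new TreeSet<>(cmp);
  }

  public boolean canForward(K key) {
    if (keptKeys.contains(key)) {
      return true;                     // key already survives; keep its duplicates
    }
    if (keptKeys.size() < topN) {
      keptKeys.add(key);
      return true;                     // still room among the top-n distinct keys
    }
    if (cmp.compare(key, keptKeys.last()) < 0) {
      keptKeys.pollLast();             // evict the current worst kept key
      keptKeys.add(key);
      return true;
    }
    return false;                      // can never reach the top n; drop the row
  }

  public static void main(String[] args) {
    TopNKeyFilter<Integer> filter = new TopNKeyFilter<>(2, Comparator.naturalOrder());
    for (int key : List.of(7, 3, 7, 9, 1, 3, 9)) {
      System.out.println(key + " -> " + (filter.canForward(key) ? "forward" : "drop"));
    }
  }
}

With top n:2 and ascending order this forwards the rows for keys 1 and 3 (plus the early rows for 7 before it is evicted) and drops everything else before the shuffle.
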
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query45.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query45.q.out b/ql/src/test/results/clientpositive/perf/tez/query45.q.out
index c9d820b..edb047d 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query45.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query45.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join MERGEJOIN[132][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 4' is a cross product
+Warning: Shuffle Join MERGEJOIN[133][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 4' is a cross product
 PREHOOK: query: explain
 select  ca_zip, ca_county, sum(ws_sales_price)
  from web_sales, customer, customer_address, date_dim, item
@@ -59,147 +59,149 @@ Stage-0
     limit:100
     Stage-1
       Reducer 6 vectorized
-      File Output Operator [FS_172]
-        Limit [LIM_171] (rows=100 width=152)
+      File Output Operator [FS_173]
+        Limit [LIM_172] (rows=100 width=152)
           Number of rows:100
-          Select Operator [SEL_170] (rows=95833781 width=152)
+          Select Operator [SEL_171] (rows=95833781 width=152)
             Output:["_col0","_col1","_col2"]
           <-Reducer 5 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_169]
-              Group By Operator [GBY_168] (rows=95833781 width=152)
+            SHUFFLE [RS_170]
+              Group By Operator [GBY_169] (rows=95833781 width=152)
                 Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1
               <-Reducer 4 [SIMPLE_EDGE]
                 SHUFFLE [RS_53]
                   PartitionCols:_col0, _col1
                   Group By Operator [GBY_52] (rows=191667562 width=152)
                     Output:["_col0","_col1","_col2"],aggregations:["sum(_col3)"],keys:_col8, _col7
-                    Select Operator [SEL_51] (rows=191667562 width=152)
-                      Output:["_col3","_col7","_col8"]
-                      Filter Operator [FIL_50] (rows=191667562 width=152)
-                        predicate:((substr(_col8, 1, 5)) IN ('85669', '86197', '88274', '83405', '86475', '85392', '85460', '80348', '81792') or CASE WHEN ((_col14 = 0L)) THEN (false) WHEN (_col17 is not null) THEN (true) WHEN (_col13 is null) THEN (null) WHEN ((_col15 < _col14)) THEN (null) ELSE (false) END)
-                        Select Operator [SEL_49] (rows=191667562 width=152)
-                          Output:["_col3","_col7","_col8","_col13","_col14","_col15","_col17"]
-                          Merge Join Operator [MERGEJOIN_132] (rows=191667562 width=152)
-                            Conds:(Inner),Output:["_col3","_col4","_col6","_col8","_col12","_col16","_col17"]
-                          <-Reducer 13 [CUSTOM_SIMPLE_EDGE] vectorized
-                            PARTITION_ONLY_SHUFFLE [RS_167]
-                              Group By Operator [GBY_166] (rows=1 width=16)
-                                Output:["_col0","_col1"],aggregations:["count(VALUE._col0)","count(VALUE._col1)"]
-                              <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_149]
-                                  Group By Operator [GBY_147] (rows=1 width=16)
-                                    Output:["_col0","_col1"],aggregations:["count()","count(i_item_id)"]
-                                    Select Operator [SEL_144] (rows=231000 width=1436)
-                                      Output:["i_item_id"]
-                                      Filter Operator [FIL_141] (rows=231000 width=1436)
-                                        predicate:(i_item_sk) IN (2, 3, 5, 7, 11, 13, 17, 19, 23, 29)
-                                        TableScan [TS_6] (rows=462000 width=1436)
-                                          default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id"]
-                          <-Reducer 3 [CUSTOM_SIMPLE_EDGE]
-                            PARTITION_ONLY_SHUFFLE [RS_46]
-                              Merge Join Operator [MERGEJOIN_131] (rows=191667562 width=135)
-                                Conds:RS_43._col0=RS_44._col6(Inner),Output:["_col3","_col4","_col6","_col8","_col12"]
-                              <-Reducer 10 [SIMPLE_EDGE]
-                                SHUFFLE [RS_44]
-                                  PartitionCols:_col6
-                                  Merge Join Operator [MERGEJOIN_130] (rows=174243235 width=135)
-                                    Conds:RS_29._col0=RS_30._col1(Inner),Output:["_col1","_col3","_col6","_col7"]
-                                  <-Reducer 9 [SIMPLE_EDGE]
-                                    PARTITION_ONLY_SHUFFLE [RS_29]
-                                      PartitionCols:_col0
-                                      Merge Join Operator [MERGEJOIN_128] (rows=508200 width=1436)
-                                        Conds:RS_145._col1=RS_152._col0(Left Outer),Output:["_col0","_col1","_col3"]
-                                      <-Map 8 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_145]
-                                          PartitionCols:_col1
-                                          Select Operator [SEL_142] (rows=462000 width=1436)
-                                            Output:["_col0","_col1"]
-                                            Filter Operator [FIL_139] (rows=462000 width=1436)
-                                              predicate:i_item_sk is not null
-                                               Please refer to the previous TableScan [TS_6]
-                                      <-Reducer 12 [ONE_TO_ONE_EDGE] vectorized
-                                        FORWARD [RS_152]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_151] (rows=115500 width=1436)
-                                            Output:["_col0","_col1"]
-                                            Group By Operator [GBY_150] (rows=115500 width=1436)
-                                              Output:["_col0"],keys:KEY._col0
-                                            <-Map 8 [SIMPLE_EDGE] vectorized
-                                              SHUFFLE [RS_148]
-                                                PartitionCols:_col0
-                                                Group By Operator [GBY_146] (rows=231000 width=1436)
-                                                  Output:["_col0"],keys:i_item_id
-                                                  Select Operator [SEL_143] (rows=231000 width=1436)
-                                                    Output:["i_item_id"]
-                                                    Filter Operator [FIL_140] (rows=231000 width=1436)
-                                                      predicate:(i_item_sk) IN (2, 3, 5, 7, 11, 13, 17, 19, 23, 29)
-                                                       Please refer to the previous TableScan [TS_6]
-                                  <-Reducer 15 [SIMPLE_EDGE]
-                                    SHUFFLE [RS_30]
-                                      PartitionCols:_col1
-                                      Merge Join Operator [MERGEJOIN_129] (rows=158402938 width=135)
-                                        Conds:RS_165._col0=RS_155._col0(Inner),Output:["_col1","_col2","_col3"]
-                                      <-Map 16 [SIMPLE_EDGE] vectorized
-                                        PARTITION_ONLY_SHUFFLE [RS_155]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_154] (rows=18262 width=1119)
-                                            Output:["_col0"]
-                                            Filter Operator [FIL_153] (rows=18262 width=1119)
-                                              predicate:((d_qoy = 2) and (d_year = 2000) and d_date_sk is not null)
-                                              TableScan [TS_19] (rows=73049 width=1119)
-                                                default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
-                                      <-Map 14 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_165]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_164] (rows=144002668 width=135)
-                                            Output:["_col0","_col1","_col2","_col3"]
-                                            Filter Operator [FIL_163] (rows=144002668 width=135)
-                                              predicate:((ws_item_sk BETWEEN DynamicValue(RS_29_item_i_item_sk_min) AND DynamicValue(RS_29_item_i_item_sk_max) and in_bloom_filter(ws_item_sk, DynamicValue(RS_29_item_i_item_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_23_date_dim_d_date_sk_min) AND DynamicValue(RS_23_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_23_date_dim_d_date_sk_bloom_filter))) and ws_bill_customer_sk is not null and ws_item_sk is not null and ws_sold_date_sk is not null)
-                                              TableScan [TS_16] (rows=144002668 width=135)
-                                                default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_bill_customer_sk","ws_sales_price"]
-                                              <-Reducer 11 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_162]
-                                                  Group By Operator [GBY_161] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Reducer 9 [CUSTOM_SIMPLE_EDGE]
-                                                    PARTITION_ONLY_SHUFFLE [RS_114]
-                                                      Group By Operator [GBY_113] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_112] (rows=508200 width=1436)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_128]
-                                              <-Reducer 17 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_160]
-                                                  Group By Operator [GBY_159] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 16 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_158]
-                                                      Group By Operator [GBY_157] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_156] (rows=18262 width=1119)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_154]
-                              <-Reducer 2 [SIMPLE_EDGE]
-                                SHUFFLE [RS_43]
-                                  PartitionCols:_col0
-                                  Merge Join Operator [MERGEJOIN_127] (rows=88000001 width=860)
-                                    Conds:RS_135._col1=RS_138._col0(Inner),Output:["_col0","_col3","_col4"]
-                                  <-Map 1 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_135]
-                                      PartitionCols:_col1
-                                      Select Operator [SEL_134] (rows=80000000 width=860)
-                                        Output:["_col0","_col1"]
-                                        Filter Operator [FIL_133] (rows=80000000 width=860)
-                                          predicate:(c_current_addr_sk is not null and c_customer_sk is not null)
-                                          TableScan [TS_0] (rows=80000000 width=860)
-                                            default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_addr_sk"]
-                                  <-Map 7 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_138]
-                                      PartitionCols:_col0
-                                      Select Operator [SEL_137] (rows=40000000 width=1014)
-                                        Output:["_col0","_col1","_col2"]
-                                        Filter Operator [FIL_136] (rows=40000000 width=1014)
-                                          predicate:ca_address_sk is not null
-                                          TableScan [TS_3] (rows=40000000 width=1014)
-                                            default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county","ca_zip"]
+                    Top N Key Operator [TNK_82] (rows=191667562 width=152)
+                      keys:_col8, _col7,sort order:++,top n:100
+                      Select Operator [SEL_51] (rows=191667562 width=152)
+                        Output:["_col3","_col7","_col8"]
+                        Filter Operator [FIL_50] (rows=191667562 width=152)
+                          predicate:((substr(_col8, 1, 5)) IN ('85669', '86197', '88274', '83405', '86475', '85392', '85460', '80348', '81792') or CASE WHEN ((_col14 = 0L)) THEN (false) WHEN (_col17 is not null) THEN (true) WHEN (_col13 is null) THEN (null) WHEN ((_col15 < _col14)) THEN (null) ELSE (false) END)
+                          Select Operator [SEL_49] (rows=191667562 width=152)
+                            Output:["_col3","_col7","_col8","_col13","_col14","_col15","_col17"]
+                            Merge Join Operator [MERGEJOIN_133] (rows=191667562 width=152)
+                              Conds:(Inner),Output:["_col3","_col4","_col6","_col8","_col12","_col16","_col17"]
+                            <-Reducer 13 [CUSTOM_SIMPLE_EDGE] vectorized
+                              PARTITION_ONLY_SHUFFLE [RS_168]
+                                Group By Operator [GBY_167] (rows=1 width=16)
+                                  Output:["_col0","_col1"],aggregations:["count(VALUE._col0)","count(VALUE._col1)"]
+                                <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
+                                  SHUFFLE [RS_150]
+                                    Group By Operator [GBY_148] (rows=1 width=16)
+                                      Output:["_col0","_col1"],aggregations:["count()","count(i_item_id)"]
+                                      Select Operator [SEL_145] (rows=231000 width=1436)
+                                        Output:["i_item_id"]
+                                        Filter Operator [FIL_142] (rows=231000 width=1436)
+                                          predicate:(i_item_sk) IN (2, 3, 5, 7, 11, 13, 17, 19, 23, 29)
+                                          TableScan [TS_6] (rows=462000 width=1436)
+                                            default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id"]
+                            <-Reducer 3 [CUSTOM_SIMPLE_EDGE]
+                              PARTITION_ONLY_SHUFFLE [RS_46]
+                                Merge Join Operator [MERGEJOIN_132] (rows=191667562 width=135)
+                                  Conds:RS_43._col0=RS_44._col6(Inner),Output:["_col3","_col4","_col6","_col8","_col12"]
+                                <-Reducer 10 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_44]
+                                    PartitionCols:_col6
+                                    Merge Join Operator [MERGEJOIN_131] (rows=174243235 width=135)
+                                      Conds:RS_29._col0=RS_30._col1(Inner),Output:["_col1","_col3","_col6","_col7"]
+                                    <-Reducer 9 [SIMPLE_EDGE]
+                                      PARTITION_ONLY_SHUFFLE [RS_29]
+                                        PartitionCols:_col0
+                                        Merge Join Operator [MERGEJOIN_129] (rows=508200 width=1436)
+                                          Conds:RS_146._col1=RS_153._col0(Left Outer),Output:["_col0","_col1","_col3"]
+                                        <-Map 8 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_146]
+                                            PartitionCols:_col1
+                                            Select Operator [SEL_143] (rows=462000 width=1436)
+                                              Output:["_col0","_col1"]
+                                              Filter Operator [FIL_140] (rows=462000 width=1436)
+                                                predicate:i_item_sk is not null
+                                                 Please refer to the previous TableScan [TS_6]
+                                        <-Reducer 12 [ONE_TO_ONE_EDGE] vectorized
+                                          FORWARD [RS_153]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_152] (rows=115500 width=1436)
+                                              Output:["_col0","_col1"]
+                                              Group By Operator [GBY_151] (rows=115500 width=1436)
+                                                Output:["_col0"],keys:KEY._col0
+                                              <-Map 8 [SIMPLE_EDGE] vectorized
+                                                SHUFFLE [RS_149]
+                                                  PartitionCols:_col0
+                                                  Group By Operator [GBY_147] (rows=231000 width=1436)
+                                                    Output:["_col0"],keys:i_item_id
+                                                    Select Operator [SEL_144] (rows=231000 width=1436)
+                                                      Output:["i_item_id"]
+                                                      Filter Operator [FIL_141] (rows=231000 width=1436)
+                                                        predicate:(i_item_sk) IN (2, 3, 5, 7, 11, 13, 17, 19, 23, 29)
+                                                         Please refer to the previous TableScan [TS_6]
+                                    <-Reducer 15 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_30]
+                                        PartitionCols:_col1
+                                        Merge Join Operator [MERGEJOIN_130] (rows=158402938 width=135)
+                                          Conds:RS_166._col0=RS_156._col0(Inner),Output:["_col1","_col2","_col3"]
+                                        <-Map 16 [SIMPLE_EDGE] vectorized
+                                          PARTITION_ONLY_SHUFFLE [RS_156]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_155] (rows=18262 width=1119)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_154] (rows=18262 width=1119)
+                                                predicate:((d_qoy = 2) and (d_year = 2000) and d_date_sk is not null)
+                                                TableScan [TS_19] (rows=73049 width=1119)
+                                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
+                                        <-Map 14 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_166]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_165] (rows=144002668 width=135)
+                                              Output:["_col0","_col1","_col2","_col3"]
+                                              Filter Operator [FIL_164] (rows=144002668 width=135)
+                                                predicate:((ws_item_sk BETWEEN DynamicValue(RS_29_item_i_item_sk_min) AND DynamicValue(RS_29_item_i_item_sk_max) and in_bloom_filter(ws_item_sk, DynamicValue(RS_29_item_i_item_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_23_date_dim_d_date_sk_min) AND DynamicValue(RS_23_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_23_date_dim_d_date_sk_bloom_filter))) and ws_bill_customer_sk is not null and ws_item_sk is not null and ws_sold_date_sk is not null)
+                                                TableScan [TS_16] (rows=144002668 width=135)
+                                                  default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_bill_customer_sk","ws_sales_price"]
+                                                <-Reducer 11 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_163]
+                                                    Group By Operator [GBY_162] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Reducer 9 [CUSTOM_SIMPLE_EDGE]
+                                                      PARTITION_ONLY_SHUFFLE [RS_115]
+                                                        Group By Operator [GBY_114] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_113] (rows=508200 width=1436)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Merge Join Operator [MERGEJOIN_129]
+                                                <-Reducer 17 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_161]
+                                                    Group By Operator [GBY_160] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 16 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      PARTITION_ONLY_SHUFFLE [RS_159]
+                                                        Group By Operator [GBY_158] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_157] (rows=18262 width=1119)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_155]
+                                <-Reducer 2 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_43]
+                                    PartitionCols:_col0
+                                    Merge Join Operator [MERGEJOIN_128] (rows=88000001 width=860)
+                                      Conds:RS_136._col1=RS_139._col0(Inner),Output:["_col0","_col3","_col4"]
+                                    <-Map 1 [SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_136]
+                                        PartitionCols:_col1
+                                        Select Operator [SEL_135] (rows=80000000 width=860)
+                                          Output:["_col0","_col1"]
+                                          Filter Operator [FIL_134] (rows=80000000 width=860)
+                                            predicate:(c_current_addr_sk is not null and c_customer_sk is not null)
+                                            TableScan [TS_0] (rows=80000000 width=860)
+                                              default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_addr_sk"]
+                                    <-Map 7 [SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_139]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_138] (rows=40000000 width=1014)
+                                          Output:["_col0","_col1","_col2"]
+                                          Filter Operator [FIL_137] (rows=40000000 width=1014)
+                                            predicate:ca_address_sk is not null
+                                            TableScan [TS_3] (rows=40000000 width=1014)
+                                              default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county","ca_zip"]
 


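For what it is worth, the reason plans like the ones above can apply the top-n key filter below the final group-by is that, at least in the queries shown here, the limited ordering runs over the same columns as the grouping keys (the Top N Key keys match the Group By keys), so any row whose key falls outside the n smallest distinct keys belongs to a group that cannot appear in the limited output. Below is a small, self-contained Java check of that equivalence on made-up data; the Row and topNGroups names are illustrative, not from Hive or TPC-DS.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

// Hypothetical end-to-end check (sample data): "GROUP BY key ORDER BY key
// LIMIT n" produces the same result whether or not rows are pre-filtered to
// the n smallest distinct keys, which is the rewrite these plans rely on.
public class TopNKeyRewriteCheck {

  record Row(String key, double price) {}

  static Map<String, Double> topNGroups(List<Row> rows, int n) {
    return rows.stream()
        .collect(Collectors.groupingBy(Row::key, Collectors.summingDouble(Row::price)))
        .entrySet().stream()
        .sorted(Map.Entry.comparingByKey())
        .limit(n)
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
            (a, b) -> a, LinkedHashMap::new));
  }

  public static void main(String[] args) {
    List<Row> rows = List.of(
        new Row("store_3", 10.0), new Row("store_1", 5.0), new Row("store_4", 2.0),
        new Row("store_1", 7.5), new Row("store_2", 1.0), new Row("store_3", 4.0));
    int n = 2;

    // Plan without the top-n key filter: aggregate everything, then order and limit.
    Map<String, Double> direct = topNGroups(rows, n);

    // Plan with the filter: keep only rows whose key is among the n smallest
    // distinct keys, then run the same aggregation over the surviving rows.
    Set<String> topKeys = rows.stream().map(Row::key).distinct()
        .sorted().limit(n).collect(Collectors.toSet());
    Map<String, Double> prefiltered = topNGroups(
        rows.stream().filter(r -> topKeys.contains(r.key())).collect(Collectors.toList()), n);

    System.out.println(direct);
    System.out.println(prefiltered);
    System.out.println("equal = " + direct.equals(prefiltered));
  }
}

Both pipelines print {store_1=12.5, store_2=1.0}, so the pre-filter changes only how much data reaches the aggregation and the shuffle, not the final result.
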
[04/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query76.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query76.q.out b/ql/src/test/results/clientpositive/perf/tez/query76.q.out
index b32d7e4..c1d7b97 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query76.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query76.q.out
@@ -70,193 +70,199 @@ Stage-0
     limit:100
     Stage-1
       Reducer 6 vectorized
-      File Output Operator [FS_195]
-        Limit [LIM_194] (rows=100 width=108)
+      File Output Operator [FS_199]
+        Limit [LIM_198] (rows=100 width=108)
           Number of rows:100
-          Select Operator [SEL_193] (rows=304916424 width=108)
+          Select Operator [SEL_197] (rows=304916424 width=108)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
           <-Reducer 5 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_192]
-              Group By Operator [GBY_191] (rows=304916424 width=108)
+            SHUFFLE [RS_196]
+              Group By Operator [GBY_195] (rows=304916424 width=108)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["count(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4
               <-Union 4 [SIMPLE_EDGE]
                 <-Reducer 12 [CONTAINS]
-                  Reduce Output Operator [RS_163]
+                  Reduce Output Operator [RS_167]
                     PartitionCols:_col0, _col1, _col2, _col3, _col4
-                    Group By Operator [GBY_162] (rows=609832848 width=108)
+                    Group By Operator [GBY_166] (rows=609832848 width=108)
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["count()","sum(_col5)"],keys:_col0, _col1, _col2, _col3, _col4
-                      Select Operator [SEL_160] (rows=174233858 width=135)
-                        Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                        Merge Join Operator [MERGEJOIN_159] (rows=174233858 width=135)
-                          Conds:RS_45._col0=RS_213._col0(Inner),Output:["_col3","_col5","_col7","_col8"]
-                        <-Map 21 [SIMPLE_EDGE] vectorized
-                          PARTITION_ONLY_SHUFFLE [RS_213]
-                            PartitionCols:_col0
-                            Select Operator [SEL_212] (rows=73049 width=1119)
-                              Output:["_col0","_col1","_col2"]
-                              Filter Operator [FIL_211] (rows=73049 width=1119)
-                                predicate:d_date_sk is not null
-                                TableScan [TS_39] (rows=73049 width=1119)
-                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
-                        <-Reducer 11 [SIMPLE_EDGE]
-                          SHUFFLE [RS_45]
-                            PartitionCols:_col0
-                            Merge Join Operator [MERGEJOIN_147] (rows=158394413 width=135)
-                              Conds:RS_221._col2=RS_170._col0(Inner),Output:["_col0","_col3","_col5"]
-                            <-Map 1 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_170]
-                                PartitionCols:_col0
-                                Select Operator [SEL_165] (rows=462000 width=1436)
-                                  Output:["_col0","_col1"]
-                                  Filter Operator [FIL_164] (rows=462000 width=1436)
-                                    predicate:i_item_sk is not null
-                                    TableScan [TS_0] (rows=462000 width=1436)
-                                      default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_category"]
-                            <-Map 20 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_221]
-                                PartitionCols:_col2
-                                Select Operator [SEL_220] (rows=143994918 width=135)
-                                  Output:["_col0","_col2","_col3"]
-                                  Filter Operator [FIL_219] (rows=143994918 width=135)
-                                    predicate:((cs_item_sk BETWEEN DynamicValue(RS_43_item_i_item_sk_min) AND DynamicValue(RS_43_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_43_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_46_date_dim_d_date_sk_min) AND DynamicValue(RS_46_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_46_date_dim_d_date_sk_bloom_filter))) and cs_item_sk is not null and cs_sold_date_sk is not null and cs_warehouse_sk is null)
-                                    TableScan [TS_33] (rows=287989836 width=135)
-                                      default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_warehouse_sk","cs_item_sk","cs_ext_sales_price"]
-                                    <-Reducer 13 [BROADCAST_EDGE] vectorized
-                                      BROADCAST [RS_210]
-                                        Group By Operator [GBY_209] (rows=1 width=12)
-                                          Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                        <-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_177]
-                                            Group By Operator [GBY_174] (rows=1 width=12)
-                                              Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                              Select Operator [SEL_171] (rows=462000 width=1436)
-                                                Output:["_col0"]
-                                                 Please refer to the previous Select Operator [SEL_165]
-                                    <-Reducer 22 [BROADCAST_EDGE] vectorized
-                                      BROADCAST [RS_218]
-                                        Group By Operator [GBY_217] (rows=1 width=12)
-                                          Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                        <-Map 21 [CUSTOM_SIMPLE_EDGE] vectorized
-                                          PARTITION_ONLY_SHUFFLE [RS_216]
-                                            Group By Operator [GBY_215] (rows=1 width=12)
-                                              Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                              Select Operator [SEL_214] (rows=73049 width=1119)
-                                                Output:["_col0"]
-                                                 Please refer to the previous Select Operator [SEL_212]
+                      Top N Key Operator [TNK_165] (rows=609832848 width=108)
+                        keys:_col0, _col1, _col2, _col3, _col4,sort order:+++++,top n:100
+                        Select Operator [SEL_163] (rows=174233858 width=135)
+                          Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                          Merge Join Operator [MERGEJOIN_162] (rows=174233858 width=135)
+                            Conds:RS_45._col0=RS_217._col0(Inner),Output:["_col3","_col5","_col7","_col8"]
+                          <-Map 21 [SIMPLE_EDGE] vectorized
+                            PARTITION_ONLY_SHUFFLE [RS_217]
+                              PartitionCols:_col0
+                              Select Operator [SEL_216] (rows=73049 width=1119)
+                                Output:["_col0","_col1","_col2"]
+                                Filter Operator [FIL_215] (rows=73049 width=1119)
+                                  predicate:d_date_sk is not null
+                                  TableScan [TS_39] (rows=73049 width=1119)
+                                    default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
+                          <-Reducer 11 [SIMPLE_EDGE]
+                            SHUFFLE [RS_45]
+                              PartitionCols:_col0
+                              Merge Join Operator [MERGEJOIN_148] (rows=158394413 width=135)
+                                Conds:RS_225._col2=RS_174._col0(Inner),Output:["_col0","_col3","_col5"]
+                              <-Map 1 [SIMPLE_EDGE] vectorized
+                                SHUFFLE [RS_174]
+                                  PartitionCols:_col0
+                                  Select Operator [SEL_169] (rows=462000 width=1436)
+                                    Output:["_col0","_col1"]
+                                    Filter Operator [FIL_168] (rows=462000 width=1436)
+                                      predicate:i_item_sk is not null
+                                      TableScan [TS_0] (rows=462000 width=1436)
+                                        default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_category"]
+                              <-Map 20 [SIMPLE_EDGE] vectorized
+                                SHUFFLE [RS_225]
+                                  PartitionCols:_col2
+                                  Select Operator [SEL_224] (rows=143994918 width=135)
+                                    Output:["_col0","_col2","_col3"]
+                                    Filter Operator [FIL_223] (rows=143994918 width=135)
+                                      predicate:((cs_item_sk BETWEEN DynamicValue(RS_43_item_i_item_sk_min) AND DynamicValue(RS_43_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_43_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_46_date_dim_d_date_sk_min) AND DynamicValue(RS_46_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_46_date_dim_d_date_sk_bloom_filter))) and cs_item_sk is not null and cs_sold_date_sk is not null and cs_warehouse_sk is null)
+                                      TableScan [TS_33] (rows=287989836 width=135)
+                                        default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_warehouse_sk","cs_item_sk","cs_ext_sales_price"]
+                                      <-Reducer 13 [BROADCAST_EDGE] vectorized
+                                        BROADCAST [RS_214]
+                                          Group By Operator [GBY_213] (rows=1 width=12)
+                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                          <-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_181]
+                                              Group By Operator [GBY_178] (rows=1 width=12)
+                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                Select Operator [SEL_175] (rows=462000 width=1436)
+                                                  Output:["_col0"]
+                                                   Please refer to the previous Select Operator [SEL_169]
+                                      <-Reducer 22 [BROADCAST_EDGE] vectorized
+                                        BROADCAST [RS_222]
+                                          Group By Operator [GBY_221] (rows=1 width=12)
+                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                          <-Map 21 [CUSTOM_SIMPLE_EDGE] vectorized
+                                            PARTITION_ONLY_SHUFFLE [RS_220]
+                                              Group By Operator [GBY_219] (rows=1 width=12)
+                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                Select Operator [SEL_218] (rows=73049 width=1119)
+                                                  Output:["_col0"]
+                                                   Please refer to the previous Select Operator [SEL_216]
                 <-Reducer 3 [CONTAINS]
-                  Reduce Output Operator [RS_153]
+                  Reduce Output Operator [RS_155]
                     PartitionCols:_col0, _col1, _col2, _col3, _col4
-                    Group By Operator [GBY_152] (rows=609832848 width=108)
+                    Group By Operator [GBY_154] (rows=609832848 width=108)
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["count()","sum(_col5)"],keys:_col0, _col1, _col2, _col3, _col4
-                      Select Operator [SEL_150] (rows=348477373 width=88)
-                        Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                        Merge Join Operator [MERGEJOIN_149] (rows=348477373 width=88)
-                          Conds:RS_12._col2=RS_182._col0(Inner),Output:["_col1","_col5","_col7","_col8"]
-                        <-Map 15 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_182]
-                            PartitionCols:_col0
-                            Select Operator [SEL_181] (rows=73049 width=1119)
-                              Output:["_col0","_col1","_col2"]
-                              Filter Operator [FIL_180] (rows=73049 width=1119)
-                                predicate:d_date_sk is not null
-                                TableScan [TS_6] (rows=73049 width=1119)
-                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
-                        <-Reducer 2 [SIMPLE_EDGE]
-                          SHUFFLE [RS_12]
-                            PartitionCols:_col2
-                            Merge Join Operator [MERGEJOIN_143] (rows=316797605 width=88)
-                              Conds:RS_166._col0=RS_190._col1(Inner),Output:["_col1","_col2","_col5"]
-                            <-Map 1 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_166]
-                                PartitionCols:_col0
-                                 Please refer to the previous Select Operator [SEL_165]
-                            <-Map 14 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_190]
-                                PartitionCols:_col1
-                                Select Operator [SEL_189] (rows=287997817 width=88)
-                                  Output:["_col0","_col1","_col3"]
-                                  Filter Operator [FIL_188] (rows=287997817 width=88)
-                                    predicate:((ss_item_sk BETWEEN DynamicValue(RS_9_item_i_item_sk_min) AND DynamicValue(RS_9_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_9_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_13_date_dim_d_date_sk_min) AND DynamicValue(RS_13_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_13_date_dim_d_date_sk_bloom_filter))) and ss_addr_sk is null and ss_item_sk is not null and ss_sold_date_sk is not null)
-                                    TableScan [TS_3] (rows=575995635 width=88)
-                                      default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_addr_sk","ss_ext_sales_price"]
-                                    <-Reducer 16 [BROADCAST_EDGE] vectorized
-                                      BROADCAST [RS_187]
-                                        Group By Operator [GBY_186] (rows=1 width=12)
-                                          Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                        <-Map 15 [CUSTOM_SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_185]
-                                            Group By Operator [GBY_184] (rows=1 width=12)
-                                              Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                              Select Operator [SEL_183] (rows=73049 width=1119)
-                                                Output:["_col0"]
-                                                 Please refer to the previous Select Operator [SEL_181]
-                                    <-Reducer 7 [BROADCAST_EDGE] vectorized
-                                      BROADCAST [RS_179]
-                                        Group By Operator [GBY_178] (rows=1 width=12)
-                                          Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                        <-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_175]
-                                            Group By Operator [GBY_172] (rows=1 width=12)
-                                              Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                              Select Operator [SEL_167] (rows=462000 width=1436)
-                                                Output:["_col0"]
-                                                 Please refer to the previous Select Operator [SEL_165]
+                      Top N Key Operator [TNK_153] (rows=609832848 width=108)
+                        keys:_col0, _col1, _col2, _col3, _col4,sort order:+++++,top n:100
+                        Select Operator [SEL_151] (rows=348477373 width=88)
+                          Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                          Merge Join Operator [MERGEJOIN_150] (rows=348477373 width=88)
+                            Conds:RS_12._col2=RS_186._col0(Inner),Output:["_col1","_col5","_col7","_col8"]
+                          <-Map 15 [SIMPLE_EDGE] vectorized
+                            SHUFFLE [RS_186]
+                              PartitionCols:_col0
+                              Select Operator [SEL_185] (rows=73049 width=1119)
+                                Output:["_col0","_col1","_col2"]
+                                Filter Operator [FIL_184] (rows=73049 width=1119)
+                                  predicate:d_date_sk is not null
+                                  TableScan [TS_6] (rows=73049 width=1119)
+                                    default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
+                          <-Reducer 2 [SIMPLE_EDGE]
+                            SHUFFLE [RS_12]
+                              PartitionCols:_col2
+                              Merge Join Operator [MERGEJOIN_144] (rows=316797605 width=88)
+                                Conds:RS_170._col0=RS_194._col1(Inner),Output:["_col1","_col2","_col5"]
+                              <-Map 1 [SIMPLE_EDGE] vectorized
+                                SHUFFLE [RS_170]
+                                  PartitionCols:_col0
+                                   Please refer to the previous Select Operator [SEL_169]
+                              <-Map 14 [SIMPLE_EDGE] vectorized
+                                SHUFFLE [RS_194]
+                                  PartitionCols:_col1
+                                  Select Operator [SEL_193] (rows=287997817 width=88)
+                                    Output:["_col0","_col1","_col3"]
+                                    Filter Operator [FIL_192] (rows=287997817 width=88)
+                                      predicate:((ss_item_sk BETWEEN DynamicValue(RS_9_item_i_item_sk_min) AND DynamicValue(RS_9_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_9_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_13_date_dim_d_date_sk_min) AND DynamicValue(RS_13_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_13_date_dim_d_date_sk_bloom_filter))) and ss_addr_sk is null and ss_item_sk is not null and ss_sold_date_sk is not null)
+                                      TableScan [TS_3] (rows=575995635 width=88)
+                                        default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_addr_sk","ss_ext_sales_price"]
+                                      <-Reducer 16 [BROADCAST_EDGE] vectorized
+                                        BROADCAST [RS_191]
+                                          Group By Operator [GBY_190] (rows=1 width=12)
+                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                          <-Map 15 [CUSTOM_SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_189]
+                                              Group By Operator [GBY_188] (rows=1 width=12)
+                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                Select Operator [SEL_187] (rows=73049 width=1119)
+                                                  Output:["_col0"]
+                                                   Please refer to the previous Select Operator [SEL_185]
+                                      <-Reducer 7 [BROADCAST_EDGE] vectorized
+                                        BROADCAST [RS_183]
+                                          Group By Operator [GBY_182] (rows=1 width=12)
+                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                          <-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_179]
+                                              Group By Operator [GBY_176] (rows=1 width=12)
+                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                Select Operator [SEL_171] (rows=462000 width=1436)
+                                                  Output:["_col0"]
+                                                   Please refer to the previous Select Operator [SEL_169]
                 <-Reducer 9 [CONTAINS]
-                  Reduce Output Operator [RS_158]
+                  Reduce Output Operator [RS_161]
                     PartitionCols:_col0, _col1, _col2, _col3, _col4
-                    Group By Operator [GBY_157] (rows=609832848 width=108)
+                    Group By Operator [GBY_160] (rows=609832848 width=108)
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["count()","sum(_col5)"],keys:_col0, _col1, _col2, _col3, _col4
-                      Select Operator [SEL_155] (rows=87121617 width=135)
-                        Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                        Merge Join Operator [MERGEJOIN_154] (rows=87121617 width=135)
-                          Conds:RS_28._col0=RS_200._col0(Inner),Output:["_col3","_col5","_col7","_col8"]
-                        <-Map 18 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_200]
-                            PartitionCols:_col0
-                            Select Operator [SEL_199] (rows=73049 width=1119)
-                              Output:["_col0","_col1","_col2"]
-                              Filter Operator [FIL_198] (rows=73049 width=1119)
-                                predicate:d_date_sk is not null
-                                TableScan [TS_22] (rows=73049 width=1119)
-                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
-                        <-Reducer 8 [SIMPLE_EDGE]
-                          SHUFFLE [RS_28]
-                            PartitionCols:_col0
-                            Merge Join Operator [MERGEJOIN_145] (rows=79201469 width=135)
-                              Conds:RS_208._col1=RS_168._col0(Inner),Output:["_col0","_col3","_col5"]
-                            <-Map 1 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_168]
-                                PartitionCols:_col0
-                                 Please refer to the previous Select Operator [SEL_165]
-                            <-Map 17 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_208]
-                                PartitionCols:_col1
-                                Select Operator [SEL_207] (rows=72001334 width=135)
-                                  Output:["_col0","_col1","_col3"]
-                                  Filter Operator [FIL_206] (rows=72001334 width=135)
-                                    predicate:((ws_item_sk BETWEEN DynamicValue(RS_26_item_i_item_sk_min) AND DynamicValue(RS_26_item_i_item_sk_max) and in_bloom_filter(ws_item_sk, DynamicValue(RS_26_item_i_item_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_29_date_dim_d_date_sk_min) AND DynamicValue(RS_29_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_29_date_dim_d_date_sk_bloom_filter))) and ws_item_sk is not null and ws_sold_date_sk is not null and ws_web_page_sk is null)
-                                    TableScan [TS_16] (rows=144002668 width=135)
-                                      default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_web_page_sk","ws_ext_sales_price"]
-                                    <-Reducer 10 [BROADCAST_EDGE] vectorized
-                                      BROADCAST [RS_197]
-                                        Group By Operator [GBY_196] (rows=1 width=12)
-                                          Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                        <-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_176]
-                                            Group By Operator [GBY_173] (rows=1 width=12)
-                                              Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                              Select Operator [SEL_169] (rows=462000 width=1436)
-                                                Output:["_col0"]
-                                                 Please refer to the previous Select Operator [SEL_165]
-                                    <-Reducer 19 [BROADCAST_EDGE] vectorized
-                                      BROADCAST [RS_205]
-                                        Group By Operator [GBY_204] (rows=1 width=12)
-                                          Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                        <-Map 18 [CUSTOM_SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_203]
-                                            Group By Operator [GBY_202] (rows=1 width=12)
-                                              Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                              Select Operator [SEL_201] (rows=73049 width=1119)
-                                                Output:["_col0"]
-                                                 Please refer to the previous Select Operator [SEL_199]
+                      Top N Key Operator [TNK_159] (rows=609832848 width=108)
+                        keys:_col0, _col1, _col2, _col3, _col4,sort order:+++++,top n:100
+                        Select Operator [SEL_157] (rows=87121617 width=135)
+                          Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                          Merge Join Operator [MERGEJOIN_156] (rows=87121617 width=135)
+                            Conds:RS_28._col0=RS_204._col0(Inner),Output:["_col3","_col5","_col7","_col8"]
+                          <-Map 18 [SIMPLE_EDGE] vectorized
+                            SHUFFLE [RS_204]
+                              PartitionCols:_col0
+                              Select Operator [SEL_203] (rows=73049 width=1119)
+                                Output:["_col0","_col1","_col2"]
+                                Filter Operator [FIL_202] (rows=73049 width=1119)
+                                  predicate:d_date_sk is not null
+                                  TableScan [TS_22] (rows=73049 width=1119)
+                                    default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
+                          <-Reducer 8 [SIMPLE_EDGE]
+                            SHUFFLE [RS_28]
+                              PartitionCols:_col0
+                              Merge Join Operator [MERGEJOIN_146] (rows=79201469 width=135)
+                                Conds:RS_212._col1=RS_172._col0(Inner),Output:["_col0","_col3","_col5"]
+                              <-Map 1 [SIMPLE_EDGE] vectorized
+                                SHUFFLE [RS_172]
+                                  PartitionCols:_col0
+                                   Please refer to the previous Select Operator [SEL_169]
+                              <-Map 17 [SIMPLE_EDGE] vectorized
+                                SHUFFLE [RS_212]
+                                  PartitionCols:_col1
+                                  Select Operator [SEL_211] (rows=72001334 width=135)
+                                    Output:["_col0","_col1","_col3"]
+                                    Filter Operator [FIL_210] (rows=72001334 width=135)
+                                      predicate:((ws_item_sk BETWEEN DynamicValue(RS_26_item_i_item_sk_min) AND DynamicValue(RS_26_item_i_item_sk_max) and in_bloom_filter(ws_item_sk, DynamicValue(RS_26_item_i_item_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_29_date_dim_d_date_sk_min) AND DynamicValue(RS_29_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_29_date_dim_d_date_sk_bloom_filter))) and ws_item_sk is not null and ws_sold_date_sk is not null and ws_web_page_sk is null)
+                                      TableScan [TS_16] (rows=144002668 width=135)
+                                        default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_web_page_sk","ws_ext_sales_price"]
+                                      <-Reducer 10 [BROADCAST_EDGE] vectorized
+                                        BROADCAST [RS_201]
+                                          Group By Operator [GBY_200] (rows=1 width=12)
+                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                          <-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_180]
+                                              Group By Operator [GBY_177] (rows=1 width=12)
+                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                Select Operator [SEL_173] (rows=462000 width=1436)
+                                                  Output:["_col0"]
+                                                   Please refer to the previous Select Operator [SEL_169]
+                                      <-Reducer 19 [BROADCAST_EDGE] vectorized
+                                        BROADCAST [RS_209]
+                                          Group By Operator [GBY_208] (rows=1 width=12)
+                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                          <-Map 18 [CUSTOM_SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_207]
+                                              Group By Operator [GBY_206] (rows=1 width=12)
+                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                Select Operator [SEL_205] (rows=73049 width=1119)
+                                                  Output:["_col0"]
+                                                   Please refer to the previous Select Operator [SEL_203]
 

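The added Top N Key Operator entries in the plan diffs above (for example, keys:_col0, _col1, _col2, _col3, _col4, sort order:+++++, top n:100) sit between a Select Operator and the map-side Group By that consumes it, and forward only rows whose key values can still place within the query's top n:100, so downstream shuffle and aggregation stages see fewer rows. As an illustrative sketch only (the statement below is not taken from this diff; it merely reuses the usual TPC-DS table and column names that appear in these plans), a grouped, ordered, limited query of roughly this shape is the kind of plan in which the operator shows up:

    -- Hedged example: a GROUP BY ... ORDER BY ... LIMIT query over the TPC-DS schema.
    -- With the top-n-key optimization enabled (the config is believed to be
    -- hive.optimize.topnkey; treat that name as an assumption), EXPLAIN should show
    -- a Top N Key Operator feeding the map-side Group By, pruning rows whose keys
    -- cannot reach the top 100 before they are shuffled.
    EXPLAIN
    SELECT i_item_id,
           d_year,
           sum(ss_ext_sales_price) AS total_price
    FROM store_sales
    JOIN item     ON ss_item_sk = i_item_sk
    JOIN date_dim ON ss_sold_date_sk = d_date_sk
    GROUP BY i_item_id, d_year
    ORDER BY i_item_id, d_year
    LIMIT 100;
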
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query77.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query77.q.out b/ql/src/test/results/clientpositive/perf/tez/query77.q.out
index 915d4fd..617a473 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query77.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query77.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join MERGEJOIN[315][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 16' is a cross product
+Warning: Shuffle Join MERGEJOIN[317][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 16' is a cross product
 PREHOOK: query: explain
 with ss as
  (select s_store_sk,
@@ -249,296 +249,302 @@ Stage-0
     limit:100
     Stage-1
       Reducer 8 vectorized
-      File Output Operator [FS_368]
-        Limit [LIM_367] (rows=100 width=163)
+      File Output Operator [FS_372]
+        Limit [LIM_371] (rows=100 width=163)
           Number of rows:100
-          Select Operator [SEL_366] (rows=956329968 width=163)
+          Select Operator [SEL_370] (rows=956329968 width=163)
             Output:["_col0","_col1","_col2","_col3","_col4"]
           <-Reducer 7 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_365]
-              Select Operator [SEL_364] (rows=956329968 width=163)
+            SHUFFLE [RS_369]
+              Select Operator [SEL_368] (rows=956329968 width=163)
                 Output:["_col0","_col1","_col2","_col3","_col4"]
-                Group By Operator [GBY_363] (rows=956329968 width=163)
+                Group By Operator [GBY_367] (rows=956329968 width=163)
                   Output:["_col0","_col1","_col3","_col4","_col5"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1, KEY._col2
                 <-Union 6 [SIMPLE_EDGE]
                   <-Reducer 16 [CONTAINS]
-                    Reduce Output Operator [RS_319]
+                    Reduce Output Operator [RS_322]
                       PartitionCols:_col0, _col1, _col2
-                      Group By Operator [GBY_318] (rows=1912659936 width=163)
+                      Group By Operator [GBY_321] (rows=1912659936 width=163)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L
-                        Select Operator [SEL_316] (rows=158394413 width=360)
-                          Output:["_col0","_col1","_col2","_col3","_col4"]
-                          Merge Join Operator [MERGEJOIN_315] (rows=158394413 width=360)
-                            Conds:(Inner),Output:["_col0","_col1","_col2","_col3","_col4"]
-                          <-Reducer 15 [CUSTOM_SIMPLE_EDGE] vectorized
-                            PARTITION_ONLY_SHUFFLE [RS_375]
-                              Group By Operator [GBY_374] (rows=158394413 width=135)
-                                Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
-                              <-Reducer 14 [SIMPLE_EDGE]
-                                SHUFFLE [RS_55]
-                                  PartitionCols:_col0
-                                  Group By Operator [GBY_54] (rows=316788826 width=135)
-                                    Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col1
-                                    Merge Join Operator [MERGEJOIN_301] (rows=316788826 width=135)
-                                      Conds:RS_373._col0=RS_330._col0(Inner),Output:["_col1","_col2","_col3"]
-                                    <-Map 9 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_330]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_326] (rows=8116 width=1119)
-                                          Output:["_col0"]
-                                          Filter Operator [FIL_325] (rows=8116 width=1119)
-                                            predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00' AND TIMESTAMP'1998-09-03 00:00:00' and d_date_sk is not null)
-                                            TableScan [TS_3] (rows=73049 width=1119)
-                                              default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
-                                    <-Map 31 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_373]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_372] (rows=287989836 width=135)
-                                          Output:["_col0","_col1","_col2","_col3"]
-                                          Filter Operator [FIL_371] (rows=287989836 width=135)
-                                            predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_51_date_dim_d_date_sk_min) AND DynamicValue(RS_51_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_51_date_dim_d_date_sk_bloom_filter))) and cs_sold_date_sk is not null)
-                                            TableScan [TS_44] (rows=287989836 width=135)
-                                              default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_call_center_sk","cs_ext_sales_price","cs_net_profit"]
-                                            <-Reducer 17 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_370]
-                                                Group By Operator [GBY_369] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_340]
-                                                    Group By Operator [GBY_337] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_331] (rows=8116 width=1119)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_326]
-                          <-Reducer 19 [CUSTOM_SIMPLE_EDGE] vectorized
-                            PARTITION_ONLY_SHUFFLE [RS_380]
-                              Group By Operator [GBY_379] (rows=1 width=224)
-                                Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
-                              <-Reducer 18 [CUSTOM_SIMPLE_EDGE]
-                                PARTITION_ONLY_SHUFFLE [RS_69]
-                                  Group By Operator [GBY_68] (rows=1 width=224)
-                                    Output:["_col0","_col1"],aggregations:["sum(_col1)","sum(_col2)"]
-                                    Merge Join Operator [MERGEJOIN_302] (rows=31678769 width=106)
-                                      Conds:RS_378._col0=RS_332._col0(Inner),Output:["_col1","_col2"]
-                                    <-Map 9 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_332]
-                                        PartitionCols:_col0
-                                         Please refer to the previous Select Operator [SEL_326]
-                                    <-Map 32 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_378]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_377] (rows=28798881 width=106)
-                                          Output:["_col0","_col1","_col2"]
-                                          Filter Operator [FIL_376] (rows=28798881 width=106)
-                                            predicate:cr_returned_date_sk is not null
-                                            TableScan [TS_58] (rows=28798881 width=106)
-                                              default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_returned_date_sk","cr_return_amount","cr_net_loss"]
+                        Top N Key Operator [TNK_320] (rows=637553312 width=163)
+                          keys:_col0, _col1, 0L,sort order:+++,top n:100
+                          Select Operator [SEL_318] (rows=158394413 width=360)
+                            Output:["_col0","_col1","_col2","_col3","_col4"]
+                            Merge Join Operator [MERGEJOIN_317] (rows=158394413 width=360)
+                              Conds:(Inner),Output:["_col0","_col1","_col2","_col3","_col4"]
+                            <-Reducer 15 [CUSTOM_SIMPLE_EDGE] vectorized
+                              PARTITION_ONLY_SHUFFLE [RS_379]
+                                Group By Operator [GBY_378] (rows=158394413 width=135)
+                                  Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
+                                <-Reducer 14 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_55]
+                                    PartitionCols:_col0
+                                    Group By Operator [GBY_54] (rows=316788826 width=135)
+                                      Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col1
+                                      Merge Join Operator [MERGEJOIN_302] (rows=316788826 width=135)
+                                        Conds:RS_377._col0=RS_334._col0(Inner),Output:["_col1","_col2","_col3"]
+                                      <-Map 9 [SIMPLE_EDGE] vectorized
+                                        SHUFFLE [RS_334]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_330] (rows=8116 width=1119)
+                                            Output:["_col0"]
+                                            Filter Operator [FIL_329] (rows=8116 width=1119)
+                                              predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00' AND TIMESTAMP'1998-09-03 00:00:00' and d_date_sk is not null)
+                                              TableScan [TS_3] (rows=73049 width=1119)
+                                                default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
+                                      <-Map 31 [SIMPLE_EDGE] vectorized
+                                        SHUFFLE [RS_377]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_376] (rows=287989836 width=135)
+                                            Output:["_col0","_col1","_col2","_col3"]
+                                            Filter Operator [FIL_375] (rows=287989836 width=135)
+                                              predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_51_date_dim_d_date_sk_min) AND DynamicValue(RS_51_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_51_date_dim_d_date_sk_bloom_filter))) and cs_sold_date_sk is not null)
+                                              TableScan [TS_44] (rows=287989836 width=135)
+                                                default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_call_center_sk","cs_ext_sales_price","cs_net_profit"]
+                                              <-Reducer 17 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_374]
+                                                  Group By Operator [GBY_373] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                  <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                    SHUFFLE [RS_344]
+                                                      Group By Operator [GBY_341] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                        Select Operator [SEL_335] (rows=8116 width=1119)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Select Operator [SEL_330]
+                            <-Reducer 19 [CUSTOM_SIMPLE_EDGE] vectorized
+                              PARTITION_ONLY_SHUFFLE [RS_384]
+                                Group By Operator [GBY_383] (rows=1 width=224)
+                                  Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
+                                <-Reducer 18 [CUSTOM_SIMPLE_EDGE]
+                                  PARTITION_ONLY_SHUFFLE [RS_69]
+                                    Group By Operator [GBY_68] (rows=1 width=224)
+                                      Output:["_col0","_col1"],aggregations:["sum(_col1)","sum(_col2)"]
+                                      Merge Join Operator [MERGEJOIN_303] (rows=31678769 width=106)
+                                        Conds:RS_382._col0=RS_336._col0(Inner),Output:["_col1","_col2"]
+                                      <-Map 9 [SIMPLE_EDGE] vectorized
+                                        SHUFFLE [RS_336]
+                                          PartitionCols:_col0
+                                           Please refer to the previous Select Operator [SEL_330]
+                                      <-Map 32 [SIMPLE_EDGE] vectorized
+                                        SHUFFLE [RS_382]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_381] (rows=28798881 width=106)
+                                            Output:["_col0","_col1","_col2"]
+                                            Filter Operator [FIL_380] (rows=28798881 width=106)
+                                              predicate:cr_returned_date_sk is not null
+                                              TableScan [TS_58] (rows=28798881 width=106)
+                                                default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_returned_date_sk","cr_return_amount","cr_net_loss"]
                   <-Reducer 23 [CONTAINS]
-                    Reduce Output Operator [RS_324]
+                    Reduce Output Operator [RS_328]
                       PartitionCols:_col0, _col1, _col2
-                      Group By Operator [GBY_323] (rows=1912659936 width=163)
+                      Group By Operator [GBY_327] (rows=1912659936 width=163)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L
-                        Select Operator [SEL_321] (rows=95833780 width=135)
-                          Output:["_col0","_col1","_col2","_col3","_col4"]
-                          Merge Join Operator [MERGEJOIN_320] (rows=95833780 width=135)
-                            Conds:RS_396._col0=RS_401._col0(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5"]
-                          <-Reducer 22 [ONE_TO_ONE_EDGE] vectorized
-                            FORWARD [RS_396]
-                              PartitionCols:_col0
-                              Group By Operator [GBY_395] (rows=87121617 width=135)
-                                Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
-                              <-Reducer 21 [SIMPLE_EDGE]
-                                SHUFFLE [RS_94]
-                                  PartitionCols:_col0
-                                  Group By Operator [GBY_93] (rows=174243235 width=135)
-                                    Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col6
-                                    Merge Join Operator [MERGEJOIN_304] (rows=174243235 width=135)
-                                      Conds:RS_89._col1=RS_385._col0(Inner),Output:["_col2","_col3","_col6"]
-                                    <-Map 34 [SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_385]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_384] (rows=4602 width=585)
-                                          Output:["_col0"]
-                                          Filter Operator [FIL_383] (rows=4602 width=585)
-                                            predicate:wp_web_page_sk is not null
-                                            TableScan [TS_83] (rows=4602 width=585)
-                                              default@web_page,web_page,Tbl:COMPLETE,Col:NONE,Output:["wp_web_page_sk"]
-                                    <-Reducer 20 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_89]
-                                        PartitionCols:_col1
-                                        Merge Join Operator [MERGEJOIN_303] (rows=158402938 width=135)
-                                          Conds:RS_394._col0=RS_333._col0(Inner),Output:["_col1","_col2","_col3"]
-                                        <-Map 9 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_333]
-                                            PartitionCols:_col0
-                                             Please refer to the previous Select Operator [SEL_326]
-                                        <-Map 33 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_394]
-                                            PartitionCols:_col0
-                                            Select Operator [SEL_393] (rows=144002668 width=135)
-                                              Output:["_col0","_col1","_col2","_col3"]
-                                              Filter Operator [FIL_392] (rows=144002668 width=135)
-                                                predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_87_date_dim_d_date_sk_min) AND DynamicValue(RS_87_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_87_date_dim_d_date_sk_bloom_filter))) and (ws_web_page_sk BETWEEN DynamicValue(RS_90_web_page_wp_web_page_sk_min) AND DynamicValue(RS_90_web_page_wp_web_page_sk_max) and in_bloom_filter(ws_web_page_sk, DynamicValue(RS_90_web_page_wp_web_page_sk_bloom_filter))) and ws_sold_date_sk is not null and ws_web_page_sk is not null)
-                                                TableScan [TS_77] (rows=144002668 width=135)
-                                                  default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_web_page_sk","ws_ext_sales_price","ws_net_profit"]
-                                                <-Reducer 24 [BROADCAST_EDGE] vectorized
-                                                  BROADCAST [RS_382]
-                                                    Group By Operator [GBY_381] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                    <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                      SHUFFLE [RS_341]
-                                                        Group By Operator [GBY_338] (rows=1 width=12)
-                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                          Select Operator [SEL_334] (rows=8116 width=1119)
-                                                            Output:["_col0"]
-                                                             Please refer to the previous Select Operator [SEL_326]
-                                                <-Reducer 35 [BROADCAST_EDGE] vectorized
-                                                  BROADCAST [RS_391]
-                                                    Group By Operator [GBY_390] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                    <-Map 34 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                      PARTITION_ONLY_SHUFFLE [RS_389]
-                                                        Group By Operator [GBY_388] (rows=1 width=12)
-                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                          Select Operator [SEL_386] (rows=4602 width=585)
-                                                            Output:["_col0"]
-                                                             Please refer to the previous Select Operator [SEL_384]
-                          <-Reducer 27 [ONE_TO_ONE_EDGE] vectorized
-                            FORWARD [RS_401]
-                              PartitionCols:_col0
-                              Group By Operator [GBY_400] (rows=8711072 width=92)
-                                Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
-                              <-Reducer 26 [SIMPLE_EDGE]
-                                SHUFFLE [RS_114]
-                                  PartitionCols:_col0
-                                  Group By Operator [GBY_113] (rows=17422145 width=92)
-                                    Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col6
-                                    Merge Join Operator [MERGEJOIN_306] (rows=17422145 width=92)
-                                      Conds:RS_109._col1=RS_387._col0(Inner),Output:["_col2","_col3","_col6"]
-                                    <-Map 34 [SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_387]
-                                        PartitionCols:_col0
-                                         Please refer to the previous Select Operator [SEL_384]
-                                    <-Reducer 25 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_109]
-                                        PartitionCols:_col1
-                                        Merge Join Operator [MERGEJOIN_305] (rows=15838314 width=92)
-                                          Conds:RS_399._col0=RS_335._col0(Inner),Output:["_col1","_col2","_col3"]
-                                        <-Map 9 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_335]
-                                            PartitionCols:_col0
-                                             Please refer to the previous Select Operator [SEL_326]
-                                        <-Map 36 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_399]
-                                            PartitionCols:_col0
-                                            Select Operator [SEL_398] (rows=14398467 width=92)
-                                              Output:["_col0","_col1","_col2","_col3"]
-                                              Filter Operator [FIL_397] (rows=14398467 width=92)
-                                                predicate:(wr_returned_date_sk is not null and wr_web_page_sk is not null)
-                                                TableScan [TS_97] (rows=14398467 width=92)
-                                                  default@web_returns,web_returns,Tbl:COMPLETE,Col:NONE,Output:["wr_returned_date_sk","wr_web_page_sk","wr_return_amt","wr_net_loss"]
+                        Top N Key Operator [TNK_326] (rows=637553312 width=163)
+                          keys:_col0, _col1, 0L,sort order:+++,top n:100
+                          Select Operator [SEL_324] (rows=95833780 width=135)
+                            Output:["_col0","_col1","_col2","_col3","_col4"]
+                            Merge Join Operator [MERGEJOIN_323] (rows=95833780 width=135)
+                              Conds:RS_400._col0=RS_405._col0(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5"]
+                            <-Reducer 22 [ONE_TO_ONE_EDGE] vectorized
+                              FORWARD [RS_400]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_399] (rows=87121617 width=135)
+                                  Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
+                                <-Reducer 21 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_94]
+                                    PartitionCols:_col0
+                                    Group By Operator [GBY_93] (rows=174243235 width=135)
+                                      Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col6
+                                      Merge Join Operator [MERGEJOIN_305] (rows=174243235 width=135)
+                                        Conds:RS_89._col1=RS_389._col0(Inner),Output:["_col2","_col3","_col6"]
+                                      <-Map 34 [SIMPLE_EDGE] vectorized
+                                        PARTITION_ONLY_SHUFFLE [RS_389]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_388] (rows=4602 width=585)
+                                            Output:["_col0"]
+                                            Filter Operator [FIL_387] (rows=4602 width=585)
+                                              predicate:wp_web_page_sk is not null
+                                              TableScan [TS_83] (rows=4602 width=585)
+                                                default@web_page,web_page,Tbl:COMPLETE,Col:NONE,Output:["wp_web_page_sk"]
+                                      <-Reducer 20 [SIMPLE_EDGE]
+                                        SHUFFLE [RS_89]
+                                          PartitionCols:_col1
+                                          Merge Join Operator [MERGEJOIN_304] (rows=158402938 width=135)
+                                            Conds:RS_398._col0=RS_337._col0(Inner),Output:["_col1","_col2","_col3"]
+                                          <-Map 9 [SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_337]
+                                              PartitionCols:_col0
+                                               Please refer to the previous Select Operator [SEL_330]
+                                          <-Map 33 [SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_398]
+                                              PartitionCols:_col0
+                                              Select Operator [SEL_397] (rows=144002668 width=135)
+                                                Output:["_col0","_col1","_col2","_col3"]
+                                                Filter Operator [FIL_396] (rows=144002668 width=135)
+                                                  predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_87_date_dim_d_date_sk_min) AND DynamicValue(RS_87_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_87_date_dim_d_date_sk_bloom_filter))) and (ws_web_page_sk BETWEEN DynamicValue(RS_90_web_page_wp_web_page_sk_min) AND DynamicValue(RS_90_web_page_wp_web_page_sk_max) and in_bloom_filter(ws_web_page_sk, DynamicValue(RS_90_web_page_wp_web_page_sk_bloom_filter))) and ws_sold_date_sk is not null and ws_web_page_sk is not null)
+                                                  TableScan [TS_77] (rows=144002668 width=135)
+                                                    default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_web_page_sk","ws_ext_sales_price","ws_net_profit"]
+                                                  <-Reducer 24 [BROADCAST_EDGE] vectorized
+                                                    BROADCAST [RS_386]
+                                                      Group By Operator [GBY_385] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                      <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                        SHUFFLE [RS_345]
+                                                          Group By Operator [GBY_342] (rows=1 width=12)
+                                                            Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                            Select Operator [SEL_338] (rows=8116 width=1119)
+                                                              Output:["_col0"]
+                                                               Please refer to the previous Select Operator [SEL_330]
+                                                  <-Reducer 35 [BROADCAST_EDGE] vectorized
+                                                    BROADCAST [RS_395]
+                                                      Group By Operator [GBY_394] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                      <-Map 34 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                        PARTITION_ONLY_SHUFFLE [RS_393]
+                                                          Group By Operator [GBY_392] (rows=1 width=12)
+                                                            Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                            Select Operator [SEL_390] (rows=4602 width=585)
+                                                              Output:["_col0"]
+                                                               Please refer to the previous Select Operator [SEL_388]
+                            <-Reducer 27 [ONE_TO_ONE_EDGE] vectorized
+                              FORWARD [RS_405]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_404] (rows=8711072 width=92)
+                                  Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
+                                <-Reducer 26 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_114]
+                                    PartitionCols:_col0
+                                    Group By Operator [GBY_113] (rows=17422145 width=92)
+                                      Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col6
+                                      Merge Join Operator [MERGEJOIN_307] (rows=17422145 width=92)
+                                        Conds:RS_109._col1=RS_391._col0(Inner),Output:["_col2","_col3","_col6"]
+                                      <-Map 34 [SIMPLE_EDGE] vectorized
+                                        PARTITION_ONLY_SHUFFLE [RS_391]
+                                          PartitionCols:_col0
+                                           Please refer to the previous Select Operator [SEL_388]
+                                      <-Reducer 25 [SIMPLE_EDGE]
+                                        SHUFFLE [RS_109]
+                                          PartitionCols:_col1
+                                          Merge Join Operator [MERGEJOIN_306] (rows=15838314 width=92)
+                                            Conds:RS_403._col0=RS_339._col0(Inner),Output:["_col1","_col2","_col3"]
+                                          <-Map 9 [SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_339]
+                                              PartitionCols:_col0
+                                               Please refer to the previous Select Operator [SEL_330]
+                                          <-Map 36 [SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_403]
+                                              PartitionCols:_col0
+                                              Select Operator [SEL_402] (rows=14398467 width=92)
+                                                Output:["_col0","_col1","_col2","_col3"]
+                                                Filter Operator [FIL_401] (rows=14398467 width=92)
+                                                  predicate:(wr_returned_date_sk is not null and wr_web_page_sk is not null)
+                                                  TableScan [TS_97] (rows=14398467 width=92)
+                                                    default@web_returns,web_returns,Tbl:COMPLETE,Col:NONE,Output:["wr_returned_date_sk","wr_web_page_sk","wr_return_amt","wr_net_loss"]
                   <-Reducer 5 [CONTAINS]
-                    Reduce Output Operator [RS_314]
+                    Reduce Output Operator [RS_316]
                       PartitionCols:_col0, _col1, _col2
-                      Group By Operator [GBY_313] (rows=1912659936 width=163)
+                      Group By Operator [GBY_315] (rows=1912659936 width=163)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L
-                        Select Operator [SEL_311] (rows=383325119 width=88)
-                          Output:["_col0","_col1","_col2","_col3","_col4"]
-                          Merge Join Operator [MERGEJOIN_310] (rows=383325119 width=88)
-                            Conds:RS_357._col0=RS_362._col0(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5"]
-                          <-Reducer 13 [ONE_TO_ONE_EDGE] vectorized
-                            FORWARD [RS_362]
-                              PartitionCols:_col0
-                              Group By Operator [GBY_361] (rows=34842647 width=77)
-                                Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
-                              <-Reducer 12 [SIMPLE_EDGE]
-                                SHUFFLE [RS_37]
-                                  PartitionCols:_col0
-                                  Group By Operator [GBY_36] (rows=69685294 width=77)
-                                    Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col6
-                                    Merge Join Operator [MERGEJOIN_300] (rows=69685294 width=77)
-                                      Conds:RS_32._col1=RS_348._col0(Inner),Output:["_col2","_col3","_col6"]
-                                    <-Map 28 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_348]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_345] (rows=1704 width=1910)
-                                          Output:["_col0"]
-                                          Filter Operator [FIL_344] (rows=1704 width=1910)
-                                            predicate:s_store_sk is not null
-                                            TableScan [TS_6] (rows=1704 width=1910)
-                                              default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk"]
-                                    <-Reducer 11 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_32]
-                                        PartitionCols:_col1
-                                        Merge Join Operator [MERGEJOIN_299] (rows=63350266 width=77)
-                                          Conds:RS_360._col0=RS_329._col0(Inner),Output:["_col1","_col2","_col3"]
-                                        <-Map 9 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_329]
-                                            PartitionCols:_col0
-                                             Please refer to the previous Select Operator [SEL_326]
-                                        <-Map 30 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_360]
-                                            PartitionCols:_col0
-                                            Select Operator [SEL_359] (rows=57591150 width=77)
-                                              Output:["_col0","_col1","_col2","_col3"]
-                                              Filter Operator [FIL_358] (rows=57591150 width=77)
-                                                predicate:(sr_returned_date_sk is not null and sr_store_sk is not null)
-                                                TableScan [TS_20] (rows=57591150 width=77)
-                                                  default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_returned_date_sk","sr_store_sk","sr_return_amt","sr_net_loss"]
-                          <-Reducer 4 [ONE_TO_ONE_EDGE] vectorized
-                            FORWARD [RS_357]
-                              PartitionCols:_col0
-                              Group By Operator [GBY_356] (rows=348477374 width=88)
-                                Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
-                              <-Reducer 3 [SIMPLE_EDGE]
-                                SHUFFLE [RS_17]
-                                  PartitionCols:_col0
-                                  Group By Operator [GBY_16] (rows=696954748 width=88)
-                                    Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col6
-                                    Merge Join Operator [MERGEJOIN_298] (rows=696954748 width=88)
-                                      Conds:RS_12._col1=RS_346._col0(Inner),Output:["_col2","_col3","_col6"]
-                                    <-Map 28 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_346]
-                                        PartitionCols:_col0
-                                         Please refer to the previous Select Operator [SEL_345]
-                                    <-Reducer 2 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_12]
-                                        PartitionCols:_col1
-                                        Merge Join Operator [MERGEJOIN_297] (rows=633595212 width=88)
-                                          Conds:RS_355._col0=RS_327._col0(Inner),Output:["_col1","_col2","_col3"]
-                                        <-Map 9 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_327]
-                                            PartitionCols:_col0
-                                             Please refer to the previous Select Operator [SEL_326]
-                                        <-Map 1 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_355]
-                                            PartitionCols:_col0
-                                            Select Operator [SEL_354] (rows=575995635 width=88)
-                                              Output:["_col0","_col1","_col2","_col3"]
-                                              Filter Operator [FIL_353] (rows=575995635 width=88)
-                                                predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_10_date_dim_d_date_sk_min) AND DynamicValue(RS_10_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_10_date_dim_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_13_store_s_store_sk_min) AND DynamicValue(RS_13_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_13_store_s_store_sk_bloom_filter))) and ss_sold_date_sk is not null and ss_store_sk is not null)
-                                                TableScan [TS_0] (rows=575995635 width=88)
-                                                  default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_store_sk","ss_ext_sales_price","ss_net_profit"]
-                                                <-Reducer 10 [BROADCAST_EDGE] vectorized
-                                                  BROADCAST [RS_343]
-                                                    Group By Operator [GBY_342] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                    <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                      SHUFFLE [RS_339]
-                                                        Group By Operator [GBY_336] (rows=1 width=12)
-                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                          Select Operator [SEL_328] (rows=8116 width=1119)
-                                                            Output:["_col0"]
-                                                             Please refer to the previous Select Operator [SEL_326]
-                                                <-Reducer 29 [BROADCAST_EDGE] vectorized
-                                                  BROADCAST [RS_352]
-                                                    Group By Operator [GBY_351] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                    <-Map 28 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                      SHUFFLE [RS_350]
-                                                        Group By Operator [GBY_349] (rows=1 width=12)
-                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                          Select Operator [SEL_347] (rows=1704 width=1910)
-                                                            Output:["_col0"]
-                                                             Please refer to the previous Select Operator [SEL_345]
+                        Top N Key Operator [TNK_314] (rows=637553312 width=163)
+                          keys:_col0, _col1, 0L,sort order:+++,top n:100
+                          Select Operator [SEL_312] (rows=383325119 width=88)
+                            Output:["_col0","_col1","_col2","_col3","_col4"]
+                            Merge Join Operator [MERGEJOIN_311] (rows=383325119 width=88)
+                              Conds:RS_361._col0=RS_366._col0(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5"]
+                            <-Reducer 13 [ONE_TO_ONE_EDGE] vectorized
+                              FORWARD [RS_366]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_365] (rows=34842647 width=77)
+                                  Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
+                                <-Reducer 12 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_37]
+                                    PartitionCols:_col0
+                                    Group By Operator [GBY_36] (rows=69685294 width=77)
+                                      Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col6
+                                      Merge Join Operator [MERGEJOIN_301] (rows=69685294 width=77)
+                                        Conds:RS_32._col1=RS_352._col0(Inner),Output:["_col2","_col3","_col6"]
+                                      <-Map 28 [SIMPLE_EDGE] vectorized
+                                        SHUFFLE [RS_352]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_349] (rows=1704 width=1910)
+                                            Output:["_col0"]
+                                            Filter Operator [FIL_348] (rows=1704 width=1910)
+                                              predicate:s_store_sk is not null
+                                              TableScan [TS_6] (rows=1704 width=1910)
+                                                default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk"]
+                                      <-Reducer 11 [SIMPLE_EDGE]
+                                        SHUFFLE [RS_32]
+                                          PartitionCols:_col1
+                                          Merge Join Operator [MERGEJOIN_300] (rows=63350266 width=77)
+                                            Conds:RS_364._col0=RS_333._col0(Inner),Output:["_col1","_col2","_col3"]
+                                          <-Map 9 [SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_333]
+                                              PartitionCols:_col0
+                                               Please refer to the previous Select Operator [SEL_330]
+                                          <-Map 30 [SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_364]
+                                              PartitionCols:_col0
+                                              Select Operator [SEL_363] (rows=57591150 width=77)
+                                                Output:["_col0","_col1","_col2","_col3"]
+                                                Filter Operator [FIL_362] (rows=57591150 width=77)
+                                                  predicate:(sr_returned_date_sk is not null and sr_store_sk is not null)
+                                                  TableScan [TS_20] (rows=57591150 width=77)
+                                                    default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_returned_date_sk","sr_store_sk","sr_return_amt","sr_net_loss"]
+                            <-Reducer 4 [ONE_TO_ONE_EDGE] vectorized
+                              FORWARD [RS_361]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_360] (rows=348477374 width=88)
+                                  Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
+                                <-Reducer 3 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_17]
+                                    PartitionCols:_col0
+                                    Group By Operator [GBY_16] (rows=696954748 width=88)
+                                      Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col6
+                                      Merge Join Operator [MERGEJOIN_299] (rows=696954748 width=88)
+                                        Conds:RS_12._col1=RS_350._col0(Inner),Output:["_col2","_col3","_col6"]
+                                      <-Map 28 [SIMPLE_EDGE] vectorized
+                                        SHUFFLE [RS_350]
+                                          PartitionCols:_col0
+                                           Please refer to the previous Select Operator [SEL_349]
+                                      <-Reducer 2 [SIMPLE_EDGE]
+                                        SHUFFLE [RS_12]
+                                          PartitionCols:_col1
+                                          Merge Join Operator [MERGEJOIN_298] (rows=633595212 width=88)
+                                            Conds:RS_359._col0=RS_331._col0(Inner),Output:["_col1","_col2","_col3"]
+                                          <-Map 9 [SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_331]
+                                              PartitionCols:_col0
+                                               Please refer to the previous Select Operator [SEL_330]
+                                          <-Map 1 [SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_359]
+                                              PartitionCols:_col0
+                                              Select Operator [SEL_358] (rows=575995635 width=88)
+                                                Output:["_col0","_col1","_col2","_col3"]
+                                                Filter Operator [FIL_357] (rows=575995635 width=88)
+                                                  predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_10_date_dim_d_date_sk_min) AND DynamicValue(RS_10_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_10_date_dim_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_13_store_s_store_sk_min) AND DynamicValue(RS_13_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_13_store_s_store_sk_bloom_filter))) and ss_sold_date_sk is not null and ss_store_sk is not null)
+                                                  TableScan [TS_0] (rows=575995635 width=88)
+                                                    default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_store_sk","ss_ext_sales_price","ss_net_profit"]
+                                                  <-Reducer 10 [BROADCAST_EDGE] vectorized
+                                                    BROADCAST [RS_347]
+                                                      Group By Operator [GBY_346] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                      <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                        SHUFFLE [RS_343]
+                                                          Group By Operator [GBY_340] (rows=1 width=12)
+                                                            Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                            Select Operator [SEL_332] (rows=8116 width=1119)
+                                                              Output:["_col0"]
+                                                               Please refer to the previous Select Operator [SEL_330]
+                                                  <-Reducer 29 [BROADCAST_EDGE] vectorized
+                                                    BROADCAST [RS_356]
+                                                      Group By Operator [GBY_355] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                      <-Map 28 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                        SHUFFLE [RS_354]
+                                                          Group By Operator [GBY_353] (rows=1 width=12)
+                                                            Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                            Select Operator [SEL_351] (rows=1704 width=1910)
+                                                              Output:["_col0"]
+                                                               Please refer to the previous Select Operator [SEL_349]
 


[53/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
index 0000000,a7ca05a..35be3c4
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
@@@ -1,0 -1,335 +1,336 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.common;
+ 
+ import java.io.IOException;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.TreeMap;
+ 
+ import com.google.common.collect.ImmutableList;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.fasterxml.jackson.annotation.JsonInclude;
+ import com.fasterxml.jackson.annotation.JsonProperty;
+ import com.fasterxml.jackson.core.JsonGenerator;
+ import com.fasterxml.jackson.core.JsonParser;
+ import com.fasterxml.jackson.core.JsonProcessingException;
+ import com.fasterxml.jackson.databind.DeserializationContext;
+ import com.fasterxml.jackson.databind.JsonDeserializer;
+ import com.fasterxml.jackson.databind.JsonSerializer;
+ import com.fasterxml.jackson.databind.ObjectMapper;
+ import com.fasterxml.jackson.databind.ObjectReader;
+ import com.fasterxml.jackson.databind.ObjectWriter;
+ import com.fasterxml.jackson.databind.SerializerProvider;
+ import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
+ import com.fasterxml.jackson.databind.annotation.JsonSerialize;
+ 
+ 
+ /**
+  * A class that defines the constant strings used by the statistics implementation.
+  */
+ 
+ public class StatsSetupConst {
+ 
+   protected static final Logger LOG = LoggerFactory.getLogger(StatsSetupConst.class.getName());
+ 
+   public enum StatDB {
+     fs {
+       @Override
+       public String getPublisher(Configuration conf) {
+         return "org.apache.hadoop.hive.ql.stats.fs.FSStatsPublisher";
+       }
+ 
+       @Override
+       public String getAggregator(Configuration conf) {
+         return "org.apache.hadoop.hive.ql.stats.fs.FSStatsAggregator";
+       }
+     },
+     custom {
+       @Override
+       public String getPublisher(Configuration conf) {
+         return MetastoreConf.getVar(conf, ConfVars.STATS_DEFAULT_PUBLISHER); }
+       @Override
+       public String getAggregator(Configuration conf) {
+         return MetastoreConf.getVar(conf,  ConfVars.STATS_DEFAULT_AGGREGATOR); }
+     };
+     public abstract String getPublisher(Configuration conf);
+     public abstract String getAggregator(Configuration conf);
+   }
+ 
+   // statistics stored in metastore
+   /**
+    * The name of the statistic Num Files to be published or gathered.
+    */
+   public static final String NUM_FILES = "numFiles";
+ 
+   /**
+    * The name of the statistic Num Partitions to be published or gathered.
+    */
+   public static final String NUM_PARTITIONS = "numPartitions";
+ 
+   /**
+    * The name of the statistic Total Size to be published or gathered.
+    */
+   public static final String TOTAL_SIZE = "totalSize";
+ 
+   /**
+    * The name of the statistic Row Count to be published or gathered.
+    */
+   public static final String ROW_COUNT = "numRows";
+ 
+   public static final String RUN_TIME_ROW_COUNT = "runTimeNumRows";
+ 
+   /**
+    * The name of the statistic Raw Data Size to be published or gathered.
+    */
+   public static final String RAW_DATA_SIZE = "rawDataSize";
+ 
+   /**
+    * The name of the statistic Number of Erasure Coded Files to be published or gathered.
+    */
+   public static final String NUM_ERASURE_CODED_FILES = "numFilesErasureCoded";
+ 
+   /**
+    * Temp dir for writing stats from tasks.
+    */
+   public static final String STATS_TMP_LOC = "hive.stats.tmp.loc";
+ 
+   public static final String STATS_FILE_PREFIX = "tmpstats-";
+   /**
+    * List of all supported statistics
+    */
+   public static final List<String> SUPPORTED_STATS = ImmutableList.of(
+       NUM_FILES, ROW_COUNT, TOTAL_SIZE, RAW_DATA_SIZE, NUM_ERASURE_CODED_FILES);
+ 
+   /**
+    * List of all statistics that need to be collected during query execution. These are
+    * statistics that inherently require a scan of the data.
+    */
+   public static final List<String> STATS_REQUIRE_COMPUTE = ImmutableList.of(ROW_COUNT, RAW_DATA_SIZE);
+ 
+   /**
+    * List of statistics that can be collected quickly without requiring a scan of the data.
+    */
+   public static final List<String> FAST_STATS = ImmutableList.of(
+       NUM_FILES, TOTAL_SIZE, NUM_ERASURE_CODED_FILES);
+ 
+   // This string constant is used to indicate to AlterHandler that
+   // alterPartition/alterTable is happening via statsTask or via user.
+   public static final String STATS_GENERATED = "STATS_GENERATED";
+ 
+   public static final String TASK = "TASK";
+ 
+   public static final String USER = "USER";
+ 
+   // This string constant is used by AlterHandler to figure out that it should not attempt to
+   // update stats. It is set by any client-side task which wishes to signal that no stats
+   // update should take place, such as with replication.
+   public static final String DO_NOT_UPDATE_STATS = "DO_NOT_UPDATE_STATS";
+ 
+   // This string constant is persisted in the metastore to indicate whether the corresponding
+   // table's or partition's basic statistics and column statistics are accurate.
+   public static final String COLUMN_STATS_ACCURATE = "COLUMN_STATS_ACCURATE";
+ 
+   public static final String COLUMN_STATS = "COLUMN_STATS";
+ 
+   public static final String BASIC_STATS = "BASIC_STATS";
+ 
+   public static final String CASCADE = "CASCADE";
+ 
+   public static final String TRUE = "true";
+ 
+   public static final String FALSE = "false";
+ 
+   // The parameter keys for the table statistics. Those keys are excluded from 'show create table' command output.
+   public static final List<String> TABLE_PARAMS_STATS_KEYS = ImmutableList.of(
+       COLUMN_STATS_ACCURATE, NUM_FILES, TOTAL_SIZE, ROW_COUNT, RAW_DATA_SIZE, NUM_PARTITIONS,
+       NUM_ERASURE_CODED_FILES);
+ 
+   private static class ColumnStatsAccurate {
+     private static ObjectReader objectReader;
+     private static ObjectWriter objectWriter;
+ 
+     static {
+       ObjectMapper objectMapper = new ObjectMapper();
+       objectReader = objectMapper.readerFor(ColumnStatsAccurate.class);
+       objectWriter = objectMapper.writerFor(ColumnStatsAccurate.class);
+     }
+ 
+     static class BooleanSerializer extends JsonSerializer<Boolean> {
+ 
+       @Override
+       public void serialize(Boolean value, JsonGenerator jsonGenerator,
+           SerializerProvider serializerProvider) throws IOException {
+         jsonGenerator.writeString(value.toString());
+       }
+     }
+ 
+     static class BooleanDeserializer extends JsonDeserializer<Boolean> {
+ 
+       public Boolean deserialize(JsonParser jsonParser,
+           DeserializationContext deserializationContext)
+               throws IOException {
+         return Boolean.valueOf(jsonParser.getValueAsString());
+       }
+     }
+ 
+     @JsonInclude(JsonInclude.Include.NON_DEFAULT)
+     @JsonSerialize(using = BooleanSerializer.class)
+     @JsonDeserialize(using = BooleanDeserializer.class)
+     @JsonProperty(BASIC_STATS)
+     boolean basicStats;
+ 
+     @JsonInclude(JsonInclude.Include.NON_EMPTY)
+     @JsonProperty(COLUMN_STATS)
+     @JsonSerialize(contentUsing = BooleanSerializer.class)
+     @JsonDeserialize(contentUsing = BooleanDeserializer.class)
+     TreeMap<String, Boolean> columnStats = new TreeMap<>();
+ 
+   }
+ 
+   public static boolean areBasicStatsUptoDate(Map<String, String> params) {
+     if (params == null) {
+       return false;
+     }
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+     return stats.basicStats;
+   }
+ 
+   public static boolean areColumnStatsUptoDate(Map<String, String> params, String colName) {
+     if (params == null) {
+       return false;
+     }
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+     return stats.columnStats.containsKey(colName);
+   }
+ 
+   // This would only throw a JSONException if stats.put(BASIC_STATS, TRUE)
+   // produced a duplicate key, which is not possible.
+   // Note that setting basic stats to false wipes out the column stats too.
+   public static void setBasicStatsState(Map<String, String> params, String setting) {
+     if (setting.equals(FALSE)) {
+       if (params!=null && params.containsKey(COLUMN_STATS_ACCURATE)) {
+         params.remove(COLUMN_STATS_ACCURATE);
+       }
+       return;
+     }
+     if (params == null) {
+       throw new RuntimeException("params are null... can't set column stats state!");
+     }
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+     stats.basicStats = true;
+     try {
+       params.put(COLUMN_STATS_ACCURATE, ColumnStatsAccurate.objectWriter.writeValueAsString(stats));
+     } catch (JsonProcessingException e) {
+       throw new RuntimeException("can't serialize column stats", e);
+     }
+   }
+ 
+   public static void setColumnStatsState(Map<String, String> params, List<String> colNames) {
+     if (params == null) {
+       throw new RuntimeException("params are null... can't set column stats state!");
+     }
+     if (colNames == null) {
+       return;
+     }
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+ 
+     for (String colName : colNames) {
+       if (!stats.columnStats.containsKey(colName)) {
+         stats.columnStats.put(colName, true);
+       }
+     }
+     try {
+       params.put(COLUMN_STATS_ACCURATE, ColumnStatsAccurate.objectWriter.writeValueAsString(stats));
+     } catch (JsonProcessingException e) {
+       LOG.trace(e.getMessage());
+     }
+   }
+ 
+   public static boolean canColumnStatsMerge(Map<String, String> params, String colName) {
+     if (params == null) {
+       return false;
+     }
++    // TODO: should this also check that the basic flag is valid?
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+     return stats.columnStats.containsKey(colName);
+   }
 -  
++
+   public static void clearColumnStatsState(Map<String, String> params) {
+     if (params == null) {
+       return;
+     }
+ 
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+     stats.columnStats.clear();
+ 
+     try {
+       params.put(COLUMN_STATS_ACCURATE, ColumnStatsAccurate.objectWriter.writeValueAsString(stats));
+     } catch (JsonProcessingException e) {
+       LOG.trace(e.getMessage());
+     }
+   }
+ 
+   public static void removeColumnStatsState(Map<String, String> params, List<String> colNames) {
+     if (params == null) {
+       return;
+     }
+     try {
+       ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+       for (String string : colNames) {
+         stats.columnStats.remove(string);
+       }
+       params.put(COLUMN_STATS_ACCURATE, ColumnStatsAccurate.objectWriter.writeValueAsString(stats));
+     } catch (JsonProcessingException e) {
+       LOG.trace(e.getMessage());
+     }
+   }
+ 
+   public static void setStatsStateForCreateTable(Map<String, String> params,
+       List<String> cols, String setting) {
+     if (TRUE.equals(setting)) {
+       for (String stat : StatsSetupConst.SUPPORTED_STATS) {
+         params.put(stat, "0");
+       }
+     }
+     setBasicStatsState(params, setting);
+     if (TRUE.equals(setting)) {
+       setColumnStatsState(params, cols);
+     }
+   }
 -  
++
+   private static ColumnStatsAccurate parseStatsAcc(String statsAcc) {
+     if (statsAcc == null) {
+       return new ColumnStatsAccurate();
+     }
+     try {
+       return ColumnStatsAccurate.objectReader.readValue(statsAcc);
+     } catch (Exception e) {
+       ColumnStatsAccurate ret = new ColumnStatsAccurate();
+       if (TRUE.equalsIgnoreCase(statsAcc)) {
+         ret.basicStats = true;
+       }
+       return ret;
+     }
+   }
+ }

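For readers tracing the stats flags above, here is a minimal, illustrative sketch (not part of the patch) of how the COLUMN_STATS_ACCURATE table parameter behaves when driven through the helpers defined in StatsSetupConst. The class name StatsSetupConstExample and the sample column names are invented for this example.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.common.StatsSetupConst;

public class StatsSetupConstExample {
  public static void main(String[] args) {
    Map<String, String> params = new HashMap<>();

    // Mark basic stats as accurate; this serializes a small JSON blob into COLUMN_STATS_ACCURATE.
    StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE);

    // Mark column stats for two (hypothetical) columns as accurate.
    StatsSetupConst.setColumnStatsState(params, Arrays.asList("id", "name"));

    // Roughly: {"BASIC_STATS":"true","COLUMN_STATS":{"id":"true","name":"true"}}
    System.out.println(params.get(StatsSetupConst.COLUMN_STATS_ACCURATE));

    System.out.println(StatsSetupConst.areBasicStatsUptoDate(params));          // true
    System.out.println(StatsSetupConst.areColumnStatsUptoDate(params, "id"));   // true

    // Setting basic stats to FALSE removes the whole blob, wiping column stats as well.
    StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
    System.out.println(StatsSetupConst.areColumnStatsUptoDate(params, "id"));   // false
  }
}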
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
index 0000000,050dca9..f3dc264
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
@@@ -1,0 -1,202 +1,204 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore;
+ 
+ import java.util.List;
+ 
+ import org.apache.hadoop.conf.Configurable;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+ import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ 
+ /**
+  * Interface for Alter Table and Alter Partition code
+  */
+ public interface AlterHandler extends Configurable {
+ 
+   /**
+    * @deprecated As of release 2.2.0. Replaced by {@link #alterTable(RawStore, Warehouse, String,
+    * String, String, Table, EnvironmentContext, IHMSHandler)}
+    *
+    * Handles alter table; the changes can be cascaded to partitions if applicable.
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh
+    *          Hive Warehouse where table data is stored
+    * @param catName
+    *          catalog of the table being altered
+    * @param dbname
+    *          database of the table being altered
+    * @param name
+    *          original name of the table being altered. same as
+    *          <i>newTable.tableName</i> if alter op is not a rename.
+    * @param newTable
+    *          new table object
+    * @throws InvalidOperationException
+    *           thrown if the newTable object is invalid
+    * @throws MetaException
+    *           thrown if there is any other error
+    */
+   @Deprecated
+   default void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname,
+     String name, Table newTable, EnvironmentContext envContext)
+       throws InvalidOperationException, MetaException {
 -    alterTable(msdb, wh, catName, dbname, name, newTable, envContext, null);
++    alterTable(msdb, wh, catName, dbname, name, newTable, envContext, null, -1, null);
+   }
+ 
+   /**
+    * Handles alter table; the changes can be cascaded to partitions if applicable.
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh
+    *          Hive Warehouse where table data is stored
+    * @param catName catalog of the table being altered
+    * @param dbname
+    *          database of the table being altered
+    * @param name
+    *          original name of the table being altered. same as
+    *          <i>newTable.tableName</i> if alter op is not a rename.
+    * @param newTable
+    *          new table object
+    * @param handler
+    *          HMSHandle object (required to log event notification)
+    * @throws InvalidOperationException
+    *           thrown if the newTable object is invalid
+    * @throws MetaException
+    *           thrown if there is any other error
+    */
+   void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname,
+       String name, Table newTable, EnvironmentContext envContext,
 -      IHMSHandler handler) throws InvalidOperationException, MetaException;
++      IHMSHandler handler, long txnId, String writeIdList)
++          throws InvalidOperationException, MetaException;
+ 
+   /**
+    * @deprecated As of release 2.2.0.  Replaced by {@link #alterPartition(RawStore, Warehouse, String,
+    * String, List, Partition, EnvironmentContext, IHMSHandler)}
+    *
+    * handles alter partition
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh
+    * @param dbname
+    *          database of the partition being altered
+    * @param name
+    *          table of the partition being altered
+    * @param part_vals
+    *          original values of the partition being altered
+    * @param new_part
+    *          new partition object
+    * @return the altered partition
+    * @throws InvalidOperationException
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    */
+   @Deprecated
+   Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
+     final String name, final List<String> part_vals, final Partition new_part,
+     EnvironmentContext environmentContext)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+ 
+   /**
+    * handles alter partition
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh physical warehouse class
+    * @param catName catalog name
+    * @param dbname
+    *          database of the partition being altered
+    * @param name
+    *          table of the partition being altered
+    * @param part_vals
+    *          original values of the partition being altered
+    * @param new_part
+    *          new partition object
+    * @param handler
+    *          HMSHandle object (required to log event notification)
+    * @return the altered partition
+    * @throws InvalidOperationException
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    */
+   Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName,
+                            final String dbname, final String name, final List<String> part_vals,
+                            final Partition new_part, EnvironmentContext environmentContext,
 -                           IHMSHandler handler)
++                           IHMSHandler handler, long txnId, String validWriteIds)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+ 
+   /**
+    * @deprecated As of release 3.0.0. Replaced by {@link #alterPartitions(RawStore, Warehouse, String,
+    * String, String, List, EnvironmentContext, IHMSHandler)}
+    *
+    * handles alter partitions
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh
+    * @param dbname
+    *          database of the partition being altered
+    * @param name
+    *          table of the partition being altered
+    * @param new_parts
+    *          new partition list
+    * @return the altered partition list
+    * @throws InvalidOperationException
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    */
+   @Deprecated
+   List<Partition> alterPartitions(final RawStore msdb, Warehouse wh,
+     final String dbname, final String name, final List<Partition> new_parts,
+     EnvironmentContext environmentContext)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+ 
+   /**
+    * handles alter partitions
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh
+    * @param dbname
+    *          database of the partition being altered
+    * @param name
+    *          table of the partition being altered
+    * @param new_parts
+    *          new partition list
+    * @param handler
+    *          HMSHandle object (required to log event notification)
+    * @return the altered partition list
+    * @throws InvalidOperationException
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    */
+   List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String catName,
+     final String dbname, final String name, final List<Partition> new_parts,
 -    EnvironmentContext environmentContext,IHMSHandler handler)
++    EnvironmentContext environmentContext, long txnId, String writeIdList, long writeId,
++    IHMSHandler handler)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+ }

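The interface change above follows a common evolution pattern: the pre-existing signature survives as a deprecated default method that forwards to the extended, transaction-aware overload with sentinel values (txnId = -1, writeIdList = null). Below is a self-contained sketch of that pattern with invented names, for illustration only; it is not part of the patch and does not use the real Hive types.

public class AlterHandlerEvolutionSketch {

  /** Toy interface mirroring the delegation style used in AlterHandler above. */
  interface Handler {
    /** Legacy entry point; forwards with "no transactional context" sentinels. */
    @Deprecated
    default void alterTable(String db, String table) {
      alterTable(db, table, -1L, null);
    }

    /** New transaction-aware entry point. */
    void alterTable(String db, String table, long txnId, String writeIdList);
  }

  public static void main(String[] args) {
    Handler h = (db, table, txnId, writeIdList) ->
        System.out.println("alter " + db + "." + table
            + " txnId=" + txnId + " writeIdList=" + writeIdList);

    h.alterTable("default", "t1");                               // legacy path, sentinels filled in
    h.alterTable("default", "t1", 42L, "sample-write-id-list");  // transaction-aware path
  }
}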
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 0000000,93ac74c..719f001
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@@ -1,0 -1,948 +1,961 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.collect.Lists;
+ 
+ import org.apache.commons.lang.StringUtils;
+ import org.apache.hadoop.hive.common.TableName;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+ import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+ import org.apache.hadoop.hive.metastore.utils.FileUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+ 
+ import java.io.IOException;
+ import java.net.URI;
+ import java.util.ArrayList;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.Iterator;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
+ 
+ /**
+  * Hive specific implementation of alter
+  */
+ public class HiveAlterHandler implements AlterHandler {
+ 
+   protected Configuration conf;
+   private static final Logger LOG = LoggerFactory.getLogger(HiveAlterHandler.class
+       .getName());
+ 
+   // hiveConf, getConf and setConf are in this class because AlterHandler extends Configurable.
+   // Always use the configuration from HMS Handler.  Making AlterHandler not extend Configurable
+   // is not in the scope of the fix for HIVE-17942.
+   @Override
+   public Configuration getConf() {
+     return conf;
+   }
+ 
+   @Override
+   @SuppressWarnings("nls")
+   public void setConf(Configuration conf) {
+     this.conf = conf;
+   }
+ 
+   @Override
+   public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname,
+       String name, Table newt, EnvironmentContext environmentContext,
 -      IHMSHandler handler) throws InvalidOperationException, MetaException {
++      IHMSHandler handler, long txnId, String writeIdList)
++          throws InvalidOperationException, MetaException {
+     catName = normalizeIdentifier(catName);
+     name = name.toLowerCase();
+     dbname = dbname.toLowerCase();
+ 
+     final boolean cascade = environmentContext != null
+         && environmentContext.isSetProperties()
+         && StatsSetupConst.TRUE.equals(environmentContext.getProperties().get(
+             StatsSetupConst.CASCADE));
+     if (newt == null) {
+       throw new InvalidOperationException("New table is null");
+     }
+ 
+     String newTblName = newt.getTableName().toLowerCase();
+     String newDbName = newt.getDbName().toLowerCase();
+ 
+     if (!MetaStoreUtils.validateName(newTblName, handler.getConf())) {
+       throw new InvalidOperationException(newTblName + " is not a valid object name");
+     }
+     String validate = MetaStoreUtils.validateTblColumns(newt.getSd().getCols());
+     if (validate != null) {
+       throw new InvalidOperationException("Invalid column " + validate);
+     }
+ 
+     Path srcPath = null;
+     FileSystem srcFs;
+     Path destPath = null;
+     FileSystem destFs = null;
+ 
+     boolean success = false;
+     boolean dataWasMoved = false;
+     boolean isPartitionedTable = false;
+ 
+     Table oldt = null;
+ 
+     List<TransactionalMetaStoreEventListener> transactionalListeners = handler.getTransactionalListeners();
+     List<MetaStoreEventListener> listeners = handler.getListeners();
+     Map<String, String> txnAlterTableEventResponses = Collections.emptyMap();
+ 
+     try {
+       boolean rename = false;
+       List<Partition> parts;
+ 
+       // Switching tables between catalogs is not allowed.
+       if (!catName.equalsIgnoreCase(newt.getCatName())) {
+         throw new InvalidOperationException("Tables cannot be moved between catalogs, old catalog " +
+             catName + ", new catalog " + newt.getCatName());
+       }
+ 
+       // check if table with the new name already exists
+       if (!newTblName.equals(name) || !newDbName.equals(dbname)) {
 -        if (msdb.getTable(catName, newDbName, newTblName) != null) {
++        if (msdb.getTable(catName, newDbName, newTblName,  -1, null) != null) {
+           throw new InvalidOperationException("new table " + newDbName
+               + "." + newTblName + " already exists");
+         }
+         rename = true;
+       }
+ 
+       msdb.openTransaction();
+       // get old table
 -      oldt = msdb.getTable(catName, dbname, name);
++      // Note: we don't verify stats here; it's done below in alterTableUpdateTableColumnStats.
++      oldt = msdb.getTable(catName, dbname, name, -1, null);
+       if (oldt == null) {
+         throw new InvalidOperationException("table " +
+             TableName.getQualified(catName, dbname, name) + " doesn't exist");
+       }
+ 
+       if (oldt.getPartitionKeysSize() != 0) {
+         isPartitionedTable = true;
+       }
+ 
+       // Views derive the column type from the base table definition.  So the view definition
+       // can be altered to change the column types.  The column type compatibility checks should
+       // be done only for non-views.
+       if (MetastoreConf.getBoolVar(handler.getConf(),
+             MetastoreConf.ConfVars.DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES) &&
+           !oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString())) {
+         // Throws InvalidOperationException if the new column types are not
+         // compatible with the current column types.
+         checkColTypeChangeCompatible(oldt.getSd().getCols(), newt.getSd().getCols());
+       }
+ 
+       //check that partition keys have not changed, except for virtual views
+       //however, allow the partition comments to change
+       boolean partKeysPartiallyEqual = checkPartialPartKeysEqual(oldt.getPartitionKeys(),
+           newt.getPartitionKeys());
+ 
+       if(!oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString())){
+         if (!partKeysPartiallyEqual) {
+           throw new InvalidOperationException("partition keys can not be changed.");
+         }
+       }
+ 
+       // A rename needs to change the data location and move the data to the new location
+       // corresponding to the new name if:
+       // 1) the table is not a virtual view, and
+       // 2) the table is not an external table, and
+       // 3) the user didn't change the default location (or new location is empty), and
+       // 4) the table was not initially created with a specified location
+       if (rename
+           && !oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString())
+           && (oldt.getSd().getLocation().compareTo(newt.getSd().getLocation()) == 0
+             || StringUtils.isEmpty(newt.getSd().getLocation()))
+           && !MetaStoreUtils.isExternalTable(oldt)) {
+         Database olddb = msdb.getDatabase(catName, dbname);
+         // If a table was created in a user-specified location using DDL like
+         // CREATE TABLE tbl ... LOCATION ..., it should be treated like an external table
+         // during a table rename: its data location should not be changed. We can tell
+         // whether this is such a table by checking whether the table directory was created
+         // directly under its database directory.
+         srcPath = new Path(oldt.getSd().getLocation());
+         String oldtRelativePath = (new Path(olddb.getLocationUri()).toUri())
+             .relativize(srcPath.toUri()).toString();
+         boolean tableInSpecifiedLoc = !oldtRelativePath.equalsIgnoreCase(name)
+             && !oldtRelativePath.equalsIgnoreCase(name + Path.SEPARATOR);
+ 
+         if (!tableInSpecifiedLoc) {
+           srcFs = wh.getFs(srcPath);
+ 
+           // get new location
+           Database db = msdb.getDatabase(catName, newDbName);
+           Path databasePath = constructRenamedPath(wh.getDatabasePath(db), srcPath);
+           destPath = new Path(databasePath, newTblName);
+           destFs = wh.getFs(destPath);
+ 
+           newt.getSd().setLocation(destPath.toString());
+ 
+           // check that destination does not exist otherwise we will be
+           // overwriting data
+           // check that src and dest are on the same file system
+           if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
+             throw new InvalidOperationException("table new location " + destPath
+                 + " is on a different file system than the old location "
+                 + srcPath + ". This operation is not supported");
+           }
+ 
+           try {
+             if (destFs.exists(destPath)) {
+               throw new InvalidOperationException("New location for this table " +
+                   TableName.getQualified(catName, newDbName, newTblName) +
+                       " already exists : " + destPath);
+             }
+             // check that src exists and also checks permissions necessary, rename src to dest
+             if (srcFs.exists(srcPath) && wh.renameDir(srcPath, destPath,
+                     ReplChangeManager.isSourceOfReplication(olddb))) {
+               dataWasMoved = true;
+             }
+           } catch (IOException | MetaException e) {
+             LOG.error("Alter Table operation for " + dbname + "." + name + " failed.", e);
+             throw new InvalidOperationException("Alter Table operation for " + dbname + "." + name +
+                 " failed to move data due to: '" + getSimpleMessage(e)
+                 + "' See hive log file for details.");
+           }
+ 
+           if (!HiveMetaStore.isRenameAllowed(olddb, db)) {
+             LOG.error("Alter Table operation for " + TableName.getQualified(catName, dbname, name) +
+                     " to new table = " + TableName.getQualified(catName, newDbName, newTblName) + " failed");
+             throw new MetaException("Alter table not allowed for table " +
+                     TableName.getQualified(catName, dbname, name) +
+                     " to new table = " + TableName.getQualified(catName, newDbName, newTblName));
+           }
+         }
+ 
+         if (isPartitionedTable) {
+           String oldTblLocPath = srcPath.toUri().getPath();
+           String newTblLocPath = dataWasMoved ? destPath.toUri().getPath() : null;
+ 
+           // also the location field in partition
+           parts = msdb.getPartitions(catName, dbname, name, -1);
+           Map<Partition, ColumnStatistics> columnStatsNeedUpdated = new HashMap<>();
+           for (Partition part : parts) {
+             String oldPartLoc = part.getSd().getLocation();
+             if (dataWasMoved && oldPartLoc.contains(oldTblLocPath)) {
+               URI oldUri = new Path(oldPartLoc).toUri();
+               String newPath = oldUri.getPath().replace(oldTblLocPath, newTblLocPath);
+               Path newPartLocPath = new Path(oldUri.getScheme(), oldUri.getAuthority(), newPath);
+               part.getSd().setLocation(newPartLocPath.toString());
+             }
+             part.setDbName(newDbName);
+             part.setTableName(newTblName);
+             ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name,
+                 part.getValues(), part.getSd().getCols(), oldt, part, null);
+             if (colStats != null) {
+               columnStatsNeedUpdated.put(part, colStats);
+             }
+           }
 -          msdb.alterTable(catName, dbname, name, newt);
++          // Do not verify stats parameters on a partitioned table.
++          msdb.alterTable(catName, dbname, name, newt, -1, null);
+           // alterPartition is only for changing the partition location in the table rename
+           if (dataWasMoved) {
+ 
+             int partsToProcess = parts.size();
+             int partitionBatchSize = MetastoreConf.getIntVar(handler.getConf(),
+                 MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX);
+             int batchStart = 0;
+             while (partsToProcess > 0) {
+               int batchEnd = Math.min(batchStart + partitionBatchSize, parts.size());
+               List<Partition> partBatch = parts.subList(batchStart, batchEnd);
+               int partBatchSize = partBatch.size();
+               partsToProcess -= partBatchSize;
+               batchStart += partBatchSize;
+               List<List<String>> partValues = new ArrayList<>(partBatchSize);
+               for (Partition part : partBatch) {
+                 partValues.add(part.getValues());
+               }
 -              msdb.alterPartitions(catName, newDbName, newTblName, partValues, partBatch);
++              msdb.alterPartitions(catName, newDbName, newTblName, partValues,
++                  partBatch, newt.getWriteId(), txnId, writeIdList);
+             }
+           }
+ 
+           for (Entry<Partition, ColumnStatistics> partColStats : columnStatsNeedUpdated.entrySet()) {
+             ColumnStatistics newPartColStats = partColStats.getValue();
+             newPartColStats.getStatsDesc().setDbName(newDbName);
+             newPartColStats.getStatsDesc().setTableName(newTblName);
 -            msdb.updatePartitionColumnStatistics(newPartColStats, partColStats.getKey().getValues());
++            msdb.updatePartitionColumnStatistics(newPartColStats, partColStats.getKey().getValues(),
++                txnId, writeIdList, newt.getWriteId());
+           }
+         } else {
 -          alterTableUpdateTableColumnStats(msdb, oldt, newt);
++          alterTableUpdateTableColumnStats(
++              msdb, oldt, newt, environmentContext, txnId, writeIdList);
+         }
+       } else {
+         // operations other than table rename
 -
+         if (MetaStoreUtils.requireCalStats(null, null, newt, environmentContext) &&
+             !isPartitionedTable) {
+           Database db = msdb.getDatabase(catName, newDbName);
+           // Update table stats. For partitioned table, we update stats in alterPartition()
+           MetaStoreUtils.updateTableStatsSlow(db, newt, wh, false, true, environmentContext);
+         }
+ 
+         if (isPartitionedTable) {
+           // Currently only column-related changes can be cascaded in alter table.
+           if(!MetaStoreUtils.areSameColumns(oldt.getSd().getCols(), newt.getSd().getCols())) {
+             parts = msdb.getPartitions(catName, dbname, name, -1);
+             for (Partition part : parts) {
+               Partition oldPart = new Partition(part);
+               List<FieldSchema> oldCols = part.getSd().getCols();
+               part.getSd().setCols(newt.getSd().getCols());
+               ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name,
+                   part.getValues(), oldCols, oldt, part, null);
+               assert(colStats == null);
+               if (cascade) {
 -                msdb.alterPartition(catName, dbname, name, part.getValues(), part);
++                msdb.alterPartition(
++                    catName, dbname, name, part.getValues(), part, txnId, writeIdList);
+               } else {
+                 // update changed properties (stats)
+                 oldPart.setParameters(part.getParameters());
 -                msdb.alterPartition(catName, dbname, name, part.getValues(), oldPart);
++                msdb.alterPartition(
++                    catName, dbname, name, part.getValues(), oldPart, txnId, writeIdList);
+               }
+             }
 -            msdb.alterTable(catName, dbname, name, newt);
++            // Don't validate table-level stats for a partitioned table.
++            msdb.alterTable(catName, dbname, name, newt, -1, null);
+           } else {
+             LOG.warn("Alter table not cascaded to partitions.");
 -            alterTableUpdateTableColumnStats(msdb, oldt, newt);
++            alterTableUpdateTableColumnStats(
++                msdb, oldt, newt, environmentContext, txnId, writeIdList);
+           }
+         } else {
 -          alterTableUpdateTableColumnStats(msdb, oldt, newt);
++          alterTableUpdateTableColumnStats(
++              msdb, oldt, newt, environmentContext, txnId, writeIdList);
+         }
+       }
+ 
+       if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
+         txnAlterTableEventResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                   EventMessage.EventType.ALTER_TABLE,
+                   new AlterTableEvent(oldt, newt, false, true, handler),
+                   environmentContext);
+       }
+       // commit the changes
+       success = msdb.commitTransaction();
+     } catch (InvalidObjectException e) {
+       LOG.debug("Failed to get object from Metastore ", e);
+       throw new InvalidOperationException(
+           "Unable to change partition or table."
+               + " Check metastore logs for detailed stack." + e.getMessage());
+     } catch (InvalidInputException e) {
+         LOG.debug("Accessing Metastore failed due to invalid input ", e);
+         throw new InvalidOperationException(
+             "Unable to change partition or table."
+                 + " Check metastore logs for detailed stack." + e.getMessage());
+     } catch (NoSuchObjectException e) {
+       LOG.debug("Object not found in metastore ", e);
+       throw new InvalidOperationException(
+           "Unable to change partition or table. Database " + dbname + " does not exist"
+               + " Check metastore logs for detailed stack." + e.getMessage());
+     } finally {
+       if (!success) {
+         LOG.error("Failed to alter table " + TableName.getQualified(catName, dbname, name));
+         msdb.rollbackTransaction();
+         if (dataWasMoved) {
+           try {
+             if (destFs.exists(destPath)) {
+               if (!destFs.rename(destPath, srcPath)) {
+                 LOG.error("Failed to restore data from " + destPath + " to " + srcPath
+                     + " in alter table failure. Manual restore is needed.");
+               }
+             }
+           } catch (IOException e) {
+             LOG.error("Failed to restore data from " + destPath + " to " + srcPath
+                 +  " in alter table failure. Manual restore is needed.");
+           }
+         }
+       }
+     }
+ 
+     if (!listeners.isEmpty()) {
+       // I don't think event notifications in case of failures are necessary, but other HMS operations
+       // make this call whether the event failed or succeeded. To make this behavior consistent,
+       // this call is made for failed events also.
+       MetaStoreListenerNotifier.notifyEvent(listeners, EventMessage.EventType.ALTER_TABLE,
+           new AlterTableEvent(oldt, newt, false, success, handler),
+           environmentContext, txnAlterTableEventResponses, msdb);
+     }
+   }
+ 
+   /**
+    * Extracts a concise message from a MetaException that encapsulates the error message of a
+    * RemoteException from Hadoop RPC; that wrapping pulls the stack trace into e.getMessage(),
+    * which makes logs and stack traces confusing.
+    * @param ex the exception to summarize
+    * @return the first line of the message for a MetaException, otherwise the full message
+    */
+   String getSimpleMessage(Exception ex) {
+     if(ex instanceof MetaException) {
+       String msg = ex.getMessage();
+       if(msg == null || !msg.contains("\n")) {
+         return msg;
+       }
+       return msg.substring(0, msg.indexOf('\n'));
+     }
+     return ex.getMessage();
+   }
+ 
+   @Override
+   public Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
+     final String name, final List<String> part_vals, final Partition new_part,
+     EnvironmentContext environmentContext)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+     return alterPartition(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, part_vals, new_part,
 -        environmentContext, null);
++        environmentContext, null, -1, null);
+   }
+ 
+   @Override
 -  public Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName,
 -                                  final String dbname, final String name,
 -                                  final List<String> part_vals, final Partition new_part,
 -                                  EnvironmentContext environmentContext, IHMSHandler handler)
++  public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, String dbname,
++      String name, List<String> part_vals, final Partition new_part,
++      EnvironmentContext environmentContext, IHMSHandler handler, long txnId, String validWriteIds)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+     boolean success = false;
+     Partition oldPart;
+     List<TransactionalMetaStoreEventListener> transactionalListeners = null;
+     if (handler != null) {
+       transactionalListeners = handler.getTransactionalListeners();
+     }
+ 
+     // Set DDL time to now if not specified
+     if (new_part.getParameters() == null ||
+         new_part.getParameters().get(hive_metastoreConstants.DDL_TIME) == null ||
+         Integer.parseInt(new_part.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
+       new_part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
+           .currentTimeMillis() / 1000));
+     }
+ 
+     //alter partition
+     if (part_vals == null || part_vals.size() == 0) {
+       try {
+         msdb.openTransaction();
+ 
 -        Table tbl = msdb.getTable(catName, dbname, name);
++        Table tbl = msdb.getTable(catName, dbname, name,  -1, null);
+         if (tbl == null) {
+           throw new InvalidObjectException(
+               "Unable to alter partition because table or database does not exist.");
+         }
+         oldPart = msdb.getPartition(catName, dbname, name, new_part.getValues());
+         if (MetaStoreUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) {
+           // if stats are same, no need to update
+           if (MetaStoreUtils.isFastStatsSame(oldPart, new_part)) {
+             MetaStoreUtils.updateBasicState(environmentContext, new_part.getParameters());
+           } else {
+             MetaStoreUtils.updatePartitionStatsFast(
+                 new_part, tbl, wh, false, true, environmentContext, false);
+           }
+         }
+ 
+         // PartitionView does not have SD. We do not need to update its column stats.
+         if (oldPart.getSd() != null) {
+           updateOrGetPartitionColumnStats(msdb, catName, dbname, name, new_part.getValues(),
+               oldPart.getSd().getCols(), tbl, new_part, null);
+         }
 -        msdb.alterPartition(catName, dbname, name, new_part.getValues(), new_part);
++        msdb.alterPartition(
++            catName, dbname, name, new_part.getValues(), new_part, txnId, validWriteIds);
+         if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                                 EventMessage.EventType.ALTER_PARTITION,
+                                                 new AlterPartitionEvent(oldPart, new_part, tbl, false, true, handler),
+                                                 environmentContext);
+ 
+ 
+         }
+         success = msdb.commitTransaction();
+       } catch (InvalidObjectException e) {
+         LOG.warn("Alter failed", e);
+         throw new InvalidOperationException("alter is not possible: " + e.getMessage());
+       } catch (NoSuchObjectException e) {
+         //old partition does not exist
+         throw new InvalidOperationException("alter is not possible: " + e.getMessage());
+       } finally {
+         if(!success) {
+           msdb.rollbackTransaction();
+         }
+       }
+       return oldPart;
+     }
+ 
+     //rename partition
+     String oldPartLoc;
+     String newPartLoc;
+     Path srcPath = null;
+     Path destPath = null;
+     FileSystem srcFs;
+     FileSystem destFs = null;
+     boolean dataWasMoved = false;
+     Database db;
+     try {
+       msdb.openTransaction();
 -      Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name);
++      Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name,  -1, null);
+       if (tbl == null) {
+         throw new InvalidObjectException(
+             "Unable to alter partition because table or database does not exist.");
+       }
+       try {
+         oldPart = msdb.getPartition(catName, dbname, name, part_vals);
+       } catch (NoSuchObjectException e) {
+         // this means there is no existing partition
+         throw new InvalidObjectException(
+             "Unable to rename partition because old partition does not exist");
+       }
+ 
+       Partition check_part;
+       try {
+         check_part = msdb.getPartition(catName, dbname, name, new_part.getValues());
+       } catch(NoSuchObjectException e) {
+         // this means there is no existing partition
+         check_part = null;
+       }
+ 
+       if (check_part != null) {
+         throw new AlreadyExistsException("Partition already exists:" + dbname + "." + name + "." +
+             new_part.getValues());
+       }
+ 
+       // when renaming a partition, we should update
+       // 1) partition SD Location
+       // 2) partition column stats if there are any because of part_name field in HMS table PART_COL_STATS
+       // 3) rename the partition directory if it is not an external table
+       if (!tbl.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
++        // TODO: refactor this into a separate method after master merge, this one is too big.
+         try {
+           db = msdb.getDatabase(catName, dbname);
+ 
+           // if tbl location is available use it
+           // else derive the tbl location from database location
+           destPath = wh.getPartitionPath(db, tbl, new_part.getValues());
+           destPath = constructRenamedPath(destPath, new Path(new_part.getSd().getLocation()));
+         } catch (NoSuchObjectException e) {
+           LOG.debug("Didn't find object in metastore ", e);
+           throw new InvalidOperationException(
+             "Unable to change partition or table. Database " + dbname + " does not exist"
+               + " Check metastore logs for detailed stack." + e.getMessage());
+         }
+ 
+         if (destPath != null) {
+           newPartLoc = destPath.toString();
+           oldPartLoc = oldPart.getSd().getLocation();
+           LOG.info("srcPath:" + oldPartLoc);
+           LOG.info("destPath:" + newPartLoc);
+           srcPath = new Path(oldPartLoc);
+           srcFs = wh.getFs(srcPath);
+           destFs = wh.getFs(destPath);
+           // check that src and dest are on the same file system
+           if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
+             throw new InvalidOperationException("New table location " + destPath
+               + " is on a different file system than the old location "
+               + srcPath + ". This operation is not supported.");
+           }
+ 
+           try {
+             if (srcFs.exists(srcPath)) {
+               if (newPartLoc.compareTo(oldPartLoc) != 0 && destFs.exists(destPath)) {
+                 throw new InvalidOperationException("New location for this table "
+                   + tbl.getDbName() + "." + tbl.getTableName()
+                   + " already exists : " + destPath);
+               }
+               //if destPath's parent path doesn't exist, we should mkdir it
+               Path destParentPath = destPath.getParent();
+               if (!wh.mkdirs(destParentPath)) {
+                   throw new MetaException("Unable to create path " + destParentPath);
+               }
+ 
+               //rename the data directory
+               wh.renameDir(srcPath, destPath, ReplChangeManager.isSourceOfReplication(db));
+               LOG.info("Partition directory rename from " + srcPath + " to " + destPath + " done.");
+               dataWasMoved = true;
+             }
+           } catch (IOException e) {
+             LOG.error("Cannot rename partition directory from " + srcPath + " to " + destPath, e);
+             throw new InvalidOperationException("Unable to access src or dest location for partition "
+                 + tbl.getDbName() + "." + tbl.getTableName() + " " + new_part.getValues());
+           } catch (MetaException me) {
+             LOG.error("Cannot rename partition directory from " + srcPath + " to " + destPath, me);
+             throw me;
+           }
+           new_part.getSd().setLocation(newPartLoc);
+         }
+       } else {
+         new_part.getSd().setLocation(oldPart.getSd().getLocation());
+       }
+ 
+       if (MetaStoreUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) {
+         MetaStoreUtils.updatePartitionStatsFast(
+             new_part, tbl, wh, false, true, environmentContext, false);
+       }
+ 
+       String newPartName = Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues());
+       ColumnStatistics cs = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldPart.getValues(),
+           oldPart.getSd().getCols(), tbl, new_part, null);
 -      msdb.alterPartition(catName, dbname, name, part_vals, new_part);
++      msdb.alterPartition(catName, dbname, name, part_vals, new_part, txnId, validWriteIds);
+       if (cs != null) {
+         cs.getStatsDesc().setPartName(newPartName);
+         try {
 -          msdb.updatePartitionColumnStatistics(cs, new_part.getValues());
++          msdb.updatePartitionColumnStatistics(cs, new_part.getValues(),
++              txnId, validWriteIds, new_part.getWriteId());
+         } catch (InvalidInputException iie) {
+           throw new InvalidOperationException("Unable to update partition stats in table rename." + iie);
+         } catch (NoSuchObjectException nsoe) {
+           // It is ok, ignore
+         }
+       }
+ 
+       if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
+         MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                               EventMessage.EventType.ALTER_PARTITION,
+                                               new AlterPartitionEvent(oldPart, new_part, tbl, false, true, handler),
+                                               environmentContext);
+       }
+ 
+       success = msdb.commitTransaction();
+     } finally {
+       if (!success) {
+         LOG.error("Failed to rename a partition. Rollback transaction");
+         msdb.rollbackTransaction();
+         if (dataWasMoved) {
+           LOG.error("Revert the data move in renaming a partition.");
+           try {
+             if (destFs.exists(destPath)) {
+               wh.renameDir(destPath, srcPath, false);
+             }
+           } catch (MetaException me) {
+             LOG.error("Failed to restore partition data from " + destPath + " to " + srcPath
+                 +  " in alter partition failure. Manual restore is needed.");
+           } catch (IOException ioe) {
+             LOG.error("Failed to restore partition data from " + destPath + " to " + srcPath
+                 +  " in alter partition failure. Manual restore is needed.");
+           }
+         }
+       }
+     }
+     return oldPart;
+   }
+ 
++  @Deprecated
+   @Override
+   public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String dbname,
+     final String name, final List<Partition> new_parts,
+     EnvironmentContext environmentContext)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+     return alterPartitions(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, new_parts,
 -        environmentContext, null);
++        environmentContext, -1, null, -1, null);
+   }
+ 
+   @Override
+   public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String catName,
+                                          final String dbname, final String name,
+                                          final List<Partition> new_parts,
 -                                         EnvironmentContext environmentContext, IHMSHandler handler)
++                                         EnvironmentContext environmentContext,
++                                         long txnId, String writeIdList, long writeId,
++                                         IHMSHandler handler)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+     List<Partition> oldParts = new ArrayList<>();
+     List<List<String>> partValsList = new ArrayList<>();
+     List<TransactionalMetaStoreEventListener> transactionalListeners = null;
+     if (handler != null) {
+       transactionalListeners = handler.getTransactionalListeners();
+     }
+ 
+     boolean success = false;
+     try {
+       msdb.openTransaction();
+ 
 -      Table tbl = msdb.getTable(catName, dbname, name);
++      // Note: should we pass in write ID here? We only update stats on parts so probably not.
++      Table tbl = msdb.getTable(catName, dbname, name,  -1, null);
+       if (tbl == null) {
+         throw new InvalidObjectException(
+             "Unable to alter partitions because table or database does not exist.");
+       }
+       for (Partition tmpPart: new_parts) {
+         // Set DDL time to now if not specified
+         if (tmpPart.getParameters() == null ||
+             tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME) == null ||
+             Integer.parseInt(tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
+           tmpPart.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
+               .currentTimeMillis() / 1000));
+         }
+ 
+         Partition oldTmpPart = msdb.getPartition(catName, dbname, name, tmpPart.getValues());
+         oldParts.add(oldTmpPart);
+         partValsList.add(tmpPart.getValues());
+ 
+         if (MetaStoreUtils.requireCalStats(oldTmpPart, tmpPart, tbl, environmentContext)) {
+           // Check if stats are same, no need to update
+           if (MetaStoreUtils.isFastStatsSame(oldTmpPart, tmpPart)) {
+             MetaStoreUtils.updateBasicState(environmentContext, tmpPart.getParameters());
+           } else {
+             MetaStoreUtils.updatePartitionStatsFast(
+                 tmpPart, tbl, wh, false, true, environmentContext, false);
+           }
+         }
+ 
+         // PartitionView does not have SD and we do not need to update its column stats
+         if (oldTmpPart.getSd() != null) {
+           updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldTmpPart.getValues(),
+               oldTmpPart.getSd().getCols(), tbl, tmpPart, null);
+         }
+       }
+ 
 -      msdb.alterPartitions(catName, dbname, name, partValsList, new_parts);
++      msdb.alterPartitions(catName, dbname, name, partValsList, new_parts, writeId, txnId, writeIdList);
+       Iterator<Partition> oldPartsIt = oldParts.iterator();
+       for (Partition newPart : new_parts) {
+         Partition oldPart;
+         if (oldPartsIt.hasNext()) {
+           oldPart = oldPartsIt.next();
+         } else {
+           throw new InvalidOperationException("Missing old partition corresponding to new partition " +
+               "when invoking MetaStoreEventListener for alterPartitions event.");
+         }
+ 
+         if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                                 EventMessage.EventType.ALTER_PARTITION,
+                                                 new AlterPartitionEvent(oldPart, newPart, tbl, false, true, handler));
+         }
+       }
+ 
+       success = msdb.commitTransaction();
+     } catch (InvalidObjectException | NoSuchObjectException e) {
+       throw new InvalidOperationException("Alter partition operation failed: " + e);
+     } finally {
+       if(!success) {
+         msdb.rollbackTransaction();
+       }
+     }
+ 
+     return oldParts;
+   }
+ 
+   private boolean checkPartialPartKeysEqual(List<FieldSchema> oldPartKeys,
+       List<FieldSchema> newPartKeys) {
+     //return true if both are null, or false if one is null and the other isn't
+     if (newPartKeys == null || oldPartKeys == null) {
+       return oldPartKeys == newPartKeys;
+     }
+     if (oldPartKeys.size() != newPartKeys.size()) {
+       return false;
+     }
+     Iterator<FieldSchema> oldPartKeysIter = oldPartKeys.iterator();
+     Iterator<FieldSchema> newPartKeysIter = newPartKeys.iterator();
+     FieldSchema oldFs;
+     FieldSchema newFs;
+     while (oldPartKeysIter.hasNext()) {
+       oldFs = oldPartKeysIter.next();
+       newFs = newPartKeysIter.next();
+       // Alter table can change the type of partition key now.
+       // So check the column name only.
+       if (!oldFs.getName().equals(newFs.getName())) {
+         return false;
+       }
+     }
+ 
+     return true;
+   }
+ 
+   /**
+    * Uses the scheme and authority of the object's current location and the path constructed
+    * using the object's new name to construct a path for the object's new location.
+    */
+   private Path constructRenamedPath(Path defaultNewPath, Path currentPath) {
+     URI currentUri = currentPath.toUri();
+ 
+     return new Path(currentUri.getScheme(), currentUri.getAuthority(),
+         defaultNewPath.toUri().getPath());
+   }
+ 
+   @VisibleForTesting
 -  void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable)
++  void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable,
++      EnvironmentContext ec, long txnId, String validWriteIds)
+       throws MetaException, InvalidObjectException {
+     String catName = normalizeIdentifier(oldTable.isSetCatName() ? oldTable.getCatName() :
+         getDefaultCatalog(conf));
+     String dbName = oldTable.getDbName().toLowerCase();
+     String tableName = normalizeIdentifier(oldTable.getTableName());
+     String newDbName = newTable.getDbName().toLowerCase();
+     String newTableName = normalizeIdentifier(newTable.getTableName());
+ 
+     try {
+       List<FieldSchema> oldCols = oldTable.getSd().getCols();
+       List<FieldSchema> newCols = newTable.getSd().getCols();
+       List<ColumnStatisticsObj> newStatsObjs = new ArrayList<>();
+       ColumnStatistics colStats = null;
 -      boolean updateColumnStats = true;
 -
 -      // Nothing to update if everything is the same
 -        if (newDbName.equals(dbName) &&
 -            newTableName.equals(tableName) &&
 -            MetaStoreUtils.columnsIncludedByNameType(oldCols, newCols)) {
 -          updateColumnStats = false;
++      boolean updateColumnStats = !newDbName.equals(dbName) || !newTableName.equals(tableName)
++          || !MetaStoreUtils.columnsIncludedByNameType(oldCols, newCols);
++      if (updateColumnStats) {
++        List<String> oldColNames = new ArrayList<>(oldCols.size());
++        for (FieldSchema oldCol : oldCols) {
++          oldColNames.add(oldCol.getName());
+         }
+ 
 -        if (updateColumnStats) {
 -          List<String> oldColNames = new ArrayList<>(oldCols.size());
 -          for (FieldSchema oldCol : oldCols) {
 -            oldColNames.add(oldCol.getName());
 -          }
 -
 -          // Collect column stats which need to be rewritten and remove old stats
 -          colStats = msdb.getTableColumnStatistics(catName, dbName, tableName, oldColNames);
 -          if (colStats == null) {
 -            updateColumnStats = false;
 -          } else {
 -            List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
 -            if (statsObjs != null) {
 -              List<String> deletedCols = new ArrayList<>();
 -              for (ColumnStatisticsObj statsObj : statsObjs) {
 -                boolean found = false;
 -                for (FieldSchema newCol : newCols) {
 -                  if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
 -                      && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
 -                    found = true;
 -                    break;
 -                  }
++        // NOTE: this doesn't check stats being compliant, but the alterTable call below does.
++        //       The worst we can do is delete the stats.
++        // Collect column stats which need to be rewritten and remove old stats.
++        colStats = msdb.getTableColumnStatistics(catName, dbName, tableName, oldColNames);
++        if (colStats == null) {
++          updateColumnStats = false;
++        } else {
++          List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
++          if (statsObjs != null) {
++            List<String> deletedCols = new ArrayList<>();
++            for (ColumnStatisticsObj statsObj : statsObjs) {
++              boolean found = false;
++              for (FieldSchema newCol : newCols) {
++                if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
++                    && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
++                  found = true;
++                  break;
+                 }
++              }
+ 
 -                if (found) {
 -                  if (!newDbName.equals(dbName) || !newTableName.equals(tableName)) {
 -                    msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName());
 -                    newStatsObjs.add(statsObj);
 -                    deletedCols.add(statsObj.getColName());
 -                  }
 -                } else {
++              if (found) {
++                if (!newDbName.equals(dbName) || !newTableName.equals(tableName)) {
+                   msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName());
++                  newStatsObjs.add(statsObj);
+                   deletedCols.add(statsObj.getColName());
+                 }
++              } else {
++                msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName());
++                deletedCols.add(statsObj.getColName());
+               }
 -              StatsSetupConst.removeColumnStatsState(newTable.getParameters(), deletedCols);
+             }
++            StatsSetupConst.removeColumnStatsState(newTable.getParameters(), deletedCols);
+           }
+         }
++      }
+ 
 -        // Change to new table and append stats for the new table
 -        msdb.alterTable(catName, dbName, tableName, newTable);
 -        if (updateColumnStats && !newStatsObjs.isEmpty()) {
 -          ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
 -          statsDesc.setDbName(newDbName);
 -          statsDesc.setTableName(newTableName);
 -          colStats.setStatsObj(newStatsObjs);
 -          msdb.updateTableColumnStatistics(colStats);
 -        }
++      // Change to new table and append stats for the new table
++      msdb.alterTable(catName, dbName, tableName, newTable, txnId, validWriteIds);
++      if (updateColumnStats && !newStatsObjs.isEmpty()) {
++        ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
++        statsDesc.setDbName(newDbName);
++        statsDesc.setTableName(newTableName);
++        colStats.setStatsObj(newStatsObjs);
++        msdb.updateTableColumnStatistics(colStats, txnId, validWriteIds, newTable.getWriteId());
++      }
+     } catch (NoSuchObjectException nsoe) {
+       LOG.debug("Could not find db entry." + nsoe);
+     } catch (InvalidInputException e) {
+       //should not happen since the input were verified before passed in
+       throw new InvalidObjectException("Invalid inputs to update table column stats: " + e);
+     }
+   }
+ 
+   private ColumnStatistics updateOrGetPartitionColumnStats(
+       RawStore msdb, String catName, String dbname, String tblname, List<String> partVals,
+       List<FieldSchema> oldCols, Table table, Partition part, List<FieldSchema> newCols)
+           throws MetaException, InvalidObjectException {
+     ColumnStatistics newPartsColStats = null;
+     try {
+       // if newCols are not specified, use default ones.
+       if (newCols == null) {
+         newCols = part.getSd() == null ? new ArrayList<>() : part.getSd().getCols();
+       }
+       String oldPartName = Warehouse.makePartName(table.getPartitionKeys(), partVals);
+       String newPartName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues());
+       boolean rename = !part.getDbName().equals(dbname) || !part.getTableName().equals(tblname)
+           || !oldPartName.equals(newPartName);
+ 
+       // do not need to update column stats if alter partition is not for rename or changing existing columns
+       if (!rename && MetaStoreUtils.columnsIncludedByNameType(oldCols, newCols)) {
+         return newPartsColStats;
+       }
+       List<String> oldColNames = new ArrayList<>(oldCols.size());
+       for (FieldSchema oldCol : oldCols) {
+         oldColNames.add(oldCol.getName());
+       }
+       List<String> oldPartNames = Lists.newArrayList(oldPartName);
++      // TODO: doesn't take txn stats into account. This method can only remove stats.
+       List<ColumnStatistics> partsColStats = msdb.getPartitionColumnStatistics(catName, dbname, tblname,
+           oldPartNames, oldColNames);
+       assert (partsColStats.size() <= 1);
+       for (ColumnStatistics partColStats : partsColStats) { //actually only at most one loop
+         List<ColumnStatisticsObj> newStatsObjs = new ArrayList<>();
+         List<ColumnStatisticsObj> statsObjs = partColStats.getStatsObj();
+         List<String> deletedCols = new ArrayList<>();
+         for (ColumnStatisticsObj statsObj : statsObjs) {
+           boolean found =false;
+           for (FieldSchema newCol : newCols) {
+             if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
+                 && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
+               found = true;
+               break;
+             }
+           }
+           if (found) {
+             if (rename) {
+               msdb.deletePartitionColumnStatistics(catName, dbname, tblname, partColStats.getStatsDesc().getPartName(),
+                   partVals, statsObj.getColName());
+               newStatsObjs.add(statsObj);
+             }
+           } else {
+             msdb.deletePartitionColumnStatistics(catName, dbname, tblname, partColStats.getStatsDesc().getPartName(),
+                 partVals, statsObj.getColName());
+             deletedCols.add(statsObj.getColName());
+           }
+         }
+         StatsSetupConst.removeColumnStatsState(part.getParameters(), deletedCols);
+         if (!newStatsObjs.isEmpty()) {
+           partColStats.setStatsObj(newStatsObjs);
+           newPartsColStats = partColStats;
+         }
+       }
+     } catch (NoSuchObjectException nsoe) {
+       // ignore this exception, actually this exception won't be thrown from getPartitionColumnStatistics
+     } catch (InvalidInputException iie) {
+       throw new InvalidObjectException("Invalid input to delete partition column stats." + iie);
+     }
+ 
+     return newPartsColStats;
+   }
+ 
+   private void checkColTypeChangeCompatible(List<FieldSchema> oldCols, List<FieldSchema> newCols)
+       throws InvalidOperationException {
+     List<String> incompatibleCols = new ArrayList<>();
+     int maxCols = Math.min(oldCols.size(), newCols.size());
+     for (int i = 0; i < maxCols; i++) {
+       if (!ColumnType.areColTypesCompatible(
+           ColumnType.getTypeName(oldCols.get(i).getType()),
+           ColumnType.getTypeName(newCols.get(i).getType()))) {
+         incompatibleCols.add(newCols.get(i).getName());
+       }
+     }
+     if (!incompatibleCols.isEmpty()) {
+       throw new InvalidOperationException(
+           "The following columns have types incompatible with the existing " +
+               "columns in their respective positions :\n" +
+               org.apache.commons.lang.StringUtils.join(incompatibleCols, ',')
+       );
+     }
+   }
+ 
+ }
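
Side note for readers of the rename path above: constructRenamedPath keeps the scheme and
authority of the partition's current location and only swaps in the path component derived from
the new name. A minimal, self-contained sketch of that behavior follows; the class name and the
example locations are illustrative and not part of the patch.

import java.net.URI;
import org.apache.hadoop.fs.Path;

public class RenamedPathSketch {
  // Same idea as constructRenamedPath above: scheme and authority come from the current
  // location, the path component comes from the default new location.
  static Path constructRenamedPath(Path defaultNewPath, Path currentPath) {
    URI currentUri = currentPath.toUri();
    return new Path(currentUri.getScheme(), currentUri.getAuthority(),
        defaultNewPath.toUri().getPath());
  }

  public static void main(String[] args) {
    Path current = new Path("hdfs://nn1:8020/warehouse/t/p=1");
    Path defaultNew = new Path("hdfs://nn2:8020/warehouse/t/p=2");
    // Prints hdfs://nn1:8020/warehouse/t/p=2; the old authority is kept, only the path changes.
    System.out.println(constructRenamedPath(defaultNew, current));
  }
}

In the handler above, the destPath built this way is then compared against srcPath with
FileUtils.equalsFileSystem before any data is moved, so a rename never silently hops file systems.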


[18/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/851c8aba
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/851c8aba
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/851c8aba

Branch: refs/heads/master-txnstats
Commit: 851c8aba86aa027cc5aa21e8b71e04a1243c35b9
Parents: e867d1c
Author: Teddy Choi <pu...@gmail.com>
Authored: Thu Jul 19 13:55:57 2018 -0700
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Thu Jul 19 13:55:57 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |    2 +
 .../test/resources/testconfiguration.properties |    6 +-
 .../hadoop/hive/ql/plan/api/OperatorType.java   |    5 +-
 .../hadoop/hive/ql/exec/KeyWrapperFactory.java  |    2 +-
 .../hadoop/hive/ql/exec/OperatorFactory.java    |    4 +
 .../hadoop/hive/ql/exec/TopNKeyOperator.java    |  214 ++
 .../ql/exec/vector/VectorTopNKeyOperator.java   |  304 +++
 .../hive/ql/optimizer/TopNKeyProcessor.java     |  109 +
 .../hive/ql/optimizer/physical/Vectorizer.java  |   37 +
 .../hadoop/hive/ql/parse/TezCompiler.java       |   27 +
 .../apache/hadoop/hive/ql/plan/TopNKeyDesc.java |  139 ++
 .../hadoop/hive/ql/plan/VectorTopNKeyDesc.java  |   39 +
 ql/src/test/queries/clientpositive/topnkey.q    |   31 +
 .../queries/clientpositive/vector_topnkey.q     |   30 +
 .../clientpositive/llap/bucket_groupby.q.out    |  274 ++-
 .../clientpositive/llap/check_constraint.q.out  |   27 +-
 .../clientpositive/llap/explainuser_1.q.out     |   28 +-
 .../clientpositive/llap/explainuser_2.q.out     |  406 ++--
 .../clientpositive/llap/limit_pushdown.q.out    |  135 +-
 .../clientpositive/llap/limit_pushdown3.q.out   |   89 +-
 .../llap/llap_decimal64_reader.q.out            |   46 +-
 .../clientpositive/llap/offset_limit.q.out      |   27 +-
 .../llap/offset_limit_ppd_optimizer.q.out       |   85 +-
 .../llap/orc_struct_type_vectorization.q.out    |   53 +-
 .../parquet_complex_types_vectorization.q.out   |  159 +-
 .../llap/parquet_map_type_vectorization.q.out   |   53 +-
 .../parquet_struct_type_vectorization.q.out     |   53 +-
 .../results/clientpositive/llap/topnkey.q.out   |  318 +++
 .../llap/vector_cast_constant.q.out             |   55 +-
 .../clientpositive/llap/vector_char_2.q.out     |  110 +-
 .../vector_groupby_grouping_sets_limit.q.out    |  346 +--
 .../llap/vector_groupby_reduce.q.out            |   49 +-
 .../llap/vector_mr_diff_schema_alias.q.out      |   25 +-
 .../llap/vector_reduce_groupby_decimal.q.out    |   53 +-
 .../llap/vector_string_concat.q.out             |   47 +-
 .../clientpositive/llap/vector_topnkey.q.out    |  592 +++++
 .../llap/vectorization_limit.q.out              |   63 +-
 .../clientpositive/perf/tez/query10.q.out       |  346 +--
 .../clientpositive/perf/tez/query14.q.out       | 2198 +++++++++---------
 .../clientpositive/perf/tez/query15.q.out       |  138 +-
 .../clientpositive/perf/tez/query17.q.out       |  372 +--
 .../clientpositive/perf/tez/query25.q.out       |  366 +--
 .../clientpositive/perf/tez/query26.q.out       |  226 +-
 .../clientpositive/perf/tez/query27.q.out       |  230 +-
 .../clientpositive/perf/tez/query29.q.out       |  374 +--
 .../clientpositive/perf/tez/query35.q.out       |  346 +--
 .../clientpositive/perf/tez/query37.q.out       |  142 +-
 .../clientpositive/perf/tez/query40.q.out       |  206 +-
 .../clientpositive/perf/tez/query43.q.out       |  128 +-
 .../clientpositive/perf/tez/query45.q.out       |  272 +--
 .../clientpositive/perf/tez/query49.q.out       |  478 ++--
 .../clientpositive/perf/tez/query5.q.out        |  542 ++---
 .../clientpositive/perf/tez/query50.q.out       |  250 +-
 .../clientpositive/perf/tez/query60.q.out       |  546 ++---
 .../clientpositive/perf/tez/query66.q.out       |  452 ++--
 .../clientpositive/perf/tez/query69.q.out       |  364 +--
 .../clientpositive/perf/tez/query7.q.out        |  226 +-
 .../clientpositive/perf/tez/query76.q.out       |  356 +--
 .../clientpositive/perf/tez/query77.q.out       |  562 ++---
 .../clientpositive/perf/tez/query8.q.out        |  276 +--
 .../clientpositive/perf/tez/query80.q.out       |  756 +++---
 .../clientpositive/perf/tez/query82.q.out       |  142 +-
 .../clientpositive/perf/tez/query99.q.out       |  230 +-
 .../results/clientpositive/tez/topnkey.q.out    |  162 ++
 .../clientpositive/tez/vector_topnkey.q.out     |  162 ++
 .../test/results/clientpositive/topnkey.q.out   |  301 +++
 .../results/clientpositive/vector_topnkey.q.out |  480 ++++
 .../objectinspector/ObjectInspectorUtils.java   |   19 +
 68 files changed, 9530 insertions(+), 6160 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 4ed1636..e630e88 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2233,6 +2233,8 @@ public class HiveConf extends Configuration {
         "If the skew information is correctly stored in the metadata, hive.optimize.skewjoin.compiletime\n" +
         "would change the query plan to take care of it, and hive.optimize.skewjoin will be a no-op."),
 
+    HIVE_OPTIMIZE_TOPNKEY("hive.optimize.topnkey", true, "Whether to enable top n key optimizer."),
+
     HIVE_SHARED_WORK_OPTIMIZATION("hive.optimize.shared.work", true,
         "Whether to enable shared work optimizer. The optimizer finds scan operator over the same table\n" +
         "and follow-up operators in the query plan and merges them if they meet some preconditions. Tez only."),

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 870a9b6..d5a33bd 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -26,9 +26,11 @@ disabled.query.files=ql_rewrite_gbtoidx.q,\
 minitez.query.files.shared=delete_orig_table.q,\
   orc_merge12.q,\
   orc_vectorization_ppd.q,\
+  topnkey.q,\
   update_orig_table.q,\
   vector_join_part_col_char.q,\
-  vector_non_string_partition.q
+  vector_non_string_partition.q,\
+  vector_topnkey.q
 
 # NOTE: Add tests to minitez only if it is very
 # specific to tez and cannot be added to minillap.
@@ -209,6 +211,7 @@ minillaplocal.shared.query.files=alter_merge_2_orc.q,\
   subquery_exists.q,\
   subquery_in.q,\
   temp_table.q,\
+  topnkey.q,\
   union2.q,\
   union3.q,\
   union4.q,\
@@ -315,6 +318,7 @@ minillaplocal.shared.query.files=alter_merge_2_orc.q,\
   vector_reduce_groupby_duplicate_cols.q,\
   vector_string_concat.q,\
   vector_struct_in.q,\
+  vector_topnkey.q,\
   vector_udf_character_length.q,\
   vector_udf_octet_length.q,\
   vector_varchar_4.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java
----------------------------------------------------------------------
diff --git a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java
index a002348..f8328be 100644
--- a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java
+++ b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java
@@ -37,7 +37,8 @@ public enum OperatorType implements org.apache.thrift.TEnum {
   ORCFILEMERGE(22),
   RCFILEMERGE(23),
   MERGEJOIN(24),
-  SPARKPRUNINGSINK(25);
+  SPARKPRUNINGSINK(25),
+  TOPNKEY(26);
 
   private final int value;
 
@@ -110,6 +111,8 @@ public enum OperatorType implements org.apache.thrift.TEnum {
         return MERGEJOIN;
       case 25:
         return SPARKPRUNINGSINK;
+      case 26:
+        return TOPNKEY;
       default:
         return null;
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
index 71ee25d..f1bf902 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
@@ -168,7 +168,6 @@ public class KeyWrapperFactory {
     }
   }
 
-  transient Object[] singleEleArray = new Object[1];
   transient StringObjectInspector soi_new, soi_copy;
 
   class TextKeyWrapper extends KeyWrapper {
@@ -180,6 +179,7 @@ public class KeyWrapperFactory {
     int hashcode;
     Object key;
     boolean isCopy;
+    transient Object[] singleEleArray = new Object[1];
 
     public TextKeyWrapper(boolean isCopy) {
       this(-1, null, isCopy);

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
index 7bb6590..b61d37e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorSMBMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorSelectOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorSparkHashTableSinkOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorSparkPartitionPruningSinkOperator;
+import org.apache.hadoop.hive.ql.exec.vector.VectorTopNKeyOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
 import org.apache.hadoop.hive.ql.exec.vector.reducesink.VectorReduceSinkCommonOperator;
 import org.apache.hadoop.hive.ql.exec.vector.ptf.VectorPTFOperator;
@@ -76,6 +77,7 @@ import org.apache.hadoop.hive.ql.plan.ScriptDesc;
 import org.apache.hadoop.hive.ql.plan.SelectDesc;
 import org.apache.hadoop.hive.ql.plan.SparkHashTableSinkDesc;
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
+import org.apache.hadoop.hive.ql.plan.TopNKeyDesc;
 import org.apache.hadoop.hive.ql.plan.UDTFDesc;
 import org.apache.hadoop.hive.ql.plan.UnionDesc;
 import org.apache.hadoop.hive.ql.plan.VectorDesc;
@@ -126,6 +128,7 @@ public final class OperatorFactory {
     opvec.put(OrcFileMergeDesc.class, OrcFileMergeOperator.class);
     opvec.put(CommonMergeJoinDesc.class, CommonMergeJoinOperator.class);
     opvec.put(ListSinkDesc.class, ListSinkOperator.class);
+    opvec.put(TopNKeyDesc.class, TopNKeyOperator.class);
   }
 
   static {
@@ -143,6 +146,7 @@ public final class OperatorFactory {
     vectorOpvec.put(LimitDesc.class, VectorLimitOperator.class);
     vectorOpvec.put(PTFDesc.class, VectorPTFOperator.class);
     vectorOpvec.put(SparkHashTableSinkDesc.class, VectorSparkHashTableSinkOperator.class);
+    vectorOpvec.put(TopNKeyDesc.class, VectorTopNKeyOperator.class);
   }
 
   public static <T extends OperatorDesc> Operator<T> getVectorOperator(

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNKeyOperator.java
new file mode 100644
index 0000000..3dfeeaf
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNKeyOperator.java
@@ -0,0 +1,214 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.TopNKeyDesc;
+import org.apache.hadoop.hive.ql.plan.api.OperatorType;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
+
+import java.io.Serializable;
+import java.util.Comparator;
+import java.util.PriorityQueue;
+
+import static org.apache.hadoop.hive.ql.plan.api.OperatorType.TOPNKEY;
+
+/**
+ * TopNKeyOperator passes on only those rows whose keys are among the top N keys.
+ */
+public class TopNKeyOperator extends Operator<TopNKeyDesc> implements Serializable {
+
+  private static final long serialVersionUID = 1L;
+
+  // Maximum number of keys to hold
+  private transient int topN;
+
+  // Priority queue that holds the keys seen so far
+  private transient PriorityQueue<KeyWrapper> priorityQueue;
+
+  // Key wrapper in the input row format, used for fast comparisons
+  private transient KeyWrapper keyWrapper;
+
+  // Standard key wrapper in standard format for output
+  private transient KeyWrapper standardKeyWrapper;
+
+  // Maximum number of rows
+  private transient int rowLimit;
+
+  // Current number of rows
+  private transient int rowSize;
+
+  // Rows
+  private transient Object[] rows;
+
+  /** Kryo ctor. */
+  public TopNKeyOperator() {
+    super();
+  }
+
+  public TopNKeyOperator(CompilationOpContext ctx) {
+    super(ctx);
+  }
+
+  public static class KeyWrapperComparator implements Comparator<KeyWrapper> {
+    private ObjectInspector[] objectInspectors1;
+    private ObjectInspector[] objectInspectors2;
+    private boolean[] columnSortOrderIsDesc;
+
+    public KeyWrapperComparator(ObjectInspector[] objectInspectors1, ObjectInspector[]
+        objectInspectors2, boolean[] columnSortOrderIsDesc) {
+      this.objectInspectors1 = objectInspectors1;
+      this.objectInspectors2 = objectInspectors2;
+      this.columnSortOrderIsDesc = columnSortOrderIsDesc;
+    }
+
+    @Override
+    public int compare(KeyWrapper key1, KeyWrapper key2) {
+      return ObjectInspectorUtils.compare(key1.getKeyArray(), objectInspectors1,
+          key2.getKeyArray(), objectInspectors2, columnSortOrderIsDesc);
+    }
+  }
+
+  @Override
+  protected void initializeOp(Configuration hconf) throws HiveException {
+    super.initializeOp(hconf);
+
+    this.topN = conf.getTopN();
+
+    String columnSortOrder = conf.getColumnSortOrder();
+    boolean[] columnSortOrderIsDesc = new boolean[columnSortOrder.length()];
+    for (int i = 0; i < columnSortOrderIsDesc.length; i++) {
+      columnSortOrderIsDesc[i] = (columnSortOrder.charAt(i) == '-');
+    }
+
+    ObjectInspector rowInspector = inputObjInspectors[0];
+    outputObjInspector = ObjectInspectorUtils.getStandardObjectInspector(rowInspector);
+
+    // init keyFields
+    int numKeys = conf.getKeyColumns().size();
+    ExprNodeEvaluator[] keyFields = new ExprNodeEvaluator[numKeys];
+    ObjectInspector[] keyObjectInspectors = new ObjectInspector[numKeys];
+    ExprNodeEvaluator[] standardKeyFields = new ExprNodeEvaluator[numKeys];
+    ObjectInspector[] standardKeyObjectInspectors = new ObjectInspector[numKeys];
+
+    for (int i = 0; i < numKeys; i++) {
+      ExprNodeDesc key = conf.getKeyColumns().get(i);
+      keyFields[i] = ExprNodeEvaluatorFactory.get(key, hconf);
+      keyObjectInspectors[i] = keyFields[i].initialize(rowInspector);
+      standardKeyFields[i] = ExprNodeEvaluatorFactory.get(key, hconf);
+      standardKeyObjectInspectors[i] = standardKeyFields[i].initialize(outputObjInspector);
+    }
+
+    priorityQueue = new PriorityQueue<>(topN + 1, new TopNKeyOperator.KeyWrapperComparator(
+        standardKeyObjectInspectors, standardKeyObjectInspectors, columnSortOrderIsDesc));
+
+    keyWrapper = new KeyWrapperFactory(keyFields, keyObjectInspectors,
+        standardKeyObjectInspectors).getKeyWrapper();
+    standardKeyWrapper = new KeyWrapperFactory(standardKeyFields, standardKeyObjectInspectors,
+        standardKeyObjectInspectors).getKeyWrapper();
+
+    rowLimit = VectorizedRowBatch.DEFAULT_SIZE;
+    rows = new Object[rowLimit];
+    rowSize = 0;
+  }
+
+  @Override
+  public void process(Object row, int tag) throws HiveException {
+    keyWrapper.getNewKey(row, inputObjInspectors[0]);
+    keyWrapper.setHashKey();
+
+    if (!priorityQueue.contains(keyWrapper)) {
+      priorityQueue.offer(keyWrapper.copyKey());
+    }
+    if (priorityQueue.size() > topN) {
+      priorityQueue.poll();
+    }
+
+    rows[rowSize] = ObjectInspectorUtils.copyToStandardObject(row, inputObjInspectors[0]);
+    rowSize++;
+
+    if (rowSize % rowLimit == 0) {
+      processRows();
+    }
+  }
+
+  private void processRows() throws HiveException {
+    for (int i = 0; i < rowSize; i++) {
+      Object row = rows[i];
+
+      standardKeyWrapper.getNewKey(row, outputObjInspector);
+      standardKeyWrapper.setHashKey();
+
+      if (priorityQueue.contains(standardKeyWrapper)) {
+        forward(row, outputObjInspector);
+      }
+    }
+    priorityQueue.clear();
+    rowSize = 0;
+  }
+
+  @Override
+  protected final void closeOp(boolean abort) throws HiveException {
+    processRows();
+    super.closeOp(abort);
+  }
+
+  @Override
+  public String getName() {
+    return getOperatorName();
+  }
+
+  static public String getOperatorName() {
+    return "TNK";
+  }
+
+  @Override
+  public OperatorType getType() {
+    return TOPNKEY;
+  }
+
+  // Because a TopNKeyOperator behaves like a FilterOperator with a top n key condition, its
+  // properties for the optimizers have the same values. The following methods are the same as in
+  // FilterOperator: supportSkewJoinOptimization, columnNamesRowResolvedCanBeObtained,
+  // supportAutomaticSortMergeJoin, and supportUnionRemoveOptimization.
+  @Override
+  public boolean supportSkewJoinOptimization() {
+    return true;
+  }
+
+  @Override
+  public boolean columnNamesRowResolvedCanBeObtained() {
+    return true;
+  }
+
+  @Override
+  public boolean supportAutomaticSortMergeJoin() {
+    return true;
+  }
+
+  @Override
+  public boolean supportUnionRemoveOptimization() {
+    return true;
+  }
+}
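
The operator above reduces to a simple pattern: keep a bounded priority queue of at most topN keys
while buffering rows, then forward only the buffered rows whose key survived in the queue. The
sketch below shows that pattern on plain strings and keeps the N smallest keys; the Hive key
wrappers, object inspectors and per-column sort-order handling are intentionally left out, and all
names and values are illustrative.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

public class TopNKeyFilterSketch {
  public static void main(String[] args) {
    int topN = 2;
    // Reverse order puts the largest key at the head, so poll() evicts the largest key
    // once the queue grows past topN, mirroring how TopNKeyOperator.process trims its queue.
    PriorityQueue<String> topKeys = new PriorityQueue<>(topN + 1, Comparator.reverseOrder());
    List<String[]> bufferedRows = new ArrayList<>();

    String[][] rows = {{"b", "row1"}, {"a", "row2"}, {"c", "row3"}, {"a", "row4"}};
    for (String[] row : rows) {
      String key = row[0];
      if (!topKeys.contains(key)) {
        topKeys.offer(key);
      }
      if (topKeys.size() > topN) {
        topKeys.poll();         // drop the largest key; only the smallest topN keys remain
      }
      bufferedRows.add(row);    // rows are buffered, like the rows[] array in the operator
    }

    // The processRows step: forward only rows whose key is still in the queue.
    for (String[] row : bufferedRows) {
      if (topKeys.contains(row[0])) {
        System.out.println(String.join(",", row));   // prints b,row1 then a,row2 then a,row4
      }
    }
  }
}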

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorTopNKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorTopNKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorTopNKeyOperator.java
new file mode 100644
index 0000000..6f29f88
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorTopNKeyOperator.java
@@ -0,0 +1,304 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.exec.vector;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.primitives.Ints;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
+import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.hadoop.hive.ql.exec.TopNKeyOperator;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+import org.apache.hadoop.hive.ql.plan.TopNKeyDesc;
+import org.apache.hadoop.hive.ql.plan.VectorDesc;
+import org.apache.hadoop.hive.ql.plan.VectorTopNKeyDesc;
+import org.apache.hadoop.hive.ql.plan.api.OperatorType;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
+
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.PriorityQueue;
+import java.util.Properties;
+
+import static org.apache.hadoop.hive.ql.plan.api.OperatorType.TOPNKEY;
+
+/**
+ * VectorTopNKeyOperator passes on only those rows whose keys are among the top N keys.
+ */
+public class VectorTopNKeyOperator extends Operator<TopNKeyDesc> implements VectorizationOperator {
+
+  private static final long serialVersionUID = 1L;
+
+  private VectorTopNKeyDesc vectorDesc;
+  private VectorizationContext vContext;
+
+  // Key column info
+  private int[] keyColumnNums;
+  private TypeInfo[] keyTypeInfos;
+
+  // Extract row
+  private transient Object[] singleRow;
+  private transient VectorExtractRow vectorExtractRow;
+
+  // Serialization
+  private transient BinarySortableSerDe binarySortableSerDe;
+  private transient StructObjectInspector keyObjectInspector;
+
+  // Batch processing
+  private transient boolean firstBatch;
+  private transient PriorityQueue<Writable> priorityQueue;
+  private transient int[] temporarySelected;
+
+  public VectorTopNKeyOperator(CompilationOpContext ctx, OperatorDesc conf,
+      VectorizationContext vContext, VectorDesc vectorDesc) {
+
+    this(ctx);
+    this.conf = (TopNKeyDesc) conf;
+    this.vContext = vContext;
+    this.vectorDesc = (VectorTopNKeyDesc) vectorDesc;
+
+    VectorExpression[] keyExpressions = this.vectorDesc.getKeyExpressions();
+    final int numKeys = keyExpressions.length;
+    keyColumnNums = new int[numKeys];
+    keyTypeInfos = new TypeInfo[numKeys];
+
+    for (int i = 0; i < numKeys; i++) {
+      keyColumnNums[i] = keyExpressions[i].getOutputColumnNum();
+      keyTypeInfos[i] = keyExpressions[i].getOutputTypeInfo();
+    }
+  }
+
+  /** Kryo ctor. */
+  @VisibleForTesting
+  public VectorTopNKeyOperator() {
+    super();
+  }
+
+  public VectorTopNKeyOperator(CompilationOpContext ctx) {
+    super(ctx);
+  }
+
+  @Override
+  protected void initializeOp(Configuration hconf) throws HiveException {
+    super.initializeOp(hconf);
+
+    VectorExpression.doTransientInit(vectorDesc.getKeyExpressions());
+    for (VectorExpression keyExpression : vectorDesc.getKeyExpressions()) {
+      keyExpression.init(hconf);
+    }
+
+    this.firstBatch = true;
+
+    VectorExpression[] keyExpressions = vectorDesc.getKeyExpressions();
+    final int size = keyExpressions.length;
+    ObjectInspector[] fieldObjectInspectors = new ObjectInspector[size];
+
+    for (int i = 0; i < size; i++) {
+      VectorExpression keyExpression = keyExpressions[i];
+      fieldObjectInspectors[i] = TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(
+          keyExpression.getOutputTypeInfo());
+    }
+
+    keyObjectInspector = ObjectInspectorFactory.getStandardStructObjectInspector(
+        this.conf.getKeyColumnNames(), Arrays.asList(fieldObjectInspectors));
+
+    temporarySelected = new int [VectorizedRowBatch.DEFAULT_SIZE];
+  }
+
+  @Override
+  public void process(Object data, int tag) throws HiveException {
+    VectorizedRowBatch batch = (VectorizedRowBatch) data;
+
+    // The selected vector represents selected rows.
+    // Clone the selected vector
+    System.arraycopy(batch.selected, 0, temporarySelected, 0, batch.size);
+    int [] selectedBackup = batch.selected;
+    batch.selected = temporarySelected;
+    int sizeBackup = batch.size;
+    boolean selectedInUseBackup = batch.selectedInUse;
+
+    for (VectorExpression keyExpression : vectorDesc.getKeyExpressions()) {
+      keyExpression.evaluate(batch);
+    }
+
+    if (firstBatch) {
+      vectorExtractRow = new VectorExtractRow();
+      vectorExtractRow.init(keyObjectInspector, Ints.asList(keyColumnNums));
+
+      singleRow = new Object[vectorExtractRow.getCount()];
+      Comparator comparator = Comparator.reverseOrder();
+      priorityQueue = new PriorityQueue<Writable>(comparator);
+
+      try {
+        binarySortableSerDe = new BinarySortableSerDe();
+        Properties properties = new Properties();
+        Joiner joiner = Joiner.on(',');
+        properties.setProperty(serdeConstants.LIST_COLUMNS, joiner.join(conf.getKeyColumnNames()));
+        properties.setProperty(serdeConstants.LIST_COLUMN_TYPES, joiner.join(keyTypeInfos));
+        properties.setProperty(serdeConstants.SERIALIZATION_SORT_ORDER,
+            conf.getColumnSortOrder());
+        binarySortableSerDe.initialize(getConfiguration(), properties);
+      } catch (SerDeException e) {
+        throw new HiveException(e);
+      }
+
+      firstBatch = false;
+    }
+
+    // Clear the priority queue
+    priorityQueue.clear();
+
+    // Get top n keys
+    for (int i = 0; i < batch.size; i++) {
+
+      // Get keys
+      int j;
+      if (batch.selectedInUse) {
+        j = batch.selected[i];
+      } else {
+        j = i;
+      }
+      vectorExtractRow.extractRow(batch, j, singleRow);
+
+      Writable keysWritable;
+      try {
+        keysWritable = binarySortableSerDe.serialize(singleRow, keyObjectInspector);
+      } catch (SerDeException e) {
+        throw new HiveException(e);
+      }
+
+      // Put the copied keys into the priority queue
+      if (!priorityQueue.contains(keysWritable)) {
+        priorityQueue.offer(WritableUtils.clone(keysWritable, getConfiguration()));
+      }
+
+      // Limit the queue size
+      if (priorityQueue.size() > conf.getTopN()) {
+        priorityQueue.poll();
+      }
+    }
+
+    // Filter rows with top n keys
+    int size = 0;
+    int[] selected = new int[batch.selected.length];
+    for (int i = 0; i < batch.size; i++) {
+      int j;
+      if (batch.selectedInUse) {
+        j = batch.selected[i];
+      } else {
+        j = i;
+      }
+
+      // Get keys
+      vectorExtractRow.extractRow(batch, j, singleRow);
+
+      Writable keysWritable;
+      try {
+        keysWritable = binarySortableSerDe.serialize(singleRow, keyObjectInspector);
+      } catch (SerDeException e) {
+        throw new HiveException(e);
+      }
+
+      // Select a row in the priority queue
+      if (priorityQueue.contains(keysWritable)) {
+        selected[size++] = j;
+      }
+    }
+
+    // Apply selection to batch
+    if (batch.size != size) {
+      batch.selectedInUse = true;
+      batch.selected = selected;
+      batch.size = size;
+    }
+
+    // Forward the result
+    if (size > 0) {
+      forward(batch, null, true);
+    }
+
+    // Restore the original selected vector
+    batch.selected = selectedBackup;
+    batch.size = sizeBackup;
+    batch.selectedInUse = selectedInUseBackup;
+  }
+
+  @Override
+  public String getName() {
+    return TopNKeyOperator.getOperatorName();
+  }
+
+  @Override
+  public OperatorType getType() {
+    return TOPNKEY;
+  }
+
+  @Override
+  public VectorizationContext getInputVectorizationContext() {
+    return vContext;
+  }
+
+  @Override
+  public VectorDesc getVectorDesc() {
+    return vectorDesc;
+  }
+
+  // Because a TopNKeyOperator behaves like a FilterOperator with a top n key condition, its
+  // properties for the optimizers have the same values. The following methods are the same as in
+  // FilterOperator: supportSkewJoinOptimization, columnNamesRowResolvedCanBeObtained,
+  // supportAutomaticSortMergeJoin, and supportUnionRemoveOptimization.
+  @Override
+  public boolean supportSkewJoinOptimization() {
+    return true;
+  }
+
+  @Override
+  public boolean columnNamesRowResolvedCanBeObtained() {
+    return true;
+  }
+
+  @Override
+  public boolean supportAutomaticSortMergeJoin() {
+    return true;
+  }
+
+  @Override
+  public boolean supportUnionRemoveOptimization() {
+    return true;
+  }
+
+  // Must send on to VectorPTFOperator...
+  @Override
+  public void setNextVectorBatchGroupStatus(boolean isLastGroupBatch) throws HiveException {
+    for (Operator<? extends OperatorDesc> op : childOperators) {
+      op.setNextVectorBatchGroupStatus(isLastGroupBatch);
+    }
+  }
+}
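
The vectorized operator applies the same top N key idea batch by batch: evaluate the key
expressions, build a bounded queue of serialized keys, then rewrite the batch's selected vector so
that only surviving rows are forwarded, and finally restore the original selection. A minimal
sketch of just the selected-vector filtering step, using plain arrays instead of VectorizedRowBatch
and a precomputed key set instead of the serialized-key queue (both simplifications are mine):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class SelectedVectorFilterSketch {
  // keys[j] stands in for the serialized key of row j; surviving stands in for the
  // priority queue of top N keys built in the first pass over the batch.
  static int filterSelected(String[] keys, int[] selected, int size, boolean selectedInUse,
      Set<String> surviving, int[] newSelected) {
    int newSize = 0;
    for (int i = 0; i < size; i++) {
      int j = selectedInUse ? selected[i] : i;   // same row-index resolution as the operator
      if (surviving.contains(keys[j])) {
        newSelected[newSize++] = j;
      }
    }
    return newSize;   // the caller swaps in newSelected/newSize, forwards, then restores the backup
  }

  public static void main(String[] args) {
    String[] keys = {"a", "c", "a", "b"};
    int[] selected = new int[keys.length];
    int[] newSelected = new int[keys.length];
    Set<String> surviving = new HashSet<>(Arrays.asList("a", "b"));
    int newSize = filterSelected(keys, selected, keys.length, false, surviving, newSelected);
    System.out.println(newSize + " rows survive: "
        + Arrays.toString(Arrays.copyOf(newSelected, newSize)));   // 3 rows survive: [0, 2, 3]
  }
}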

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TopNKeyProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TopNKeyProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TopNKeyProcessor.java
new file mode 100644
index 0000000..721a9b9
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TopNKeyProcessor.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer;
+
+import org.apache.hadoop.hive.ql.exec.GroupByOperator;
+import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.hadoop.hive.ql.exec.OperatorFactory;
+import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
+import org.apache.hadoop.hive.ql.exec.RowSchema;
+import org.apache.hadoop.hive.ql.exec.TopNKeyOperator;
+import org.apache.hadoop.hive.ql.lib.Node;
+import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
+import org.apache.hadoop.hive.ql.plan.GroupByDesc;
+import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
+import org.apache.hadoop.hive.ql.plan.TopNKeyDesc;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Stack;
+
+/**
+ * TopNKeyProcessor is a processor that introduces a TopNKeyOperator. When a hash-mode
+ * GroupByOperator is followed by an ordering ReduceSinkOperator with a top n limit, a
+ * TopNKeyOperator is inserted between the GroupByOperator and its parent. If a TopNKeyOperator
+ * is already present there, the rule is skipped.
+ */
+public class TopNKeyProcessor implements NodeProcessor {
+  private static final Logger LOG = LoggerFactory.getLogger(TopNKeyProcessor.class);
+
+  public TopNKeyProcessor() {
+  }
+
+  @Override
+  public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
+      Object... nodeOutputs) throws SemanticException {
+
+    // Get ReduceSinkOperator
+    ReduceSinkOperator reduceSinkOperator = (ReduceSinkOperator) nd;
+    ReduceSinkDesc reduceSinkDesc = reduceSinkOperator.getConf();
+
+    // Get GroupByOperator
+    GroupByOperator groupByOperator = (GroupByOperator) reduceSinkOperator.getParentOperators().get(0);
+    GroupByDesc groupByDesc = groupByOperator.getConf();
+
+    // Check whether the reduce sink operator contains top n
+    if (!reduceSinkDesc.isOrdering() || reduceSinkDesc.getTopN() < 0) {
+      return null;
+    }
+
+    // Check whether the group by operator is in hash mode
+    if (groupByDesc.getMode() != GroupByDesc.Mode.HASH) {
+      return null;
+    }
+
+    // Check whether the group by operator has distinct aggregations
+    if (groupByDesc.isDistinct()) {
+      return null;
+    }
+
+    // Check whether the RS keys are the same as the GBY keys
+    List<ExprNodeDesc> groupByKeyColumns = groupByDesc.getKeys();
+    List<ExprNodeDesc> mappedColumns = new ArrayList<>();
+    for (ExprNodeDesc columns : reduceSinkDesc.getKeyCols()) {
+      mappedColumns.add(groupByDesc.getColumnExprMap().get(columns.getExprString()));
+    }
+    if (!ExprNodeDescUtils.isSame(mappedColumns, groupByKeyColumns)) {
+      return null;
+    }
+
+    // Check whether there already is a top n key operator
+    Operator<? extends OperatorDesc> parentOperator = groupByOperator.getParentOperators().get(0);
+    if (parentOperator instanceof TopNKeyOperator) {
+      return null;
+    }
+
+    // Insert a new top n key operator between the group by operator and its parent
+    TopNKeyDesc topNKeyDesc = new TopNKeyDesc(reduceSinkDesc.getTopN(), reduceSinkDesc.getOrder(),
+        groupByKeyColumns);
+    Operator<? extends OperatorDesc> newOperator = OperatorFactory.getAndMakeChild(
+        groupByOperator.getCompilationOpContext(), (OperatorDesc) topNKeyDesc,
+        new RowSchema(groupByOperator.getSchema()), groupByOperator.getParentOperators());
+    newOperator.getChildOperators().add(groupByOperator);
+    groupByOperator.getParentOperators().add(newOperator);
+    parentOperator.removeChild(groupByOperator);
+
+    return null;
+  }
+}
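
The rewiring at the end of process() hangs the new operator under the GroupByOperator's parent and re-attaches the GroupByOperator beneath it. A toy sketch of that parent/child rewiring (hypothetical Op type, single-parent case only; Hive's Operator/OperatorFactory APIs are not used here) is shown below:

import java.util.ArrayList;
import java.util.List;

// Sketch of inserting a new operator directly above an existing one in a simple operator graph.
public class OperatorRewireSketch {

  static class Op {
    final String name;
    final List<Op> parents = new ArrayList<>();
    final List<Op> children = new ArrayList<>();
    Op(String name) { this.name = name; }
  }

  // Inserts newOp between child and its single parent, so the plan reads parent -> newOp -> child.
  static void insertAbove(Op newOp, Op child) {
    Op oldParent = child.parents.get(0);
    // The new operator is created as a child of the old parent...
    newOp.parents.add(oldParent);
    oldParent.children.add(newOp);
    // ...the existing child is re-attached under the new operator...
    newOp.children.add(child);
    child.parents.add(newOp);
    // ...and the direct edge between the old parent and the child is removed.
    oldParent.children.remove(child);
    child.parents.remove(oldParent);
  }

  public static void main(String[] args) {
    Op ts = new Op("TS");
    Op gby = new Op("GBY");
    Op rs = new Op("RS");
    ts.children.add(gby); gby.parents.add(ts);
    gby.children.add(rs); rs.parents.add(gby);

    insertAbove(new Op("TNK"), gby);
    System.out.println(gby.parents.get(0).name);  // TNK
    System.out.println(ts.children.get(0).name);  // TNK
  }
}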

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 7ec80e6..40bd075 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -124,6 +124,7 @@ import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.PTFDesc;
 import org.apache.hadoop.hive.ql.plan.SelectDesc;
+import org.apache.hadoop.hive.ql.plan.TopNKeyDesc;
 import org.apache.hadoop.hive.ql.plan.VectorAppMasterEventDesc;
 import org.apache.hadoop.hive.ql.plan.VectorDesc;
 import org.apache.hadoop.hive.ql.plan.VectorFileSinkDesc;
@@ -135,6 +136,7 @@ import org.apache.hadoop.hive.ql.plan.VectorTableScanDesc;
 import org.apache.hadoop.hive.ql.plan.VectorGroupByDesc.ProcessingMode;
 import org.apache.hadoop.hive.ql.plan.VectorSparkHashTableSinkDesc;
 import org.apache.hadoop.hive.ql.plan.VectorSparkPartitionPruningSinkDesc;
+import org.apache.hadoop.hive.ql.plan.VectorTopNKeyDesc;
 import org.apache.hadoop.hive.ql.plan.VectorLimitDesc;
 import org.apache.hadoop.hive.ql.plan.VectorMapJoinInfo;
 import org.apache.hadoop.hive.ql.plan.VectorSMBJoinDesc;
@@ -2555,6 +2557,10 @@ public class Vectorizer implements PhysicalPlanResolver {
         desc, "Predicate", VectorExpressionDescriptor.Mode.FILTER, /* allowComplex */ true);
   }
 
+  private boolean validateTopNKeyOperator(TopNKeyOperator op) {
+    List<ExprNodeDesc> keyColumns = op.getConf().getKeyColumns();
+    return validateExprNodeDesc(keyColumns, "Key columns");
+  }
 
   private boolean validateGroupByOperator(GroupByOperator op, boolean isReduce,
       boolean isTezOrSpark, VectorGroupByDesc vectorGroupByDesc) {
@@ -4155,6 +4161,20 @@ public class Vectorizer implements PhysicalPlanResolver {
         vContext, vectorFilterDesc);
   }
 
+  private static Operator<? extends OperatorDesc> vectorizeTopNKeyOperator(
+      Operator<? extends OperatorDesc> topNKeyOperator, VectorizationContext vContext,
+      VectorTopNKeyDesc vectorTopNKeyDesc) throws HiveException {
+
+    TopNKeyDesc topNKeyDesc = (TopNKeyDesc) topNKeyOperator.getConf();
+
+    List<ExprNodeDesc> keyColumns = topNKeyDesc.getKeyColumns();
+    VectorExpression[] keyExpressions = vContext.getVectorExpressions(keyColumns);
+    vectorTopNKeyDesc.setKeyExpressions(keyExpressions);
+    return OperatorFactory.getVectorOperator(
+        topNKeyOperator.getCompilationOpContext(), topNKeyDesc,
+        vContext, vectorTopNKeyDesc);
+  }
+
   private static Class<? extends VectorAggregateExpression> findVecAggrClass(
       Class<? extends VectorAggregateExpression>[] vecAggrClasses,
       String aggregateName, ColumnVector.Type inputColVectorType,
@@ -5051,6 +5071,23 @@ public class Vectorizer implements PhysicalPlanResolver {
             }
           }
           break;
+        case TOPNKEY:
+          {
+            if (!validateTopNKeyOperator((TopNKeyOperator) op)) {
+              throw new VectorizerCannotVectorizeException();
+            }
+
+            VectorTopNKeyDesc vectorTopNKeyDesc = new VectorTopNKeyDesc();
+            vectorOp = vectorizeTopNKeyOperator(op, vContext, vectorTopNKeyDesc);
+            isNative = true;
+            if (vectorTaskColumnInfo != null) {
+              VectorExpression[] keyExpressions = vectorTopNKeyDesc.getKeyExpressions();
+              if (usesVectorUDFAdaptor(keyExpressions)) {
+                vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
+              }
+            }
+          }
+          break;
         case SELECT:
           {
             if (!validateSelectOperator((SelectOperator) op)) {

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
index 1b433c7..1661aec 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hive.ql.exec.tez.TezTask;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.lib.CompositeProcessor;
+import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
 import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
 import org.apache.hadoop.hive.ql.lib.Dispatcher;
 import org.apache.hadoop.hive.ql.lib.ForwardWalker;
@@ -77,6 +78,7 @@ import org.apache.hadoop.hive.ql.optimizer.ReduceSinkMapJoinProc;
 import org.apache.hadoop.hive.ql.optimizer.RemoveDynamicPruningBySize;
 import org.apache.hadoop.hive.ql.optimizer.SetReducerParallelism;
 import org.apache.hadoop.hive.ql.optimizer.SharedWorkOptimizer;
+import org.apache.hadoop.hive.ql.optimizer.TopNKeyProcessor;
 import org.apache.hadoop.hive.ql.optimizer.correlation.ReduceSinkJoinDeDuplication;
 import org.apache.hadoop.hive.ql.optimizer.metainfo.annotation.AnnotateWithOpTraits;
 import org.apache.hadoop.hive.ql.optimizer.physical.AnnotateRunTimeStatsOptimizer;
@@ -144,6 +146,10 @@ public class TezCompiler extends TaskCompiler {
     OptimizeTezProcContext procCtx = new OptimizeTezProcContext(conf, pCtx, inputs, outputs);
 
     perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    runTopNKeyOptimization(procCtx);
+    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run top n key optimization");
+
+    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
     // setup dynamic partition pruning where possible
     runDynamicPartitionPruning(procCtx, inputs, outputs);
     perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Setup dynamic partition pruning");
@@ -1231,6 +1237,27 @@ public class TezCompiler extends TaskCompiler {
     }
   }
 
+  private static void runTopNKeyOptimization(OptimizeTezProcContext procCtx)
+      throws SemanticException {
+    if (!procCtx.conf.getBoolVar(ConfVars.HIVE_OPTIMIZE_TOPNKEY)) {
+      return;
+    }
+
+    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
+    opRules.put(
+        new RuleRegExp("Top n key optimization", GroupByOperator.getOperatorName() + "%" +
+            ReduceSinkOperator.getOperatorName() + "%"),
+        new TopNKeyProcessor());
+
+    // The dispatcher fires the processor corresponding to the closest matching
+    // rule and passes the context along
+    Dispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx);
+    List<Node> topNodes = new ArrayList<Node>();
+    topNodes.addAll(procCtx.parseContext.getTopOps().values());
+    GraphWalker ogw = new DefaultGraphWalker(disp);
+    ogw.startWalking(topNodes, null);
+  }
+
   private boolean findParallelSemiJoinBranch(Operator<?> mapjoin, TableScanOperator bigTableTS,
                                              ParseContext parseContext,
                                              Map<ReduceSinkOperator, TableScanOperator> semijoins) {
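
runTopNKeyOptimization above registers a single rule keyed on the GroupByOperator/ReduceSinkOperator name sequence and lets the graph walker fire TopNKeyProcessor wherever that sequence appears in the walk. A rough sketch of the suffix-matching idea behind such a rule (a hypothetical simplification, not the actual RuleRegExp implementation) follows:

import java.util.Arrays;
import java.util.List;

// Sketch: a "GBY%RS%"-style rule fires when the current operator path ends with that sequence.
public class RuleMatchSketch {

  // Builds a signature such as "TS%SEL%GBY%RS%" from the operator names on the current walk path.
  static String pathSignature(List<String> operatorNames) {
    StringBuilder sb = new StringBuilder();
    for (String name : operatorNames) {
      sb.append(name).append('%');
    }
    return sb.toString();
  }

  // The rule fires when the walk path ends with the requested operator sequence.
  static boolean ruleFires(String rulePattern, List<String> operatorNames) {
    return pathSignature(operatorNames).endsWith(rulePattern);
  }

  public static void main(String[] args) {
    List<String> path = Arrays.asList("TS", "SEL", "GBY", "RS");
    System.out.println(ruleFires("GBY%RS%", path));   // true
    System.out.println(ruleFires("JOIN%RS%", path));  // false
  }
}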

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/java/org/apache/hadoop/hive/ql/plan/TopNKeyDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TopNKeyDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TopNKeyDesc.java
new file mode 100644
index 0000000..c62c4a9
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TopNKeyDesc.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.plan;
+
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * TopNKeyDesc.
+ *
+ */
+@Explain(displayName = "Top N Key Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class TopNKeyDesc extends AbstractOperatorDesc {
+  private static final long serialVersionUID = 1L;
+
+  private int topN;
+  private String columnSortOrder;
+  private List<ExprNodeDesc> keyColumns;
+
+  public TopNKeyDesc() {
+  }
+
+  public TopNKeyDesc(
+      final int topN,
+      final String columnSortOrder,
+      final List<ExprNodeDesc> keyColumns) {
+
+    this.topN = topN;
+    this.columnSortOrder = columnSortOrder;
+    this.keyColumns = keyColumns;
+  }
+
+  @Explain(displayName = "top n", explainLevels = { Level.DEFAULT, Level.EXTENDED, Level.USER })
+  public int getTopN() {
+    return topN;
+  }
+
+  public void setTopN(int topN) {
+    this.topN = topN;
+  }
+
+  @Explain(displayName = "sort order", explainLevels = { Level.DEFAULT, Level.EXTENDED, Level.USER })
+  public String getColumnSortOrder() {
+    return columnSortOrder;
+  }
+
+  public void setColumnSortOrder(String columnSortOrder) {
+    this.columnSortOrder = columnSortOrder;
+  }
+
+  @Explain(displayName = "keys")
+  public String getKeyString() {
+    return PlanUtils.getExprListString(keyColumns);
+  }
+
+  @Explain(displayName = "keys", explainLevels = { Level.USER })
+  public String getUserLevelExplainKeyString() {
+    return PlanUtils.getExprListString(keyColumns, true);
+  }
+
+  public List<ExprNodeDesc> getKeyColumns() {
+    return keyColumns;
+  }
+
+  public void setKeyColumns(List<ExprNodeDesc> keyColumns) {
+    this.keyColumns = keyColumns;
+  }
+
+  public List<String> getKeyColumnNames() {
+    List<String> ret = new ArrayList<>();
+    for (ExprNodeDesc keyColumn : keyColumns) {
+      ret.add(keyColumn.getExprString());
+    }
+    return ret;
+  }
+
+  @Override
+  public boolean isSame(OperatorDesc other) {
+    if (getClass().getName().equals(other.getClass().getName())) {
+      TopNKeyDesc otherDesc = (TopNKeyDesc) other;
+      return getTopN() == otherDesc.getTopN() &&
+          Objects.equals(columnSortOrder, otherDesc.columnSortOrder) &&
+          ExprNodeDescUtils.isSame(keyColumns, otherDesc.keyColumns);
+    }
+    return false;
+  }
+
+  @Override
+  public Object clone() {
+    TopNKeyDesc ret = new TopNKeyDesc();
+    ret.setTopN(topN);
+    ret.setColumnSortOrder(columnSortOrder);
+    ret.setKeyColumns(getKeyColumns() == null ? null : new ArrayList<>(getKeyColumns()));
+    return ret;
+  }
+
+  public class TopNKeyDescExplainVectorization extends OperatorExplainVectorization {
+    private final TopNKeyDesc topNKeyDesc;
+    private final VectorTopNKeyDesc vectorTopNKeyDesc;
+
+    public TopNKeyDescExplainVectorization(TopNKeyDesc topNKeyDesc, VectorTopNKeyDesc vectorTopNKeyDesc) {
+      super(vectorTopNKeyDesc, true);
+      this.topNKeyDesc = topNKeyDesc;
+      this.vectorTopNKeyDesc = vectorTopNKeyDesc;
+    }
+
+    @Explain(vectorization = Explain.Vectorization.OPERATOR, displayName = "keyExpressions", explainLevels = { Level.DEFAULT, Level.EXTENDED })
+    public List<String> getKeyExpressions() {
+      return vectorExpressionsToStringList(vectorTopNKeyDesc.getKeyExpressions());
+    }
+  }
+
+  @Explain(vectorization = Explain.Vectorization.OPERATOR, displayName = "Top N Key Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
+  public TopNKeyDescExplainVectorization getTopNKeyVectorization() {
+    VectorTopNKeyDesc vectorTopNKeyDesc = (VectorTopNKeyDesc) getVectorDesc();
+    if (vectorTopNKeyDesc == null) {
+      return null;
+    }
+    return new TopNKeyDescExplainVectorization(this, vectorTopNKeyDesc);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorTopNKeyDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorTopNKeyDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorTopNKeyDesc.java
new file mode 100644
index 0000000..9a266a0
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorTopNKeyDesc.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.plan;
+
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
+
+public class VectorTopNKeyDesc extends AbstractVectorDesc {
+
+  private static final long serialVersionUID = 1L;
+
+  private VectorExpression[] keyExpressions;
+
+  public VectorTopNKeyDesc() {
+  }
+
+  public VectorExpression[] getKeyExpressions() {
+    return keyExpressions;
+  }
+
+  public void setKeyExpressions(VectorExpression[] keyExpressions) {
+    this.keyExpressions = keyExpressions;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/queries/clientpositive/topnkey.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/topnkey.q b/ql/src/test/queries/clientpositive/topnkey.q
new file mode 100644
index 0000000..e02a41d
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/topnkey.q
@@ -0,0 +1,31 @@
+--! qt:dataset:src
+set hive.mapred.mode=nonstrict;
+set hive.vectorized.execution.enabled=false;
+set hive.optimize.topnkey=true;
+
+set hive.optimize.ppd=true;
+set hive.ppd.remove.duplicatefilters=true;
+set hive.tez.dynamic.partition.pruning=true;
+set hive.optimize.metadataonly=false;
+set hive.optimize.index.filter=true;
+set hive.tez.min.bloom.filter.entries=1;
+
+set hive.tez.dynamic.partition.pruning=true;
+set hive.stats.fetch.column.stats=true;
+set hive.cbo.enable=true;
+
+EXPLAIN
+SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5;
+
+SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5;
+
+EXPLAIN
+SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5;
+
+SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5;
+
+explain vectorization detail
+SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5;
+
+SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/queries/clientpositive/vector_topnkey.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_topnkey.q b/ql/src/test/queries/clientpositive/vector_topnkey.q
new file mode 100644
index 0000000..e1b7d26
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_topnkey.q
@@ -0,0 +1,30 @@
+--! qt:dataset:src
+set hive.mapred.mode=nonstrict;
+set hive.vectorized.execution.enabled=true;
+set hive.optimize.topnkey=true;
+
+set hive.optimize.ppd=true;
+set hive.ppd.remove.duplicatefilters=true;
+set hive.tez.dynamic.partition.pruning=true;
+set hive.optimize.metadataonly=false;
+set hive.optimize.index.filter=true;
+set hive.tez.min.bloom.filter.entries=1;
+
+set hive.tez.dynamic.partition.pruning=true;
+set hive.stats.fetch.column.stats=true;
+set hive.cbo.enable=true;
+
+explain vectorization detail
+SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5;
+
+SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5;
+
+explain vectorization detail
+SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5;
+
+SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5;
+
+explain vectorization detail
+SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5;
+
+SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5;

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/bucket_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucket_groupby.q.out b/ql/src/test/results/clientpositive/llap/bucket_groupby.q.out
index bee7889..726d46b 100644
--- a/ql/src/test/results/clientpositive/llap/bucket_groupby.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucket_groupby.q.out
@@ -68,19 +68,24 @@ STAGE PLANS:
                     expressions: key (type: string)
                     outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
+                    Top N Key Operator
+                      sort order: +
                       keys: key (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 10
+                      Group By Operator
+                        aggregations: count()
+                        keys: key (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col1 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint)
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -196,19 +201,24 @@ STAGE PLANS:
                     expressions: key (type: string)
                     outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
+                    Top N Key Operator
+                      sort order: +
                       keys: key (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 10
+                      Group By Operator
+                        aggregations: count()
+                        keys: key (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col1 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint)
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -298,19 +308,24 @@ STAGE PLANS:
                     expressions: length(key) (type: int)
                     outputColumnNames: _col0
                     Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
+                    Top N Key Operator
+                      sort order: +
                       keys: _col0 (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 3000 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 10
+                      Group By Operator
+                        aggregations: count()
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 250 Data size: 3000 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col1 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 250 Data size: 3000 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint)
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -380,19 +395,24 @@ STAGE PLANS:
                     expressions: abs(length(key)) (type: int)
                     outputColumnNames: _col0
                     Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
+                    Top N Key Operator
+                      sort order: +
                       keys: _col0 (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 3000 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 10
+                      Group By Operator
+                        aggregations: count()
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 250 Data size: 3000 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col1 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 250 Data size: 3000 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint)
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -463,19 +483,24 @@ STAGE PLANS:
                     expressions: key (type: string)
                     outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
+                    Top N Key Operator
+                      sort order: +
                       keys: key (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 10
+                      Group By Operator
+                        aggregations: count()
+                        keys: key (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col1 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint)
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -566,19 +591,24 @@ STAGE PLANS:
                     expressions: value (type: string)
                     outputColumnNames: value
                     Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
+                    Top N Key Operator
+                      sort order: +
                       keys: value (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 10
+                      Group By Operator
+                        aggregations: count()
+                        keys: value (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col1 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint)
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -1167,20 +1197,25 @@ STAGE PLANS:
                     expressions: key (type: string)
                     outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
-                      bucketGroup: true
+                    Top N Key Operator
+                      sort order: +
                       keys: key (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 10
+                      Group By Operator
+                        aggregations: count()
+                        bucketGroup: true
+                        keys: key (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col1 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint)
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -1271,19 +1306,24 @@ STAGE PLANS:
                     expressions: value (type: string)
                     outputColumnNames: value
                     Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
+                    Top N Key Operator
+                      sort order: +
                       keys: value (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 10
+                      Group By Operator
+                        aggregations: count()
+                        keys: value (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col1 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint)
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -1475,20 +1515,25 @@ STAGE PLANS:
                     expressions: key (type: string)
                     outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
-                      bucketGroup: true
+                    Top N Key Operator
+                      sort order: +
                       keys: key (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 10
+                      Group By Operator
+                        aggregations: count()
+                        bucketGroup: true
+                        keys: key (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col1 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint)
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -1579,19 +1624,24 @@ STAGE PLANS:
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
+                    Top N Key Operator
+                      sort order: ++
                       keys: key (type: string), value (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string), _col1 (type: string)
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 10
+                      Group By Operator
+                        aggregations: count()
+                        keys: key (type: string), value (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2
                         Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col2 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string), _col1 (type: string)
+                          sort order: ++
+                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                          Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col2 (type: bigint)
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/check_constraint.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/check_constraint.q.out b/ql/src/test/results/clientpositive/llap/check_constraint.q.out
index e4cd97e..123a3e4 100644
--- a/ql/src/test/results/clientpositive/llap/check_constraint.q.out
+++ b/ql/src/test/results/clientpositive/llap/check_constraint.q.out
@@ -1675,19 +1675,24 @@ STAGE PLANS:
                     expressions: key (type: string), value (type: string), UDFToInteger(key) (type: int), CAST( key AS decimal(5,2)) (type: decimal(5,2))
                     outputColumnNames: _col0, _col1, _col2, _col3
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: min(_col2), max(_col3)
+                    Top N Key Operator
+                      sort order: ++
                       keys: _col0 (type: string), _col1 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 250 Data size: 73500 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string), _col1 (type: string)
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 10
+                      Group By Operator
+                        aggregations: min(_col2), max(_col3)
+                        keys: _col0 (type: string), _col1 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2, _col3
                         Statistics: Num rows: 250 Data size: 73500 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col2 (type: int), _col3 (type: decimal(5,2))
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string), _col1 (type: string)
+                          sort order: ++
+                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                          Statistics: Num rows: 250 Data size: 73500 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col2 (type: int), _col3 (type: decimal(5,2))
             Execution mode: vectorized, llap
             LLAP IO: no inputs
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
index 6a2ae62..f9018b4 100644
--- a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
@@ -1264,19 +1264,21 @@ Stage-0
                   PartitionCols:_col0, _col1
                   Group By Operator [GBY_7] (rows=5 width=20)
                     Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col1, _col0
-                    Select Operator [SEL_5] (rows=10 width=101)
-                      Output:["_col0","_col1"]
-                      Group By Operator [GBY_4] (rows=10 width=101)
-                        Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
-                      <-Map 1 [SIMPLE_EDGE] llap
-                        SHUFFLE [RS_3]
-                          PartitionCols:_col0, _col1, _col2
-                          Group By Operator [GBY_2] (rows=10 width=101)
-                            Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
-                            Select Operator [SEL_1] (rows=20 width=88)
-                              Output:["key","c_int","c_float"]
-                              TableScan [TS_0] (rows=20 width=88)
-                                default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
+                    Top N Key Operator [TNK_15] (rows=10 width=101)
+                      keys:_col1, _col0,sort order:++,top n:1
+                      Select Operator [SEL_5] (rows=10 width=101)
+                        Output:["_col0","_col1"]
+                        Group By Operator [GBY_4] (rows=10 width=101)
+                          Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
+                        <-Map 1 [SIMPLE_EDGE] llap
+                          SHUFFLE [RS_3]
+                            PartitionCols:_col0, _col1, _col2
+                            Group By Operator [GBY_2] (rows=10 width=101)
+                              Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
+                              Select Operator [SEL_1] (rows=20 width=88)
+                                Output:["key","c_int","c_float"]
+                                TableScan [TS_0] (rows=20 width=88)
+                                  default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
 
 PREHOOK: query: explain select key from(select key from (select key from cbo_t1 limit 5)cbo_t2  limit 5)cbo_t3  limit 5
 PREHOOK: type: QUERY


[36/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
index 0000000,e58ee33..e985366
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
@@@ -1,0 -1,1175 +1,1177 @@@
+ -- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+ -- NOTE: Some versions of SchemaTool do not automatically generate this table.
+ -- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+ CREATE TABLE SEQUENCE_TABLE
+ (
+    SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+    NEXT_VAL NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
+ 
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1);
+ 
+ -- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+ -- This table is required if datanucleus.autoStartMechanism=SchemaTable
+ -- NOTE: Some versions of SchemaTool do not automatically generate this table.
+ -- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+ CREATE TABLE NUCLEUS_TABLES
+ (
+    CLASS_NAME VARCHAR2(128) NOT NULL,
+    TABLE_NAME VARCHAR2(128) NOT NULL,
+    TYPE VARCHAR2(4) NOT NULL,
+    OWNER VARCHAR2(2) NOT NULL,
+    VERSION VARCHAR2(20) NOT NULL,
+    INTERFACE_NAME VARCHAR2(255) NULL
+ );
+ 
+ ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
+ 
+ -- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ CREATE TABLE PART_COL_PRIVS
+ (
+     PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+     "COLUMN_NAME" VARCHAR2(767) NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PART_ID NUMBER NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     PART_COL_PRIV VARCHAR2(128) NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+ 
+ -- Table CDS.
+ CREATE TABLE CDS
+ (
+     CD_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+ 
+ -- Table COLUMNS_V2 for join relationship
+ CREATE TABLE COLUMNS_V2
+ (
+     CD_ID NUMBER NOT NULL,
+     "COMMENT" VARCHAR2(256) NULL,
+     "COLUMN_NAME" VARCHAR2(767) NOT NULL,
+     TYPE_NAME CLOB NOT NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+ 
+ -- Table PARTITION_KEY_VALS for join relationship
+ CREATE TABLE PARTITION_KEY_VALS
+ (
+     PART_ID NUMBER NOT NULL,
+     PART_KEY_VAL VARCHAR2(256) NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+ 
+ CREATE TABLE CTLGS (
+     CTLG_ID NUMBER PRIMARY KEY,
+     "NAME" VARCHAR2(256),
+     "DESC" VARCHAR2(4000),
+     LOCATION_URI VARCHAR2(4000) NOT NULL,
+     UNIQUE ("NAME")
+ );
+ 
+ -- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+ CREATE TABLE DBS
+ (
+     DB_ID NUMBER NOT NULL,
+     "DESC" VARCHAR2(4000) NULL,
+     DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+     "NAME" VARCHAR2(128) NULL,
+     OWNER_NAME VARCHAR2(128) NULL,
+     OWNER_TYPE VARCHAR2(10) NULL,
+     CTLG_NAME VARCHAR2(256)
+ );
+ 
+ ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+ 
+ -- Table PARTITION_PARAMS for join relationship
+ CREATE TABLE PARTITION_PARAMS
+ (
+     PART_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(256) NOT NULL,
+     PARAM_VALUE VARCHAR2(4000) NULL
+ );
+ 
+ ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+ 
+ -- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+ CREATE TABLE SERDES
+ (
+     SERDE_ID NUMBER NOT NULL,
+     "NAME" VARCHAR2(128) NULL,
+     SLIB VARCHAR2(4000) NULL,
+     "DESCRIPTION" VARCHAR2(4000),
+     "SERIALIZER_CLASS" VARCHAR2(4000),
+     "DESERIALIZER_CLASS" VARCHAR2(4000),
+     "SERDE_TYPE" NUMBER
+ );
+ 
+ ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+ 
+ -- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+ CREATE TABLE TYPES
+ (
+     TYPES_ID NUMBER NOT NULL,
+     TYPE_NAME VARCHAR2(128) NULL,
+     TYPE1 VARCHAR2(767) NULL,
+     TYPE2 VARCHAR2(767) NULL
+ );
+ 
+ ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+ 
+ -- Table PARTITION_KEYS for join relationship
+ CREATE TABLE PARTITION_KEYS
+ (
+     TBL_ID NUMBER NOT NULL,
+     PKEY_COMMENT VARCHAR2(4000) NULL,
+     PKEY_NAME VARCHAR2(128) NOT NULL,
+     PKEY_TYPE VARCHAR2(767) NOT NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+ 
+ -- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+ CREATE TABLE ROLES
+ (
+     ROLE_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     OWNER_NAME VARCHAR2(128) NULL,
+     ROLE_NAME VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+ 
+ -- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+ CREATE TABLE PARTITIONS
+ (
+     PART_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+     PART_NAME VARCHAR2(767) NULL,
+     SD_ID NUMBER NULL,
 -    TBL_ID NUMBER NULL
++    TBL_ID NUMBER NULL,
++    WRITE_ID NUMBER NULL
+ );
+ 
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+ 
+ -- Table INDEX_PARAMS for join relationship
+ CREATE TABLE INDEX_PARAMS
+ (
+     INDEX_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(256) NOT NULL,
+     PARAM_VALUE VARCHAR2(4000) NULL
+ );
+ 
+ ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+ 
+ -- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ CREATE TABLE TBL_COL_PRIVS
+ (
+     TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
+     "COLUMN_NAME" VARCHAR2(767) NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     TBL_COL_PRIV VARCHAR2(128) NULL,
+     TBL_ID NUMBER NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+ 
+ -- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+ CREATE TABLE IDXS
+ (
+     INDEX_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
+     INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
+     INDEX_NAME VARCHAR2(128) NULL,
+     INDEX_TBL_ID NUMBER NULL,
+     LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+     ORIG_TBL_ID NUMBER NULL,
+     SD_ID NUMBER NULL
+ );
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+ 
+ -- Table BUCKETING_COLS for join relationship
+ CREATE TABLE BUCKETING_COLS
+ (
+     SD_ID NUMBER NOT NULL,
+     BUCKET_COL_NAME VARCHAR2(256) NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ -- Table TYPE_FIELDS for join relationship
+ CREATE TABLE TYPE_FIELDS
+ (
+     TYPE_NAME NUMBER NOT NULL,
+     "COMMENT" VARCHAR2(256) NULL,
+     FIELD_NAME VARCHAR2(128) NOT NULL,
+     FIELD_TYPE VARCHAR2(767) NOT NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+ 
+ -- Table SD_PARAMS for join relationship
+ CREATE TABLE SD_PARAMS
+ (
+     SD_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(256) NOT NULL,
+     PARAM_VALUE CLOB NULL
+ );
+ 
+ ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+ 
+ -- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+ CREATE TABLE GLOBAL_PRIVS
+ (
+     USER_GRANT_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     USER_PRIV VARCHAR2(128) NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+ 
+ -- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ CREATE TABLE SDS
+ (
+     SD_ID NUMBER NOT NULL,
+     CD_ID NUMBER NULL,
+     INPUT_FORMAT VARCHAR2(4000) NULL,
+     IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
+     LOCATION VARCHAR2(4000) NULL,
+     NUM_BUCKETS NUMBER (10) NOT NULL,
+     OUTPUT_FORMAT VARCHAR2(4000) NULL,
+     SERDE_ID NUMBER NULL,
+     IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
+ );
+ 
+ ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+ 
+ -- Table TABLE_PARAMS for join relationship
+ CREATE TABLE TABLE_PARAMS
+ (
+     TBL_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(256) NOT NULL,
+     PARAM_VALUE CLOB NULL
+ );
+ 
+ ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+ 
+ -- Table SORT_COLS for join relationship
+ CREATE TABLE SORT_COLS
+ (
+     SD_ID NUMBER NOT NULL,
+     "COLUMN_NAME" VARCHAR2(767) NULL,
+     "ORDER" NUMBER (10) NOT NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ -- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ CREATE TABLE TBL_PRIVS
+ (
+     TBL_GRANT_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     TBL_PRIV VARCHAR2(128) NULL,
+     TBL_ID NUMBER NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+ 
+ -- Table DATABASE_PARAMS for join relationship
+ CREATE TABLE DATABASE_PARAMS
+ (
+     DB_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(180) NOT NULL,
+     PARAM_VALUE VARCHAR2(4000) NULL
+ );
+ 
+ ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+ 
+ -- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ CREATE TABLE ROLE_MAP
+ (
+     ROLE_GRANT_ID NUMBER NOT NULL,
+     ADD_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     ROLE_ID NUMBER NULL
+ );
+ 
+ ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+ 
+ -- Table SERDE_PARAMS for join relationship
+ CREATE TABLE SERDE_PARAMS
+ (
+     SERDE_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(256) NOT NULL,
+     PARAM_VALUE CLOB NULL
+ );
+ 
+ ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+ 
+ -- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ CREATE TABLE PART_PRIVS
+ (
+     PART_GRANT_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PART_ID NUMBER NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     PART_PRIV VARCHAR2(128) NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+ 
+ -- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ CREATE TABLE DB_PRIVS
+ (
+     DB_GRANT_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     DB_ID NUMBER NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     DB_PRIV VARCHAR2(128) NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+ 
+ -- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+ CREATE TABLE TBLS
+ (
+     TBL_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     DB_ID NUMBER NULL,
+     LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+     OWNER VARCHAR2(767) NULL,
+     OWNER_TYPE VARCHAR2(10) NULL,
+     RETENTION NUMBER (10) NOT NULL,
+     SD_ID NUMBER NULL,
+     TBL_NAME VARCHAR2(256) NULL,
+     TBL_TYPE VARCHAR2(128) NULL,
+     VIEW_EXPANDED_TEXT CLOB NULL,
+     VIEW_ORIGINAL_TEXT CLOB NULL,
 -    IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0))
++    IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0)),
++    WRITE_ID NUMBER NULL
+ );
+ 
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+ 
+ -- Table MV_CREATION_METADATA for classes [org.apache.hadoop.hive.metastore.model.MCreationMetadata]
+ CREATE TABLE MV_CREATION_METADATA
+ (
+     MV_CREATION_METADATA_ID NUMBER NOT NULL,
+     CAT_NAME VARCHAR2(256) NOT NULL,
+     DB_NAME VARCHAR2(128) NOT NULL,
+     TBL_NAME VARCHAR2(256) NOT NULL,
+     TXN_LIST CLOB NULL,
+     MATERIALIZATION_TIME NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_TABLE ON MV_CREATION_METADATA ("DB_NAME", "TBL_NAME");
+ 
+ -- Table MV_TABLES_USED for classes [org.apache.hadoop.hive.metastore.model.MCreationMetadata]
+ CREATE TABLE MV_TABLES_USED
+ (
+     MV_CREATION_METADATA_ID NUMBER NOT NULL,
+     TBL_ID NUMBER NOT NULL
+ );
+ 
+ -- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+ CREATE TABLE PARTITION_EVENTS
+ (
+     PART_NAME_ID NUMBER NOT NULL,
+     CAT_NAME VARCHAR2(256) NULL,
+     DB_NAME VARCHAR2(128) NULL,
+     EVENT_TIME NUMBER NOT NULL,
+     EVENT_TYPE NUMBER (10) NOT NULL,
+     PARTITION_NAME VARCHAR2(767) NULL,
+     TBL_NAME VARCHAR2(256) NULL
+ );
+ 
+ ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+ 
+ -- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+ CREATE TABLE SKEWED_STRING_LIST
+ (
+     STRING_LIST_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+ 
+ CREATE TABLE SKEWED_STRING_LIST_VALUES
+ (
+     STRING_LIST_ID NUMBER NOT NULL,
+     "STRING_LIST_VALUE" VARCHAR2(256) NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+ 
+ ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+ 
+ CREATE TABLE SKEWED_COL_NAMES
+ (
+     SD_ID NUMBER NOT NULL,
+     "SKEWED_COL_NAME" VARCHAR2(256) NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+ (
+     SD_ID NUMBER NOT NULL,
+     STRING_LIST_ID_KID NUMBER NOT NULL,
+     "LOCATION" VARCHAR2(4000) NULL
+ );
+ 
+ CREATE TABLE MASTER_KEYS
+ (
+     KEY_ID NUMBER (10) NOT NULL,
+     MASTER_KEY VARCHAR2(767) NULL
+ );
+ 
+ CREATE TABLE DELEGATION_TOKENS
+ (
+     TOKEN_IDENT VARCHAR2(767) NOT NULL,
+     TOKEN VARCHAR2(767) NULL
+ );
+ 
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+ 
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE TABLE SKEWED_VALUES
+ (
+     SD_ID_OID NUMBER NOT NULL,
+     STRING_LIST_ID_EID NUMBER NOT NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+ 
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE DBS ADD CONSTRAINT CTLGS_FK FOREIGN KEY (CTLG_NAME) REFERENCES CTLGS ("NAME") INITIALLY DEFERRED;
+ 
+ -- column statistics
+ 
+ CREATE TABLE TAB_COL_STATS (
+  CS_ID NUMBER NOT NULL,
+  CAT_NAME VARCHAR2(256) NOT NULL,
+  DB_NAME VARCHAR2(128) NOT NULL,
+  TABLE_NAME VARCHAR2(256) NOT NULL,
+  COLUMN_NAME VARCHAR2(767) NOT NULL,
+  COLUMN_TYPE VARCHAR2(128) NOT NULL,
+  TBL_ID NUMBER NOT NULL,
+  LONG_LOW_VALUE NUMBER,
+  LONG_HIGH_VALUE NUMBER,
+  DOUBLE_LOW_VALUE NUMBER,
+  DOUBLE_HIGH_VALUE NUMBER,
+  BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+  BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+  NUM_NULLS NUMBER NOT NULL,
+  NUM_DISTINCTS NUMBER,
+  BIT_VECTOR BLOB,
+  AVG_COL_LEN NUMBER,
+  MAX_COL_LEN NUMBER,
+  NUM_TRUES NUMBER,
+  NUM_FALSES NUMBER,
+  LAST_ANALYZED NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+ 
+ ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
+ 
+ CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (CAT_NAME, DB_NAME, TABLE_NAME, COLUMN_NAME);
+ 
+ CREATE TABLE VERSION (
+   VER_ID NUMBER NOT NULL,
+   SCHEMA_VERSION VARCHAR(127) NOT NULL,
+   VERSION_COMMENT VARCHAR(255)
+ );
+ ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+ 
+ CREATE TABLE PART_COL_STATS (
+  CS_ID NUMBER NOT NULL,
+  CAT_NAME VARCHAR2(256) NOT NULL,
+  DB_NAME VARCHAR2(128) NOT NULL,
+  TABLE_NAME VARCHAR2(256) NOT NULL,
+  PARTITION_NAME VARCHAR2(767) NOT NULL,
+  COLUMN_NAME VARCHAR2(767) NOT NULL,
+  COLUMN_TYPE VARCHAR2(128) NOT NULL,
+  PART_ID NUMBER NOT NULL,
+  LONG_LOW_VALUE NUMBER,
+  LONG_HIGH_VALUE NUMBER,
+  DOUBLE_LOW_VALUE NUMBER,
+  DOUBLE_HIGH_VALUE NUMBER,
+  BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+  BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+  NUM_NULLS NUMBER NOT NULL,
+  NUM_DISTINCTS NUMBER,
+  BIT_VECTOR BLOB,
+  AVG_COL_LEN NUMBER,
+  MAX_COL_LEN NUMBER,
+  NUM_TRUES NUMBER,
+  NUM_FALSES NUMBER,
+  LAST_ANALYZED NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+ 
+ ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+ 
+ CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+ 
+ CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+ 
+ CREATE TABLE FUNCS (
+   FUNC_ID NUMBER NOT NULL,
+   CLASS_NAME VARCHAR2(4000),
+   CREATE_TIME NUMBER(10) NOT NULL,
+   DB_ID NUMBER,
+   FUNC_NAME VARCHAR2(128),
+   FUNC_TYPE NUMBER(10) NOT NULL,
+   OWNER_NAME VARCHAR2(128),
+   OWNER_TYPE VARCHAR2(10)
+ );
+ 
+ ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+ 
+ CREATE TABLE FUNC_RU (
+   FUNC_ID NUMBER NOT NULL,
+   RESOURCE_TYPE NUMBER(10) NOT NULL,
+   RESOURCE_URI VARCHAR2(4000),
+   INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
+ 
+ CREATE TABLE NOTIFICATION_LOG
+ (
+     NL_ID NUMBER NOT NULL,
+     EVENT_ID NUMBER NOT NULL,
+     EVENT_TIME NUMBER(10) NOT NULL,
+     EVENT_TYPE VARCHAR2(32) NOT NULL,
+     CAT_NAME VARCHAR2(256),
+     DB_NAME VARCHAR2(128),
+     TBL_NAME VARCHAR2(256),
+     MESSAGE CLOB NULL,
+     MESSAGE_FORMAT VARCHAR(16) NULL
+ );
+ 
+ ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID);
+ 
+ CREATE TABLE NOTIFICATION_SEQUENCE
+ (
+     NNI_ID NUMBER NOT NULL,
+     NEXT_EVENT_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);
+ 
+ INSERT INTO NOTIFICATION_SEQUENCE (NNI_ID, NEXT_EVENT_ID) SELECT 1,1 FROM DUAL WHERE NOT EXISTS ( SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE);
+ 
+ -- Tables to manage resource plans.
+ 
+ CREATE TABLE WM_RESOURCEPLAN
+ (
+     RP_ID NUMBER NOT NULL,
+     "NAME" VARCHAR2(128) NOT NULL,
+     QUERY_PARALLELISM NUMBER(10),
+     STATUS VARCHAR2(20) NOT NULL,
+     DEFAULT_POOL_ID NUMBER
+ );
+ 
+ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID);
+ 
+ CREATE TABLE WM_POOL
+ (
+     POOL_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     PATH VARCHAR2(1024) NOT NULL,
+     ALLOC_FRACTION NUMBER,
+     QUERY_PARALLELISM NUMBER(10),
+     SCHEDULING_POLICY VARCHAR2(1024)
+ );
+ 
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID);
+ 
+ CREATE TABLE WM_TRIGGER
+ (
+     TRIGGER_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     "NAME" VARCHAR2(128) NOT NULL,
+     TRIGGER_EXPRESSION VARCHAR2(1024),
+     ACTION_EXPRESSION VARCHAR2(1024),
+     IS_IN_UNMANAGED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_IN_UNMANAGED IN (1,0))
+ );
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID);
+ 
+ CREATE TABLE WM_POOL_TO_TRIGGER
+ (
+     POOL_ID NUMBER NOT NULL,
+     TRIGGER_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID);
+ 
+ CREATE TABLE WM_MAPPING
+ (
+     MAPPING_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     ENTITY_TYPE VARCHAR2(128) NOT NULL,
+     ENTITY_NAME VARCHAR2(128) NOT NULL,
+     POOL_ID NUMBER NOT NULL,
+     ORDERING NUMBER(10)
+ );
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID);
+ 
+ -- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+ 
+ CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (AUTHORIZER,PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table COLUMNS_V2
+ ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+ 
+ 
+ -- Constraints for table PARTITION_KEY_VALS
+ ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+ 
+ 
+ -- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+ CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME", CTLG_NAME);
+ 
+ 
+ -- Constraints for table PARTITION_PARAMS
+ ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+ 
+ 
+ -- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+ 
+ -- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+ CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
+ 
+ 
+ -- Constraints for table PARTITION_KEYS
+ ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+ 
+ 
+ -- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+ CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+ 
+ 
+ -- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+ 
+ CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+ 
+ 
+ -- Constraints for table INDEX_PARAMS
+ ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+ 
+ 
+ -- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (AUTHORIZER,TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+ 
+ 
+ -- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+ 
+ CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
+ 
+ CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+ 
+ CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
+ 
+ 
+ -- Constraints for table BUCKETING_COLS
+ ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+ 
+ 
+ -- Constraints for table TYPE_FIELDS
+ ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+ 
+ 
+ -- Constraints for table SD_PARAMS
+ ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+ 
+ 
+ -- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+ CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (AUTHORIZER,PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+ CREATE INDEX SDS_N50 ON SDS (CD_ID);
+ 
+ 
+ -- Constraints for table TABLE_PARAMS
+ ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+ 
+ 
+ -- Constraints for table SORT_COLS
+ ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+ 
+ 
+ -- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+ 
+ CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (AUTHORIZER,TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table DATABASE_PARAMS
+ ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+ 
+ 
+ -- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+ 
+ CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table SERDE_PARAMS
+ ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+ 
+ 
+ -- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (AUTHORIZER,PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+ 
+ 
+ -- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+ 
+ CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (AUTHORIZER,DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+ 
+ 
+ -- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+ 
+ CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+ 
+ 
+ -- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+ CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+ 
+ 
+ -- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
+ 
+ CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
+ 
+ CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+ 
+ 
+ -- Constraints for table FUNC_RU for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
+ 
+ CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+ 
+ CREATE TABLE KEY_CONSTRAINTS
+ (
+   CHILD_CD_ID NUMBER,
+   CHILD_INTEGER_IDX NUMBER,
+   CHILD_TBL_ID NUMBER,
+   PARENT_CD_ID NUMBER,
+   PARENT_INTEGER_IDX NUMBER NOT NULL,
+   PARENT_TBL_ID NUMBER NOT NULL,
+   POSITION NUMBER NOT NULL,
+   CONSTRAINT_NAME VARCHAR(400) NOT NULL,
+   CONSTRAINT_TYPE NUMBER NOT NULL,
+   UPDATE_RULE NUMBER,
+   DELETE_RULE NUMBER,
+   ENABLE_VALIDATE_RELY NUMBER NOT NULL,
+   DEFAULT_VALUE VARCHAR(400)
+ ) ;
+ 
+ ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
+ 
+ CREATE INDEX CONSTRAINTS_PT_INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID);
+ 
+ CREATE INDEX CONSTRAINTS_CT_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE);
+ 
+ -- Table for METASTORE_DB_PROPERTIES and its constraints
+ CREATE TABLE METASTORE_DB_PROPERTIES
+ (
+   PROPERTY_KEY VARCHAR(255) NOT NULL,
+   PROPERTY_VALUE VARCHAR(1000) NOT NULL,
+   DESCRIPTION VARCHAR(1000)
+ );
+ 
+ ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY);
+ 
+ -- Constraints for resource plan tables.
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME");
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH);
+ 
+ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME");
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME);
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK1 FOREIGN KEY (MV_CREATION_METADATA_ID) REFERENCES MV_CREATION_METADATA (MV_CREATION_METADATA_ID);
+ 
+ ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK2 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID);
+ 
+ ------------------------------
+ -- Transaction and lock tables
+ ------------------------------
+ CREATE TABLE TXNS (
+   TXN_ID NUMBER(19) PRIMARY KEY,
+   TXN_STATE char(1) NOT NULL,
+   TXN_STARTED NUMBER(19) NOT NULL,
+   TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+   TXN_USER varchar(128) NOT NULL,
+   TXN_HOST varchar(128) NOT NULL,
+   TXN_AGENT_INFO varchar2(128),
+   TXN_META_INFO varchar2(128),
+   TXN_HEARTBEAT_COUNT number(10),
+   TXN_TYPE number(10)
+ ) ROWDEPENDENCIES;
+ 
+ CREATE TABLE TXN_COMPONENTS (
+   TC_TXNID NUMBER(19) NOT NULL REFERENCES TXNS (TXN_ID),
+   TC_DATABASE VARCHAR2(128) NOT NULL,
+   TC_TABLE VARCHAR2(128),
+   TC_PARTITION VARCHAR2(767) NULL,
+   TC_OPERATION_TYPE char(1) NOT NULL,
+   TC_WRITEID NUMBER(19)
+ ) ROWDEPENDENCIES;
+ 
+ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+ 
+ CREATE TABLE COMPLETED_TXN_COMPONENTS (
+   CTC_TXNID NUMBER(19) NOT NULL,
+   CTC_DATABASE VARCHAR2(128) NOT NULL,
+   CTC_TABLE VARCHAR2(256),
+   CTC_PARTITION VARCHAR2(767),
+   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
+   CTC_WRITEID NUMBER(19),
+   CTC_UPDATE_DELETE CHAR(1) NOT NULL
+ ) ROWDEPENDENCIES;
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+ 
+ CREATE TABLE NEXT_TXN_ID (
+   NTXN_NEXT NUMBER(19) NOT NULL
+ );
+ INSERT INTO NEXT_TXN_ID VALUES(1);
+ 
+ CREATE TABLE HIVE_LOCKS (
+   HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
+   HL_LOCK_INT_ID NUMBER(19) NOT NULL,
+   HL_TXNID NUMBER(19) NOT NULL,
+   HL_DB VARCHAR2(128) NOT NULL,
+   HL_TABLE VARCHAR2(128),
+   HL_PARTITION VARCHAR2(767),
+   HL_LOCK_STATE CHAR(1) NOT NULL,
+   HL_LOCK_TYPE CHAR(1) NOT NULL,
+   HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+   HL_ACQUIRED_AT NUMBER(19),
+   HL_USER varchar(128) NOT NULL,
+   HL_HOST varchar(128) NOT NULL,
+   HL_HEARTBEAT_COUNT number(10),
+   HL_AGENT_INFO varchar2(128),
+   HL_BLOCKEDBY_EXT_ID number(19),
+   HL_BLOCKEDBY_INT_ID number(19),
+   PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+ ) ROWDEPENDENCIES;
+ 
+ CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+ 
+ CREATE TABLE NEXT_LOCK_ID (
+   NL_NEXT NUMBER(19) NOT NULL
+ );
+ INSERT INTO NEXT_LOCK_ID VALUES(1);
+ 
+ CREATE TABLE COMPACTION_QUEUE (
+   CQ_ID NUMBER(19) PRIMARY KEY,
+   CQ_DATABASE varchar(128) NOT NULL,
+   CQ_TABLE varchar(128) NOT NULL,
+   CQ_PARTITION varchar(767),
+   CQ_STATE char(1) NOT NULL,
+   CQ_TYPE char(1) NOT NULL,
+   CQ_TBLPROPERTIES varchar(2048),
+   CQ_WORKER_ID varchar(128),
+   CQ_START NUMBER(19),
+   CQ_RUN_AS varchar(128),
+   CQ_HIGHEST_WRITE_ID NUMBER(19),
+   CQ_META_INFO BLOB,
+   CQ_HADOOP_JOB_ID varchar2(32)
+ ) ROWDEPENDENCIES;
+ 
+ CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+   NCQ_NEXT NUMBER(19) NOT NULL
+ );
+ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+ 
+ CREATE TABLE COMPLETED_COMPACTIONS (
+   CC_ID NUMBER(19) PRIMARY KEY,
+   CC_DATABASE varchar(128) NOT NULL,
+   CC_TABLE varchar(128) NOT NULL,
+   CC_PARTITION varchar(767),
+   CC_STATE char(1) NOT NULL,
+   CC_TYPE char(1) NOT NULL,
+   CC_TBLPROPERTIES varchar(2048),
+   CC_WORKER_ID varchar(128),
+   CC_START NUMBER(19),
+   CC_END NUMBER(19),
+   CC_RUN_AS varchar(128),
+   CC_HIGHEST_WRITE_ID NUMBER(19),
+   CC_META_INFO BLOB,
+   CC_HADOOP_JOB_ID varchar2(32)
+ ) ROWDEPENDENCIES;
+ 
+ CREATE TABLE AUX_TABLE (
+   MT_KEY1 varchar2(128) NOT NULL,
+   MT_KEY2 number(19) NOT NULL,
+   MT_COMMENT varchar2(255),
+   PRIMARY KEY(MT_KEY1, MT_KEY2)
+ );
+ 
+ CREATE TABLE WRITE_SET (
+   WS_DATABASE varchar2(128) NOT NULL,
+   WS_TABLE varchar2(128) NOT NULL,
+   WS_PARTITION varchar2(767),
+   WS_TXNID number(19) NOT NULL,
+   WS_COMMIT_ID number(19) NOT NULL,
+   WS_OPERATION_TYPE char(1) NOT NULL
+ );
+ 
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID NUMBER(19) NOT NULL,
+   T2W_DATABASE VARCHAR2(128) NOT NULL,
+   T2W_TABLE VARCHAR2(256) NOT NULL,
+   T2W_WRITEID NUMBER(19) NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
+ 
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE VARCHAR2(128) NOT NULL,
+   NWI_TABLE VARCHAR2(256) NOT NULL,
+   NWI_NEXT NUMBER(19) NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
+ 
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID NUMBER(19) NOT NULL,
+   MHL_MIN_OPEN_TXNID NUMBER(19) NOT NULL,
+   PRIMARY KEY(MHL_TXNID)
+ );
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+ 
+ CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+   MRL_TXN_ID NUMBER NOT NULL,
+   MRL_DB_NAME VARCHAR(128) NOT NULL,
+   MRL_TBL_NAME VARCHAR(256) NOT NULL,
+   MRL_LAST_HEARTBEAT NUMBER NOT NULL,
+   PRIMARY KEY(MRL_TXN_ID)
+ );
+ 
+ CREATE TABLE "I_SCHEMA" (
+   "SCHEMA_ID" number primary key,
+   "SCHEMA_TYPE" number not null,
+   "NAME" varchar2(256) unique,
+   "DB_ID" number references "DBS" ("DB_ID"),
+   "COMPATIBILITY" number not null,
+   "VALIDATION_LEVEL" number not null,
+   "CAN_EVOLVE" number(1) not null,
+   "SCHEMA_GROUP" varchar2(256),
+   "DESCRIPTION" varchar2(4000)
+ );
+ 
+ CREATE TABLE "SCHEMA_VERSION" (
+   "SCHEMA_VERSION_ID" number primary key,
+   "SCHEMA_ID" number references "I_SCHEMA" ("SCHEMA_ID"),
+   "VERSION" number not null,
+   "CREATED_AT" number not null,
+   "CD_ID" number references "CDS" ("CD_ID"), 
+   "STATE" number not null,
+   "DESCRIPTION" varchar2(4000),
+   "SCHEMA_TEXT" clob,
+   "FINGERPRINT" varchar2(256),
+   "SCHEMA_VERSION_NAME" varchar2(256),
+   "SERDE_ID" number references "SERDES" ("SERDE_ID"), 
+   UNIQUE ("SCHEMA_ID", "VERSION")
+ );
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY varchar(256) NOT NULL,
+   RTM_SRC_TXN_ID number(19) NOT NULL,
+   RTM_TARGET_TXN_ID number(19) NOT NULL,
+   PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
+ );
+ 
+ CREATE TABLE RUNTIME_STATS (
+   RS_ID NUMBER primary key,
+   CREATE_TIME NUMBER(10) NOT NULL,
+   WEIGHT NUMBER(10) NOT NULL,
+   PAYLOAD BLOB
+ );
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (
+   WNL_ID number(19) NOT NULL,
+   WNL_TXNID number(19) NOT NULL,
+   WNL_WRITEID number(19) NOT NULL,
+   WNL_DATABASE varchar(128) NOT NULL,
+   WNL_TABLE varchar(128) NOT NULL,
+   WNL_PARTITION varchar(1024) NOT NULL,
+   WNL_TABLE_OBJ clob NOT NULL,
+   WNL_PARTITION_OBJ clob,
+   WNL_FILES clob,
+   WNL_EVENT_TIME number(10) NOT NULL,
+   PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION)
+ );
+ 
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
+ 
+ -- -----------------------------------------------------------------
+ -- Record schema version. Should be the last step in the init script
+ -- -----------------------------------------------------------------
+ INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0');
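(Not part of the committed script; a minimal verification sketch against the VERSION table created above, useful after running the 4.0.0 init DDL.)

-- Sketch: confirm the schema version recorded by the init script
SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION WHERE VER_ID = 1;
-- Expected: '4.0.0' / 'Hive release version 4.0.0'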

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
index 0000000,71f5034..c9c6b30
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
@@@ -1,0 -1,342 +1,343 @@@
+ SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual;
+ 
+ --@041-HIVE-16556.oracle.sql;
+ CREATE TABLE METASTORE_DB_PROPERTIES
+ (
+   PROPERTY_KEY VARCHAR(255) NOT NULL,
+   PROPERTY_VALUE VARCHAR(1000) NOT NULL,
+   DESCRIPTION VARCHAR(1000)
+ );
+ 
+ ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY);
+ 
+ --@042-HIVE-16575.oracle.sql;
+ CREATE INDEX CONSTRAINTS_CT_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE);
+ 
+ --@043-HIVE-16922.oracle.sql;
+ UPDATE SERDE_PARAMS
+ SET PARAM_KEY='collection.delim'
+ WHERE PARAM_KEY='colelction.delim';
+ 
+ --@044-HIVE-16997.oracle.sql;
+ ALTER TABLE PART_COL_STATS ADD BIT_VECTOR BLOB NULL;
+ ALTER TABLE TAB_COL_STATS ADD BIT_VECTOR BLOB NULL;
+ 
+ --@045-HIVE-16886.oracle.sql;
+ INSERT INTO NOTIFICATION_SEQUENCE (NNI_ID, NEXT_EVENT_ID) SELECT 1,1 FROM DUAL WHERE NOT EXISTS ( SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE);
+ 
+ --@046-HIVE-17566.oracle.sql;
+ CREATE TABLE WM_RESOURCEPLAN
+ (
+     RP_ID NUMBER NOT NULL,
+     "NAME" VARCHAR2(128) NOT NULL,
+     QUERY_PARALLELISM NUMBER(10),
+     STATUS VARCHAR2(20) NOT NULL,
+     DEFAULT_POOL_ID NUMBER
+ );
+ 
+ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME");
+ 
+ 
+ CREATE TABLE WM_POOL
+ (
+     POOL_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     PATH VARCHAR2(1024) NOT NULL,
+     ALLOC_FRACTION NUMBER,
+     QUERY_PARALLELISM NUMBER(10),
+     SCHEDULING_POLICY VARCHAR2(1024)
+ );
+ 
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH);
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ 
+ CREATE TABLE WM_TRIGGER
+ (
+     TRIGGER_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     "NAME" VARCHAR2(128) NOT NULL,
+     TRIGGER_EXPRESSION VARCHAR2(1024),
+     ACTION_EXPRESSION VARCHAR2(1024),
+     IS_IN_UNMANAGED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_IN_UNMANAGED IN (1,0))
+ );
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME");
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ 
+ CREATE TABLE WM_POOL_TO_TRIGGER
+ (
+     POOL_ID NUMBER NOT NULL,
+     TRIGGER_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID);
+ 
+ 
+ CREATE TABLE WM_MAPPING
+ (
+     MAPPING_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     ENTITY_TYPE VARCHAR2(128) NOT NULL,
+     ENTITY_NAME VARCHAR2(128) NOT NULL,
+     POOL_ID NUMBER NOT NULL,
+     ORDERING NUMBER(10)
+ );
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME);
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ -- Upgrades for Schema Registry objects
+ ALTER TABLE "SERDES" ADD "DESCRIPTION" VARCHAR(4000);
+ ALTER TABLE "SERDES" ADD "SERIALIZER_CLASS" VARCHAR(4000);
+ ALTER TABLE "SERDES" ADD "DESERIALIZER_CLASS" VARCHAR(4000);
+ ALTER TABLE "SERDES" ADD "SERDE_TYPE" INTEGER;
+ 
+ CREATE TABLE "I_SCHEMA" (
+   "SCHEMA_ID" number primary key,
+   "SCHEMA_TYPE" number not null,
+   "NAME" varchar2(256) unique,
+   "DB_ID" number references "DBS" ("DB_ID"),
+   "COMPATIBILITY" number not null,
+   "VALIDATION_LEVEL" number not null,
+   "CAN_EVOLVE" number(1) not null,
+   "SCHEMA_GROUP" varchar2(256),
+   "DESCRIPTION" varchar2(4000)
+ );
+ 
+ CREATE TABLE "SCHEMA_VERSION" (
+   "SCHEMA_VERSION_ID" number primary key,
+   "SCHEMA_ID" number references "I_SCHEMA" ("SCHEMA_ID"),
+   "VERSION" number not null,
+   "CREATED_AT" number not null,
+   "CD_ID" number references "CDS" ("CD_ID"), 
+   "STATE" number not null,
+   "DESCRIPTION" varchar2(4000),
+   "SCHEMA_TEXT" clob,
+   "FINGERPRINT" varchar2(256),
+   "SCHEMA_VERSION_NAME" varchar2(256),
+   "SERDE_ID" number references "SERDES" ("SERDE_ID"), 
+   UNIQUE ("SCHEMA_ID", "VERSION")
+ );
+ 
+ 
+ -- 048-HIVE-14498
+ CREATE TABLE MV_CREATION_METADATA
+ (
+     MV_CREATION_METADATA_ID NUMBER NOT NULL,
+     CAT_NAME VARCHAR2(256) NOT NULL,
+     DB_NAME VARCHAR2(128) NOT NULL,
+     TBL_NAME VARCHAR2(256) NOT NULL,
+     TXN_LIST CLOB NULL
+ );
+ 
+ ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_TABLE ON MV_CREATION_METADATA ("DB_NAME", "TBL_NAME");
+ 
+ CREATE TABLE MV_TABLES_USED
+ (
+     MV_CREATION_METADATA_ID NUMBER NOT NULL,
+     TBL_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK1 FOREIGN KEY (MV_CREATION_METADATA_ID) REFERENCES MV_CREATION_METADATA (MV_CREATION_METADATA_ID);
+ 
+ ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK2 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID);
+ 
+ ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_TIMESTAMP timestamp NULL;
+ 
+ UPDATE COMPLETED_TXN_COMPONENTS SET CTC_TIMESTAMP = CURRENT_TIMESTAMP;
+ 
+ ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY(CTC_TIMESTAMP DEFAULT CURRENT_TIMESTAMP);
+ 
+ ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY(CTC_TIMESTAMP NOT NULL);
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+ 
+ -- 049-HIVE-18489
+ UPDATE FUNC_RU
+   SET RESOURCE_URI = 's3a' || SUBSTR(RESOURCE_URI, 4)
+   WHERE RESOURCE_URI LIKE 's3n://%' ;
+ 
+ UPDATE SKEWED_COL_VALUE_LOC_MAP
+   SET LOCATION = 's3a' || SUBSTR(LOCATION, 4)
+   WHERE LOCATION LIKE 's3n://%' ;
+ 
+ UPDATE SDS
+   SET LOCATION = 's3a' || SUBSTR(LOCATION, 4)
+   WHERE LOCATION LIKE 's3n://%' ;
+ 
+ UPDATE DBS
+   SET DB_LOCATION_URI = 's3a' || SUBSTR(DB_LOCATION_URI, 4)
+   WHERE DB_LOCATION_URI LIKE 's3n://%' ;
+ 
+ -- HIVE-18192
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID NUMBER(19) NOT NULL,
+   T2W_DATABASE VARCHAR2(128) NOT NULL,
+   T2W_TABLE VARCHAR2(256) NOT NULL,
+   T2W_WRITEID NUMBER(19) NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
+ 
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE VARCHAR2(128) NOT NULL,
+   NWI_TABLE VARCHAR2(256) NOT NULL,
+   NWI_NEXT NUMBER(19) NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
+ 
+ ALTER TABLE COMPACTION_QUEUE RENAME COLUMN CQ_HIGHEST_TXN_ID TO CQ_HIGHEST_WRITE_ID;
+ 
+ ALTER TABLE COMPLETED_COMPACTIONS RENAME COLUMN CC_HIGHEST_TXN_ID TO CC_HIGHEST_WRITE_ID;
+ 
+ -- Modify txn_components/completed_txn_components tables to add write id.
+ ALTER TABLE TXN_COMPONENTS ADD TC_WRITEID number(19);
+ ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_WRITEID number(19);
+ 
+ -- HIVE-18726
+ -- add a new column to support default value for DEFAULT constraint
+ ALTER TABLE KEY_CONSTRAINTS ADD DEFAULT_VALUE VARCHAR(400);
+ ALTER TABLE KEY_CONSTRAINTS MODIFY (PARENT_CD_ID NULL);
+ 
+ ALTER TABLE HIVE_LOCKS MODIFY(HL_TXNID NOT NULL);
+ 
+ -- HIVE-18755, add catalogs
+ -- new catalogs table
+ CREATE TABLE CTLGS (
+     CTLG_ID NUMBER PRIMARY KEY,
+     "NAME" VARCHAR2(256),
+     "DESC" VARCHAR2(4000),
+     LOCATION_URI VARCHAR2(4000) NOT NULL,
+     UNIQUE ("NAME")
+ );
+ 
+ -- Insert a default value.  The location is TBD.  Hive will fix this when it starts
+ INSERT INTO CTLGS VALUES (1, 'hive', 'Default catalog for Hive', 'TBD');
+ 
+ -- Drop the unique index on DBS
+ DROP INDEX UNIQUE_DATABASE;
+ 
+ -- Add the new column to the DBS table, can't put in the not null constraint yet
+ ALTER TABLE DBS ADD CTLG_NAME VARCHAR2(256);
+ 
+ -- Update all records in the DBS table to point to the Hive catalog
+ UPDATE DBS 
+   SET "CTLG_NAME" = 'hive';
+ 
+ -- Add the not null constraint
+ ALTER TABLE DBS MODIFY CTLG_NAME NOT NULL;
+ 
+ -- Put back the unique index 
+ CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME", CTLG_NAME);
+ 
+ -- Add the foreign key
+ ALTER TABLE DBS ADD CONSTRAINT CTLGS_FK FOREIGN KEY (CTLG_NAME) REFERENCES CTLGS ("NAME") INITIALLY DEFERRED;
+ 
+ -- Add columns to table stats and part stats
+ ALTER TABLE TAB_COL_STATS ADD CAT_NAME VARCHAR2(256);
+ ALTER TABLE PART_COL_STATS ADD CAT_NAME VARCHAR2(256);
+ 
+ -- Set CAT_NAME on all existing rows to 'hive'
+ UPDATE TAB_COL_STATS
+   SET CAT_NAME = 'hive';
+ UPDATE PART_COL_STATS
+   SET CAT_NAME = 'hive';
+ 
+ -- Add the not null constraint
+ ALTER TABLE TAB_COL_STATS MODIFY CAT_NAME NOT NULL;
+ ALTER TABLE PART_COL_STATS MODIFY CAT_NAME NOT NULL;
+ 
+ -- Rebuild the index for Part col stats.  No such index for table stats, which seems weird
+ DROP INDEX PCS_STATS_IDX;
+ CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+ 
+ -- Add column to partition events
+ ALTER TABLE PARTITION_EVENTS ADD CAT_NAME VARCHAR2(256);
+ UPDATE PARTITION_EVENTS
+   SET CAT_NAME = 'hive' WHERE DB_NAME IS NOT NULL;
+ 
+ -- Add column to notification log
+ ALTER TABLE NOTIFICATION_LOG ADD CAT_NAME VARCHAR2(256);
+ UPDATE NOTIFICATION_LOG
+   SET CAT_NAME = 'hive' WHERE DB_NAME IS NOT NULL;
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY varchar(256) NOT NULL,
+   RTM_SRC_TXN_ID number(19) NOT NULL,
+   RTM_TARGET_TXN_ID number(19) NOT NULL,
+   PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
+ );
+ 
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) SELECT 'org.apache.hadoop.hive.metastore.model.MNotificationLog',1 FROM DUAL WHERE NOT EXISTS ( SELECT NEXT_VAL FROM SEQUENCE_TABLE WHERE SEQUENCE_NAME = 'org.apache.hadoop.hive.metastore.model.MNotificationLog');
+ 
+ -- HIVE-18747
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID number(19) NOT NULL,
+   MHL_MIN_OPEN_TXNID number(19) NOT NULL,
+   PRIMARY KEY(MHL_TXNID)
+ );
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+ 
+ CREATE TABLE RUNTIME_STATS (
+   RS_ID NUMBER primary key,
+   CREATE_TIME NUMBER(10) NOT NULL,
+   WEIGHT NUMBER(10) NOT NULL,
+   PAYLOAD BLOB
+ );
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ -- HIVE-18193
+ -- Populate NEXT_WRITE_ID for each Transactional table and set next write ID same as next txn ID
+ INSERT INTO NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE, NWI_NEXT)
+     SELECT * FROM
+         (SELECT DB.NAME, TBL_INFO.TBL_NAME FROM DBS DB,
+             (SELECT TBL.DB_ID, TBL.TBL_NAME FROM TBLS TBL,
+                 (SELECT TBL_ID FROM TABLE_PARAMS WHERE PARAM_KEY='transactional' AND to_char(PARAM_VALUE)='true') TBL_PARAM
+             WHERE TBL.TBL_ID=TBL_PARAM.TBL_ID) TBL_INFO
+         where DB.DB_ID=TBL_INFO.DB_ID) DB_TBL_NAME,
+         (SELECT NTXN_NEXT FROM NEXT_TXN_ID) NEXT_WRITE;
+ 
+ -- Populate TXN_TO_WRITE_ID for each aborted/open txns and set write ID equal to txn ID
+ INSERT INTO TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID, T2W_WRITEID)
+     SELECT * FROM
+         (SELECT DB.NAME, TBL_INFO.TBL_NAME FROM DBS DB,
+             (SELECT TBL.DB_ID, TBL.TBL_NAME FROM TBLS TBL,
+                 (SELECT TBL_ID FROM TABLE_PARAMS WHERE PARAM_KEY='transactional' AND to_char(PARAM_VALUE)='true') TBL_PARAM
+             WHERE TBL.TBL_ID=TBL_PARAM.TBL_ID) TBL_INFO
+         where DB.DB_ID=TBL_INFO.DB_ID) DB_TBL_NAME,
+         (SELECT TXN_ID, TXN_ID as WRITE_ID FROM TXNS) TXN_INFO;
+ 
+ -- Update TXN_COMPONENTS and COMPLETED_TXN_COMPONENTS for write ID which is same as txn ID
+ UPDATE TXN_COMPONENTS SET TC_WRITEID = TC_TXNID;
+ UPDATE COMPLETED_TXN_COMPONENTS SET CTC_WRITEID = CTC_TXNID;
+ 
+ ALTER TABLE TBLS ADD OWNER_TYPE VARCHAR2(10) NULL;
+ 
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual;
++
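(A minimal post-upgrade check, not part of the committed script; it assumes the DBS/CTLGS changes made by the HIVE-18755 steps above.)

-- Sketch: after the catalog migration, every database row should point at the default 'hive' catalog
SELECT COUNT(*) FROM DBS WHERE CTLG_NAME IS NULL OR CTLG_NAME <> 'hive';
-- Expected result: 0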

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
index 0000000,6fa5e2d..c94e6ec
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
@@@ -1,0 -1,6 +1,9 @@@
+ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS Status from dual;
+ 
++ALTER TABLE TBLS ADD WRITE_ID number NULL;
++ALTER TABLE PARTITIONS ADD WRITE_ID number NULL;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS Status from dual;
+ 
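(A minimal post-upgrade check, not part of the committed script; USER_TAB_COLUMNS is Oracle's standard data-dictionary view, queried here only to confirm the two ALTERs above took effect.)

-- Sketch: verify the WRITE_ID columns added by the 3.1.0-to-4.0.0 upgrade
SELECT TABLE_NAME, COLUMN_NAME
  FROM USER_TAB_COLUMNS
 WHERE COLUMN_NAME = 'WRITE_ID'
   AND TABLE_NAME IN ('TBLS', 'PARTITIONS');
-- Expected: two rows, one for TBLS and one for PARTITIONS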


[07/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query60.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query60.q.out b/ql/src/test/results/clientpositive/perf/tez/query60.q.out
index f10a728..277dd5d 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query60.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query60.q.out
@@ -189,290 +189,296 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_370]
-        Limit [LIM_369] (rows=100 width=108)
+      File Output Operator [FS_375]
+        Limit [LIM_374] (rows=100 width=108)
           Number of rows:100
-          Select Operator [SEL_368] (rows=335408073 width=108)
+          Select Operator [SEL_373] (rows=335408073 width=108)
             Output:["_col0","_col1"]
           <-Reducer 6 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_367]
-              Group By Operator [GBY_366] (rows=335408073 width=108)
+            SHUFFLE [RS_372]
+              Group By Operator [GBY_371] (rows=335408073 width=108)
                 Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
               <-Union 5 [SIMPLE_EDGE]
                 <-Reducer 10 [CONTAINS] vectorized
-                  Reduce Output Operator [RS_382]
+                  Reduce Output Operator [RS_388]
                     PartitionCols:_col0
-                    Group By Operator [GBY_381] (rows=670816147 width=108)
+                    Group By Operator [GBY_387] (rows=670816147 width=108)
                       Output:["_col0","_col1"],aggregations:["sum(_col1)"],keys:_col0
-                      Group By Operator [GBY_380] (rows=191657247 width=135)
-                        Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
-                      <-Reducer 9 [SIMPLE_EDGE]
-                        SHUFFLE [RS_71]
-                          PartitionCols:_col0
-                          Group By Operator [GBY_70] (rows=383314495 width=135)
-                            Output:["_col0","_col1"],aggregations:["sum(_col8)"],keys:_col1
-                            Merge Join Operator [MERGEJOIN_303] (rows=383314495 width=135)
-                              Conds:RS_66._col0=RS_67._col4(Inner),Output:["_col1","_col8"]
-                            <-Reducer 2 [SIMPLE_EDGE]
-                              SHUFFLE [RS_66]
-                                PartitionCols:_col0
-                                Merge Join Operator [MERGEJOIN_293] (rows=508200 width=1436)
-                                  Conds:RS_319._col1=RS_325._col0(Inner),Output:["_col0","_col1"]
-                                <-Map 1 [SIMPLE_EDGE] vectorized
-                                  SHUFFLE [RS_319]
-                                    PartitionCols:_col1
-                                    Select Operator [SEL_318] (rows=462000 width=1436)
-                                      Output:["_col0","_col1"]
-                                      Filter Operator [FIL_317] (rows=462000 width=1436)
-                                        predicate:(i_item_id is not null and i_item_sk is not null)
-                                        TableScan [TS_0] (rows=462000 width=1436)
-                                          default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id"]
-                                <-Reducer 16 [ONE_TO_ONE_EDGE] vectorized
-                                  FORWARD [RS_325]
-                                    PartitionCols:_col0
-                                    Group By Operator [GBY_324] (rows=115500 width=1436)
-                                      Output:["_col0"],keys:KEY._col0
-                                    <-Map 15 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_323]
-                                        PartitionCols:_col0
-                                        Group By Operator [GBY_322] (rows=231000 width=1436)
-                                          Output:["_col0"],keys:i_item_id
-                                          Select Operator [SEL_321] (rows=231000 width=1436)
-                                            Output:["i_item_id"]
-                                            Filter Operator [FIL_320] (rows=231000 width=1436)
-                                              predicate:((i_category = 'Children') and i_item_id is not null)
-                                              TableScan [TS_3] (rows=462000 width=1436)
-                                                default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_id","i_category"]
-                            <-Reducer 23 [SIMPLE_EDGE]
-                              SHUFFLE [RS_67]
-                                PartitionCols:_col4
-                                Select Operator [SEL_62] (rows=348467716 width=135)
-                                  Output:["_col4","_col5"]
-                                  Merge Join Operator [MERGEJOIN_298] (rows=348467716 width=135)
-                                    Conds:RS_59._col1=RS_346._col0(Inner),Output:["_col2","_col3"]
-                                  <-Map 28 [SIMPLE_EDGE] vectorized
-                                    PARTITION_ONLY_SHUFFLE [RS_346]
-                                      PartitionCols:_col0
-                                      Select Operator [SEL_343] (rows=20000000 width=1014)
-                                        Output:["_col0"]
-                                        Filter Operator [FIL_342] (rows=20000000 width=1014)
-                                          predicate:((ca_gmt_offset = -6) and ca_address_sk is not null)
-                                          TableScan [TS_16] (rows=40000000 width=1014)
-                                            default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_gmt_offset"]
-                                  <-Reducer 22 [SIMPLE_EDGE]
-                                    SHUFFLE [RS_59]
+                      Top N Key Operator [TNK_386] (rows=670816147 width=108)
+                        keys:_col0,sort order:+,top n:100
+                        Group By Operator [GBY_385] (rows=191657247 width=135)
+                          Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
+                        <-Reducer 9 [SIMPLE_EDGE]
+                          SHUFFLE [RS_71]
+                            PartitionCols:_col0
+                            Group By Operator [GBY_70] (rows=383314495 width=135)
+                              Output:["_col0","_col1"],aggregations:["sum(_col8)"],keys:_col1
+                              Merge Join Operator [MERGEJOIN_304] (rows=383314495 width=135)
+                                Conds:RS_66._col0=RS_67._col4(Inner),Output:["_col1","_col8"]
+                              <-Reducer 2 [SIMPLE_EDGE]
+                                SHUFFLE [RS_66]
+                                  PartitionCols:_col0
+                                  Merge Join Operator [MERGEJOIN_294] (rows=508200 width=1436)
+                                    Conds:RS_323._col1=RS_329._col0(Inner),Output:["_col0","_col1"]
+                                  <-Map 1 [SIMPLE_EDGE] vectorized
+                                    SHUFFLE [RS_323]
                                       PartitionCols:_col1
-                                      Merge Join Operator [MERGEJOIN_297] (rows=316788826 width=135)
-                                        Conds:RS_379._col0=RS_330._col0(Inner),Output:["_col1","_col2","_col3"]
-                                      <-Map 20 [SIMPLE_EDGE] vectorized
-                                        PARTITION_ONLY_SHUFFLE [RS_330]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_327] (rows=18262 width=1119)
-                                            Output:["_col0"]
-                                            Filter Operator [FIL_326] (rows=18262 width=1119)
-                                              predicate:((d_moy = 9) and (d_year = 1999) and d_date_sk is not null)
-                                              TableScan [TS_13] (rows=73049 width=1119)
-                                                default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
-                                      <-Map 32 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_379]
+                                      Select Operator [SEL_322] (rows=462000 width=1436)
+                                        Output:["_col0","_col1"]
+                                        Filter Operator [FIL_321] (rows=462000 width=1436)
+                                          predicate:(i_item_id is not null and i_item_sk is not null)
+                                          TableScan [TS_0] (rows=462000 width=1436)
+                                            default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id"]
+                                  <-Reducer 16 [ONE_TO_ONE_EDGE] vectorized
+                                    FORWARD [RS_329]
+                                      PartitionCols:_col0
+                                      Group By Operator [GBY_328] (rows=115500 width=1436)
+                                        Output:["_col0"],keys:KEY._col0
+                                      <-Map 15 [SIMPLE_EDGE] vectorized
+                                        SHUFFLE [RS_327]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_378] (rows=287989836 width=135)
-                                            Output:["_col0","_col1","_col2","_col3"]
-                                            Filter Operator [FIL_377] (rows=287989836 width=135)
-                                              predicate:((cs_bill_addr_sk BETWEEN DynamicValue(RS_60_customer_address_ca_address_sk_min) AND DynamicValue(RS_60_customer_address_ca_address_sk_max) and in_bloom_filter(cs_bill_addr_sk, DynamicValue(RS_60_customer_address_ca_address_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_66_item_i_item_sk_min) AND DynamicValue(RS_66_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_66_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_57_date_dim_d_date_sk_min) AND DynamicValue(RS_57_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_57_date_dim_d_date_sk_bloom_filter))) and cs_bill_addr_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
-                                              TableScan [TS_47] (rows=287989836 width=135)
-                                                default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_addr_sk","cs_item_sk","cs_ext_sales_price"]
-                                              <-Reducer 11 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_376]
-                                                  Group By Operator [GBY_375] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Reducer 2 [CUSTOM_SIMPLE_EDGE]
-                                                    SHUFFLE [RS_240]
-                                                      Group By Operator [GBY_239] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_238] (rows=508200 width=1436)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_293]
-                                              <-Reducer 24 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_372]
-                                                  Group By Operator [GBY_371] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_338]
-                                                      Group By Operator [GBY_335] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_331] (rows=18262 width=1119)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_327]
-                                              <-Reducer 30 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_374]
-                                                  Group By Operator [GBY_373] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=20000000)"]
-                                                  <-Map 28 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_354]
-                                                      Group By Operator [GBY_351] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=20000000)"]
-                                                        Select Operator [SEL_347] (rows=20000000 width=1014)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_343]
+                                          Group By Operator [GBY_326] (rows=231000 width=1436)
+                                            Output:["_col0"],keys:i_item_id
+                                            Select Operator [SEL_325] (rows=231000 width=1436)
+                                              Output:["i_item_id"]
+                                              Filter Operator [FIL_324] (rows=231000 width=1436)
+                                                predicate:((i_category = 'Children') and i_item_id is not null)
+                                                TableScan [TS_3] (rows=462000 width=1436)
+                                                  default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_id","i_category"]
+                              <-Reducer 23 [SIMPLE_EDGE]
+                                SHUFFLE [RS_67]
+                                  PartitionCols:_col4
+                                  Select Operator [SEL_62] (rows=348467716 width=135)
+                                    Output:["_col4","_col5"]
+                                    Merge Join Operator [MERGEJOIN_299] (rows=348467716 width=135)
+                                      Conds:RS_59._col1=RS_350._col0(Inner),Output:["_col2","_col3"]
+                                    <-Map 28 [SIMPLE_EDGE] vectorized
+                                      PARTITION_ONLY_SHUFFLE [RS_350]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_347] (rows=20000000 width=1014)
+                                          Output:["_col0"]
+                                          Filter Operator [FIL_346] (rows=20000000 width=1014)
+                                            predicate:((ca_gmt_offset = -6) and ca_address_sk is not null)
+                                            TableScan [TS_16] (rows=40000000 width=1014)
+                                              default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_gmt_offset"]
+                                    <-Reducer 22 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_59]
+                                        PartitionCols:_col1
+                                        Merge Join Operator [MERGEJOIN_298] (rows=316788826 width=135)
+                                          Conds:RS_384._col0=RS_334._col0(Inner),Output:["_col1","_col2","_col3"]
+                                        <-Map 20 [SIMPLE_EDGE] vectorized
+                                          PARTITION_ONLY_SHUFFLE [RS_334]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_331] (rows=18262 width=1119)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_330] (rows=18262 width=1119)
+                                                predicate:((d_moy = 9) and (d_year = 1999) and d_date_sk is not null)
+                                                TableScan [TS_13] (rows=73049 width=1119)
+                                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
+                                        <-Map 32 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_384]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_383] (rows=287989836 width=135)
+                                              Output:["_col0","_col1","_col2","_col3"]
+                                              Filter Operator [FIL_382] (rows=287989836 width=135)
+                                                predicate:((cs_bill_addr_sk BETWEEN DynamicValue(RS_60_customer_address_ca_address_sk_min) AND DynamicValue(RS_60_customer_address_ca_address_sk_max) and in_bloom_filter(cs_bill_addr_sk, DynamicValue(RS_60_customer_address_ca_address_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_66_item_i_item_sk_min) AND DynamicValue(RS_66_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_66_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_57_date_dim_d_date_sk_min) AND DynamicValue(RS_57_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_57_date_dim_d_date_sk_bloom_filter))) and cs_bill_addr_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
+                                                TableScan [TS_47] (rows=287989836 width=135)
+                                                  default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_addr_sk","cs_item_sk","cs_ext_sales_price"]
+                                                <-Reducer 11 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_381]
+                                                    Group By Operator [GBY_380] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Reducer 2 [CUSTOM_SIMPLE_EDGE]
+                                                      SHUFFLE [RS_241]
+                                                        Group By Operator [GBY_240] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_239] (rows=508200 width=1436)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Merge Join Operator [MERGEJOIN_294]
+                                                <-Reducer 24 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_377]
+                                                    Group By Operator [GBY_376] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      PARTITION_ONLY_SHUFFLE [RS_342]
+                                                        Group By Operator [GBY_339] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_335] (rows=18262 width=1119)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_331]
+                                                <-Reducer 30 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_379]
+                                                    Group By Operator [GBY_378] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=20000000)"]
+                                                    <-Map 28 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      PARTITION_ONLY_SHUFFLE [RS_358]
+                                                        Group By Operator [GBY_355] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=20000000)"]
+                                                          Select Operator [SEL_351] (rows=20000000 width=1014)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_347]
                 <-Reducer 13 [CONTAINS] vectorized
-                  Reduce Output Operator [RS_394]
+                  Reduce Output Operator [RS_401]
                     PartitionCols:_col0
-                    Group By Operator [GBY_393] (rows=670816147 width=108)
+                    Group By Operator [GBY_400] (rows=670816147 width=108)
                       Output:["_col0","_col1"],aggregations:["sum(_col1)"],keys:_col0
-                      Group By Operator [GBY_392] (rows=95833781 width=135)
-                        Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
-                      <-Reducer 12 [SIMPLE_EDGE]
-                        SHUFFLE [RS_109]
-                          PartitionCols:_col0
-                          Group By Operator [GBY_108] (rows=191667562 width=135)
-                            Output:["_col0","_col1"],aggregations:["sum(_col8)"],keys:_col1
-                            Merge Join Operator [MERGEJOIN_304] (rows=191667562 width=135)
-                              Conds:RS_104._col0=RS_105._col3(Inner),Output:["_col1","_col8"]
-                            <-Reducer 2 [SIMPLE_EDGE]
-                              SHUFFLE [RS_104]
-                                PartitionCols:_col0
-                                 Please refer to the previous Merge Join Operator [MERGEJOIN_293]
-                            <-Reducer 26 [SIMPLE_EDGE]
-                              SHUFFLE [RS_105]
-                                PartitionCols:_col3
-                                Select Operator [SEL_100] (rows=174243235 width=135)
-                                  Output:["_col3","_col5"]
-                                  Merge Join Operator [MERGEJOIN_301] (rows=174243235 width=135)
-                                    Conds:RS_97._col2=RS_348._col0(Inner),Output:["_col1","_col3"]
-                                  <-Map 28 [SIMPLE_EDGE] vectorized
-                                    PARTITION_ONLY_SHUFFLE [RS_348]
-                                      PartitionCols:_col0
-                                       Please refer to the previous Select Operator [SEL_343]
-                                  <-Reducer 25 [SIMPLE_EDGE]
-                                    SHUFFLE [RS_97]
-                                      PartitionCols:_col2
-                                      Merge Join Operator [MERGEJOIN_300] (rows=158402938 width=135)
-                                        Conds:RS_391._col0=RS_332._col0(Inner),Output:["_col1","_col2","_col3"]
-                                      <-Map 20 [SIMPLE_EDGE] vectorized
-                                        PARTITION_ONLY_SHUFFLE [RS_332]
-                                          PartitionCols:_col0
-                                           Please refer to the previous Select Operator [SEL_327]
-                                      <-Map 33 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_391]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_390] (rows=144002668 width=135)
-                                            Output:["_col0","_col1","_col2","_col3"]
-                                            Filter Operator [FIL_389] (rows=144002668 width=135)
-                                              predicate:((ws_bill_addr_sk BETWEEN DynamicValue(RS_98_customer_address_ca_address_sk_min) AND DynamicValue(RS_98_customer_address_ca_address_sk_max) and in_bloom_filter(ws_bill_addr_sk, DynamicValue(RS_98_customer_address_ca_address_sk_bloom_filter))) and (ws_item_sk BETWEEN DynamicValue(RS_104_item_i_item_sk_min) AND DynamicValue(RS_104_item_i_item_sk_max) and in_bloom_filter(ws_item_sk, DynamicValue(RS_104_item_i_item_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_95_date_dim_d_date_sk_min) AND DynamicValue(RS_95_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_95_date_dim_d_date_sk_bloom_filter))) and ws_bill_addr_sk is not null and ws_item_sk is not null and ws_sold_date_sk is not null)
-                                              TableScan [TS_85] (rows=144002668 width=135)
-                                                default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_bill_addr_sk","ws_ext_sales_price"]
-                                              <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_388]
-                                                  Group By Operator [GBY_387] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Reducer 2 [CUSTOM_SIMPLE_EDGE]
-                                                    SHUFFLE [RS_280]
-                                                      Group By Operator [GBY_279] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_278] (rows=508200 width=1436)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_293]
-                                              <-Reducer 27 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_384]
-                                                  Group By Operator [GBY_383] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_339]
-                                                      Group By Operator [GBY_336] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_333] (rows=18262 width=1119)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_327]
-                                              <-Reducer 31 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_386]
-                                                  Group By Operator [GBY_385] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=20000000)"]
-                                                  <-Map 28 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_355]
-                                                      Group By Operator [GBY_352] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=20000000)"]
-                                                        Select Operator [SEL_349] (rows=20000000 width=1014)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_343]
+                      Top N Key Operator [TNK_399] (rows=670816147 width=108)
+                        keys:_col0,sort order:+,top n:100
+                        Group By Operator [GBY_398] (rows=95833781 width=135)
+                          Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
+                        <-Reducer 12 [SIMPLE_EDGE]
+                          SHUFFLE [RS_109]
+                            PartitionCols:_col0
+                            Group By Operator [GBY_108] (rows=191667562 width=135)
+                              Output:["_col0","_col1"],aggregations:["sum(_col8)"],keys:_col1
+                              Merge Join Operator [MERGEJOIN_305] (rows=191667562 width=135)
+                                Conds:RS_104._col0=RS_105._col3(Inner),Output:["_col1","_col8"]
+                              <-Reducer 2 [SIMPLE_EDGE]
+                                SHUFFLE [RS_104]
+                                  PartitionCols:_col0
+                                   Please refer to the previous Merge Join Operator [MERGEJOIN_294]
+                              <-Reducer 26 [SIMPLE_EDGE]
+                                SHUFFLE [RS_105]
+                                  PartitionCols:_col3
+                                  Select Operator [SEL_100] (rows=174243235 width=135)
+                                    Output:["_col3","_col5"]
+                                    Merge Join Operator [MERGEJOIN_302] (rows=174243235 width=135)
+                                      Conds:RS_97._col2=RS_352._col0(Inner),Output:["_col1","_col3"]
+                                    <-Map 28 [SIMPLE_EDGE] vectorized
+                                      PARTITION_ONLY_SHUFFLE [RS_352]
+                                        PartitionCols:_col0
+                                         Please refer to the previous Select Operator [SEL_347]
+                                    <-Reducer 25 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_97]
+                                        PartitionCols:_col2
+                                        Merge Join Operator [MERGEJOIN_301] (rows=158402938 width=135)
+                                          Conds:RS_397._col0=RS_336._col0(Inner),Output:["_col1","_col2","_col3"]
+                                        <-Map 20 [SIMPLE_EDGE] vectorized
+                                          PARTITION_ONLY_SHUFFLE [RS_336]
+                                            PartitionCols:_col0
+                                             Please refer to the previous Select Operator [SEL_331]
+                                        <-Map 33 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_397]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_396] (rows=144002668 width=135)
+                                              Output:["_col0","_col1","_col2","_col3"]
+                                              Filter Operator [FIL_395] (rows=144002668 width=135)
+                                                predicate:((ws_bill_addr_sk BETWEEN DynamicValue(RS_98_customer_address_ca_address_sk_min) AND DynamicValue(RS_98_customer_address_ca_address_sk_max) and in_bloom_filter(ws_bill_addr_sk, DynamicValue(RS_98_customer_address_ca_address_sk_bloom_filter))) and (ws_item_sk BETWEEN DynamicValue(RS_104_item_i_item_sk_min) AND DynamicValue(RS_104_item_i_item_sk_max) and in_bloom_filter(ws_item_sk, DynamicValue(RS_104_item_i_item_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_95_date_dim_d_date_sk_min) AND DynamicValue(RS_95_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_95_date_dim_d_date_sk_bloom_filter))) and ws_bill_addr_sk is not null and ws_item_sk is not null and ws_sold_date_sk is not null)
+                                                TableScan [TS_85] (rows=144002668 width=135)
+                                                  default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_bill_addr_sk","ws_ext_sales_price"]
+                                                <-Reducer 14 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_394]
+                                                    Group By Operator [GBY_393] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Reducer 2 [CUSTOM_SIMPLE_EDGE]
+                                                      SHUFFLE [RS_281]
+                                                        Group By Operator [GBY_280] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_279] (rows=508200 width=1436)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Merge Join Operator [MERGEJOIN_294]
+                                                <-Reducer 27 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_390]
+                                                    Group By Operator [GBY_389] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      PARTITION_ONLY_SHUFFLE [RS_343]
+                                                        Group By Operator [GBY_340] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_337] (rows=18262 width=1119)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_331]
+                                                <-Reducer 31 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_392]
+                                                    Group By Operator [GBY_391] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=20000000)"]
+                                                    <-Map 28 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      PARTITION_ONLY_SHUFFLE [RS_359]
+                                                        Group By Operator [GBY_356] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=20000000)"]
+                                                          Select Operator [SEL_353] (rows=20000000 width=1014)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_347]
                 <-Reducer 4 [CONTAINS] vectorized
-                  Reduce Output Operator [RS_365]
+                  Reduce Output Operator [RS_370]
                     PartitionCols:_col0
-                    Group By Operator [GBY_364] (rows=670816147 width=108)
+                    Group By Operator [GBY_369] (rows=670816147 width=108)
                       Output:["_col0","_col1"],aggregations:["sum(_col1)"],keys:_col0
-                      Group By Operator [GBY_363] (rows=383325119 width=88)
-                        Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
-                      <-Reducer 3 [SIMPLE_EDGE]
-                        SHUFFLE [RS_34]
-                          PartitionCols:_col0
-                          Group By Operator [GBY_33] (rows=766650239 width=88)
-                            Output:["_col0","_col1"],aggregations:["sum(_col8)"],keys:_col1
-                            Merge Join Operator [MERGEJOIN_302] (rows=766650239 width=88)
-                              Conds:RS_29._col0=RS_30._col3(Inner),Output:["_col1","_col8"]
-                            <-Reducer 2 [SIMPLE_EDGE]
-                              SHUFFLE [RS_29]
-                                PartitionCols:_col0
-                                 Please refer to the previous Merge Join Operator [MERGEJOIN_293]
-                            <-Reducer 19 [SIMPLE_EDGE]
-                              SHUFFLE [RS_30]
-                                PartitionCols:_col3
-                                Select Operator [SEL_25] (rows=696954748 width=88)
-                                  Output:["_col3","_col5"]
-                                  Merge Join Operator [MERGEJOIN_295] (rows=696954748 width=88)
-                                    Conds:RS_22._col2=RS_344._col0(Inner),Output:["_col1","_col3"]
-                                  <-Map 28 [SIMPLE_EDGE] vectorized
-                                    PARTITION_ONLY_SHUFFLE [RS_344]
-                                      PartitionCols:_col0
-                                       Please refer to the previous Select Operator [SEL_343]
-                                  <-Reducer 18 [SIMPLE_EDGE]
-                                    SHUFFLE [RS_22]
-                                      PartitionCols:_col2
-                                      Merge Join Operator [MERGEJOIN_294] (rows=633595212 width=88)
-                                        Conds:RS_362._col0=RS_328._col0(Inner),Output:["_col1","_col2","_col3"]
-                                      <-Map 20 [SIMPLE_EDGE] vectorized
-                                        PARTITION_ONLY_SHUFFLE [RS_328]
-                                          PartitionCols:_col0
-                                           Please refer to the previous Select Operator [SEL_327]
-                                      <-Map 17 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_362]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_361] (rows=575995635 width=88)
-                                            Output:["_col0","_col1","_col2","_col3"]
-                                            Filter Operator [FIL_360] (rows=575995635 width=88)
-                                              predicate:((ss_addr_sk BETWEEN DynamicValue(RS_23_customer_address_ca_address_sk_min) AND DynamicValue(RS_23_customer_address_ca_address_sk_max) and in_bloom_filter(ss_addr_sk, DynamicValue(RS_23_customer_address_ca_address_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_29_item_i_item_sk_min) AND DynamicValue(RS_29_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_29_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_20_date_dim_d_date_sk_min) AND DynamicValue(RS_20_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_20_date_dim_d_date_sk_bloom_filter))) and ss_addr_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null)
-                                              TableScan [TS_10] (rows=575995635 width=88)
-                                                default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_addr_sk","ss_ext_sales_price"]
-                                              <-Reducer 21 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_341]
-                                                  Group By Operator [GBY_340] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_337]
-                                                      Group By Operator [GBY_334] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_329] (rows=18262 width=1119)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_327]
-                                              <-Reducer 29 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_357]
-                                                  Group By Operator [GBY_356] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=20000000)"]
-                                                  <-Map 28 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_353]
-                                                      Group By Operator [GBY_350] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=20000000)"]
-                                                        Select Operator [SEL_345] (rows=20000000 width=1014)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_343]
-                                              <-Reducer 8 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_359]
-                                                  Group By Operator [GBY_358] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Reducer 2 [CUSTOM_SIMPLE_EDGE]
-                                                    SHUFFLE [RS_200]
-                                                      Group By Operator [GBY_199] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_198] (rows=508200 width=1436)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_293]
+                      Top N Key Operator [TNK_368] (rows=670816147 width=108)
+                        keys:_col0,sort order:+,top n:100
+                        Group By Operator [GBY_367] (rows=383325119 width=88)
+                          Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
+                        <-Reducer 3 [SIMPLE_EDGE]
+                          SHUFFLE [RS_34]
+                            PartitionCols:_col0
+                            Group By Operator [GBY_33] (rows=766650239 width=88)
+                              Output:["_col0","_col1"],aggregations:["sum(_col8)"],keys:_col1
+                              Merge Join Operator [MERGEJOIN_303] (rows=766650239 width=88)
+                                Conds:RS_29._col0=RS_30._col3(Inner),Output:["_col1","_col8"]
+                              <-Reducer 2 [SIMPLE_EDGE]
+                                SHUFFLE [RS_29]
+                                  PartitionCols:_col0
+                                   Please refer to the previous Merge Join Operator [MERGEJOIN_294]
+                              <-Reducer 19 [SIMPLE_EDGE]
+                                SHUFFLE [RS_30]
+                                  PartitionCols:_col3
+                                  Select Operator [SEL_25] (rows=696954748 width=88)
+                                    Output:["_col3","_col5"]
+                                    Merge Join Operator [MERGEJOIN_296] (rows=696954748 width=88)
+                                      Conds:RS_22._col2=RS_348._col0(Inner),Output:["_col1","_col3"]
+                                    <-Map 28 [SIMPLE_EDGE] vectorized
+                                      PARTITION_ONLY_SHUFFLE [RS_348]
+                                        PartitionCols:_col0
+                                         Please refer to the previous Select Operator [SEL_347]
+                                    <-Reducer 18 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_22]
+                                        PartitionCols:_col2
+                                        Merge Join Operator [MERGEJOIN_295] (rows=633595212 width=88)
+                                          Conds:RS_366._col0=RS_332._col0(Inner),Output:["_col1","_col2","_col3"]
+                                        <-Map 20 [SIMPLE_EDGE] vectorized
+                                          PARTITION_ONLY_SHUFFLE [RS_332]
+                                            PartitionCols:_col0
+                                             Please refer to the previous Select Operator [SEL_331]
+                                        <-Map 17 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_366]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_365] (rows=575995635 width=88)
+                                              Output:["_col0","_col1","_col2","_col3"]
+                                              Filter Operator [FIL_364] (rows=575995635 width=88)
+                                                predicate:((ss_addr_sk BETWEEN DynamicValue(RS_23_customer_address_ca_address_sk_min) AND DynamicValue(RS_23_customer_address_ca_address_sk_max) and in_bloom_filter(ss_addr_sk, DynamicValue(RS_23_customer_address_ca_address_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_29_item_i_item_sk_min) AND DynamicValue(RS_29_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_29_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_20_date_dim_d_date_sk_min) AND DynamicValue(RS_20_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_20_date_dim_d_date_sk_bloom_filter))) and ss_addr_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null)
+                                                TableScan [TS_10] (rows=575995635 width=88)
+                                                  default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_addr_sk","ss_ext_sales_price"]
+                                                <-Reducer 21 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_345]
+                                                    Group By Operator [GBY_344] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      PARTITION_ONLY_SHUFFLE [RS_341]
+                                                        Group By Operator [GBY_338] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_333] (rows=18262 width=1119)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_331]
+                                                <-Reducer 29 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_361]
+                                                    Group By Operator [GBY_360] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=20000000)"]
+                                                    <-Map 28 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      PARTITION_ONLY_SHUFFLE [RS_357]
+                                                        Group By Operator [GBY_354] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=20000000)"]
+                                                          Select Operator [SEL_349] (rows=20000000 width=1014)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_347]
+                                                <-Reducer 8 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_363]
+                                                    Group By Operator [GBY_362] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Reducer 2 [CUSTOM_SIMPLE_EDGE]
+                                                      SHUFFLE [RS_201]
+                                                        Group By Operator [GBY_200] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_199] (rows=508200 width=1436)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Merge Join Operator [MERGEJOIN_294]
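[Illustrative note, not part of the committed diff.] The updated plans above now contain a Top N Key Operator (for example TNK_399 with keys:_col0, sort order:+, top n:100) placed on the output of each branch's reduce-side aggregation, before that output is shuffled into the final union-level aggregation. As I read the tree, it tracks the running top 100 key values so rows whose keys can no longer reach the final top 100 (matching limit:100 in Stage-0) can be dropped early. As a rough sketch only, using table and column names visible in the plan (the exact TPC-DS query text is not reproduced in this diff), the query shape that yields such a plan is an ordered, limited aggregation over item ids, for example:

    -- Hypothetical example for illustration; schema objects (store_sales, item)
    -- and columns are the ones referenced in the explain output above.
    SELECT i_item_id,
           SUM(ss_ext_sales_price) AS total_sales
    FROM store_sales
    JOIN item ON ss_item_sk = i_item_sk
    GROUP BY i_item_id
    ORDER BY i_item_id
    LIMIT 100;

With such a query, the top-n key filter can run ahead of the final sort because only the smallest 100 i_item_id keys can survive the ORDER BY ... LIMIT 100.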
 


[05/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query69.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query69.q.out b/ql/src/test/results/clientpositive/perf/tez/query69.q.out
index aad5b81..738508a 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query69.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query69.q.out
@@ -117,197 +117,199 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_232]
-        Limit [LIM_231] (rows=100 width=88)
+      File Output Operator [FS_233]
+        Limit [LIM_232] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_230] (rows=191662559 width=88)
+          Select Operator [SEL_231] (rows=191662559 width=88)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
           <-Reducer 6 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_229]
-              Select Operator [SEL_228] (rows=191662559 width=88)
+            SHUFFLE [RS_230]
+              Select Operator [SEL_229] (rows=191662559 width=88)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col6"]
-                Group By Operator [GBY_227] (rows=191662559 width=88)
+                Group By Operator [GBY_228] (rows=191662559 width=88)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4
                 <-Reducer 5 [SIMPLE_EDGE]
                   SHUFFLE [RS_67]
                     PartitionCols:_col0, _col1, _col2, _col3, _col4
                     Group By Operator [GBY_66] (rows=383325119 width=88)
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["count()"],keys:_col6, _col7, _col8, _col9, _col10
-                      Select Operator [SEL_65] (rows=383325119 width=88)
-                        Output:["_col6","_col7","_col8","_col9","_col10"]
-                        Filter Operator [FIL_64] (rows=383325119 width=88)
-                          predicate:_col14 is null
-                          Merge Join Operator [MERGEJOIN_180] (rows=766650239 width=88)
-                            Conds:RS_61._col0=RS_226._col0(Left Outer),Output:["_col6","_col7","_col8","_col9","_col10","_col14"]
-                          <-Reducer 19 [ONE_TO_ONE_EDGE] vectorized
-                            FORWARD [RS_226]
-                              PartitionCols:_col0
-                              Select Operator [SEL_225] (rows=158394413 width=135)
-                                Output:["_col0","_col1"]
-                                Group By Operator [GBY_224] (rows=158394413 width=135)
-                                  Output:["_col0"],keys:KEY._col0
-                                <-Reducer 18 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_58]
-                                    PartitionCols:_col0
-                                    Group By Operator [GBY_57] (rows=316788826 width=135)
-                                      Output:["_col0"],keys:_col1
-                                      Merge Join Operator [MERGEJOIN_178] (rows=316788826 width=135)
-                                        Conds:RS_223._col0=RS_196._col0(Inner),Output:["_col1"]
-                                      <-Map 13 [SIMPLE_EDGE] vectorized
-                                        PARTITION_ONLY_SHUFFLE [RS_196]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_191] (rows=4058 width=1119)
-                                            Output:["_col0"]
-                                            Filter Operator [FIL_190] (rows=4058 width=1119)
-                                              predicate:((d_year = 1999) and d_date_sk is not null and d_moy BETWEEN 1 AND 3)
-                                              TableScan [TS_12] (rows=73049 width=1119)
-                                                default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
-                                      <-Map 22 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_223]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_222] (rows=287989836 width=135)
-                                            Output:["_col0","_col1"]
-                                            Filter Operator [FIL_221] (rows=287989836 width=135)
-                                              predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_54_date_dim_d_date_sk_min) AND DynamicValue(RS_54_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_54_date_dim_d_date_sk_bloom_filter))) and cs_ship_customer_sk is not null and cs_sold_date_sk is not null)
-                                              TableScan [TS_47] (rows=287989836 width=135)
-                                                default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_ship_customer_sk"]
-                                              <-Reducer 20 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_220]
-                                                  Group By Operator [GBY_219] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_203]
-                                                      Group By Operator [GBY_200] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_197] (rows=4058 width=1119)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_191]
-                          <-Reducer 4 [ONE_TO_ONE_EDGE]
-                            FORWARD [RS_61]
-                              PartitionCols:_col0
-                              Select Operator [SEL_46] (rows=696954748 width=88)
-                                Output:["_col0","_col6","_col7","_col8","_col9","_col10"]
-                                Filter Operator [FIL_45] (rows=696954748 width=88)
-                                  predicate:_col12 is null
-                                  Merge Join Operator [MERGEJOIN_179] (rows=1393909496 width=88)
-                                    Conds:RS_41._col0=RS_42._col0(Left Semi),RS_41._col0=RS_218._col0(Left Outer),Output:["_col0","_col6","_col7","_col8","_col9","_col10","_col12"]
-                                  <-Reducer 3 [SIMPLE_EDGE]
-                                    PARTITION_ONLY_SHUFFLE [RS_41]
-                                      PartitionCols:_col0
-                                      Merge Join Operator [MERGEJOIN_175] (rows=96800003 width=860)
-                                        Conds:RS_36._col1=RS_189._col0(Inner),Output:["_col0","_col6","_col7","_col8","_col9","_col10"]
-                                      <-Map 10 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_189]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_188] (rows=1861800 width=385)
-                                            Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                            Filter Operator [FIL_187] (rows=1861800 width=385)
-                                              predicate:cd_demo_sk is not null
-                                              TableScan [TS_6] (rows=1861800 width=385)
-                                                default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_education_status","cd_purchase_estimate","cd_credit_rating"]
-                                      <-Reducer 2 [SIMPLE_EDGE]
-                                        SHUFFLE [RS_36]
-                                          PartitionCols:_col1
-                                          Merge Join Operator [MERGEJOIN_174] (rows=88000001 width=860)
-                                            Conds:RS_183._col2=RS_186._col0(Inner),Output:["_col0","_col1"]
-                                          <-Map 1 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_183]
-                                              PartitionCols:_col2
-                                              Select Operator [SEL_182] (rows=80000000 width=860)
-                                                Output:["_col0","_col1","_col2"]
-                                                Filter Operator [FIL_181] (rows=80000000 width=860)
-                                                  predicate:(c_current_addr_sk is not null and c_current_cdemo_sk is not null and c_customer_sk is not null)
-                                                  TableScan [TS_0] (rows=80000000 width=860)
-                                                    default@customer,c,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_cdemo_sk","c_current_addr_sk"]
-                                          <-Map 9 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_186]
-                                              PartitionCols:_col0
-                                              Select Operator [SEL_185] (rows=20000000 width=1014)
-                                                Output:["_col0"]
-                                                Filter Operator [FIL_184] (rows=20000000 width=1014)
-                                                  predicate:((ca_state) IN ('CO', 'IL', 'MN') and ca_address_sk is not null)
-                                                  TableScan [TS_3] (rows=40000000 width=1014)
-                                                    default@customer_address,ca,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state"]
-                                  <-Reducer 12 [SIMPLE_EDGE]
-                                    SHUFFLE [RS_42]
-                                      PartitionCols:_col0
-                                      Group By Operator [GBY_40] (rows=633595212 width=88)
-                                        Output:["_col0"],keys:_col0
-                                        Select Operator [SEL_18] (rows=633595212 width=88)
-                                          Output:["_col0"]
-                                          Merge Join Operator [MERGEJOIN_176] (rows=633595212 width=88)
-                                            Conds:RS_210._col0=RS_192._col0(Inner),Output:["_col1"]
-                                          <-Map 13 [SIMPLE_EDGE] vectorized
-                                            PARTITION_ONLY_SHUFFLE [RS_192]
-                                              PartitionCols:_col0
-                                               Please refer to the previous Select Operator [SEL_191]
-                                          <-Map 11 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_210]
-                                              PartitionCols:_col0
-                                              Select Operator [SEL_209] (rows=575995635 width=88)
-                                                Output:["_col0","_col1"]
-                                                Filter Operator [FIL_208] (rows=575995635 width=88)
-                                                  predicate:((ss_customer_sk BETWEEN DynamicValue(RS_41_c_c_customer_sk_min) AND DynamicValue(RS_41_c_c_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_41_c_c_customer_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_16_date_dim_d_date_sk_min) AND DynamicValue(RS_16_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_16_date_dim_d_date_sk_bloom_filter))) and ss_customer_sk is not null and ss_sold_date_sk is not null)
-                                                  TableScan [TS_9] (rows=575995635 width=88)
-                                                    default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_customer_sk"]
-                                                  <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                                    BROADCAST [RS_205]
-                                                      Group By Operator [GBY_204] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                      <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                        PARTITION_ONLY_SHUFFLE [RS_201]
-                                                          Group By Operator [GBY_198] (rows=1 width=12)
-                                                            Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                            Select Operator [SEL_193] (rows=4058 width=1119)
-                                                              Output:["_col0"]
-                                                               Please refer to the previous Select Operator [SEL_191]
-                                                  <-Reducer 8 [BROADCAST_EDGE] vectorized
-                                                    BROADCAST [RS_207]
-                                                      Group By Operator [GBY_206] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=96800000)"]
-                                                      <-Reducer 3 [CUSTOM_SIMPLE_EDGE]
-                                                        PARTITION_ONLY_SHUFFLE [RS_137]
-                                                          Group By Operator [GBY_136] (rows=1 width=12)
-                                                            Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=96800000)"]
-                                                            Select Operator [SEL_135] (rows=96800003 width=860)
-                                                              Output:["_col0"]
-                                                               Please refer to the previous Merge Join Operator [MERGEJOIN_175]
-                                  <-Reducer 16 [ONE_TO_ONE_EDGE] vectorized
-                                    FORWARD [RS_218]
+                      Top N Key Operator [TNK_105] (rows=383325119 width=88)
+                        keys:_col6, _col7, _col8, _col9, _col10,sort order:+++++,top n:100
+                        Select Operator [SEL_65] (rows=383325119 width=88)
+                          Output:["_col6","_col7","_col8","_col9","_col10"]
+                          Filter Operator [FIL_64] (rows=383325119 width=88)
+                            predicate:_col14 is null
+                            Merge Join Operator [MERGEJOIN_181] (rows=766650239 width=88)
+                              Conds:RS_61._col0=RS_227._col0(Left Outer),Output:["_col6","_col7","_col8","_col9","_col10","_col14"]
+                            <-Reducer 19 [ONE_TO_ONE_EDGE] vectorized
+                              FORWARD [RS_227]
+                                PartitionCols:_col0
+                                Select Operator [SEL_226] (rows=158394413 width=135)
+                                  Output:["_col0","_col1"]
+                                  Group By Operator [GBY_225] (rows=158394413 width=135)
+                                    Output:["_col0"],keys:KEY._col0
+                                  <-Reducer 18 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_58]
                                       PartitionCols:_col0
-                                      Select Operator [SEL_217] (rows=79201469 width=135)
-                                        Output:["_col0","_col1"]
-                                        Group By Operator [GBY_216] (rows=79201469 width=135)
-                                          Output:["_col0"],keys:KEY._col0
-                                        <-Reducer 15 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_30]
+                                      Group By Operator [GBY_57] (rows=316788826 width=135)
+                                        Output:["_col0"],keys:_col1
+                                        Merge Join Operator [MERGEJOIN_179] (rows=316788826 width=135)
+                                          Conds:RS_224._col0=RS_197._col0(Inner),Output:["_col1"]
+                                        <-Map 13 [SIMPLE_EDGE] vectorized
+                                          PARTITION_ONLY_SHUFFLE [RS_197]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_192] (rows=4058 width=1119)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_191] (rows=4058 width=1119)
+                                                predicate:((d_year = 1999) and d_date_sk is not null and d_moy BETWEEN 1 AND 3)
+                                                TableScan [TS_12] (rows=73049 width=1119)
+                                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
+                                        <-Map 22 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_224]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_223] (rows=287989836 width=135)
+                                              Output:["_col0","_col1"]
+                                              Filter Operator [FIL_222] (rows=287989836 width=135)
+                                                predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_54_date_dim_d_date_sk_min) AND DynamicValue(RS_54_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_54_date_dim_d_date_sk_bloom_filter))) and cs_ship_customer_sk is not null and cs_sold_date_sk is not null)
+                                                TableScan [TS_47] (rows=287989836 width=135)
+                                                  default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_ship_customer_sk"]
+                                                <-Reducer 20 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_221]
+                                                    Group By Operator [GBY_220] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      PARTITION_ONLY_SHUFFLE [RS_204]
+                                                        Group By Operator [GBY_201] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_198] (rows=4058 width=1119)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_192]
+                            <-Reducer 4 [ONE_TO_ONE_EDGE]
+                              FORWARD [RS_61]
+                                PartitionCols:_col0
+                                Select Operator [SEL_46] (rows=696954748 width=88)
+                                  Output:["_col0","_col6","_col7","_col8","_col9","_col10"]
+                                  Filter Operator [FIL_45] (rows=696954748 width=88)
+                                    predicate:_col12 is null
+                                    Merge Join Operator [MERGEJOIN_180] (rows=1393909496 width=88)
+                                      Conds:RS_41._col0=RS_42._col0(Left Semi),RS_41._col0=RS_219._col0(Left Outer),Output:["_col0","_col6","_col7","_col8","_col9","_col10","_col12"]
+                                    <-Reducer 3 [SIMPLE_EDGE]
+                                      PARTITION_ONLY_SHUFFLE [RS_41]
+                                        PartitionCols:_col0
+                                        Merge Join Operator [MERGEJOIN_176] (rows=96800003 width=860)
+                                          Conds:RS_36._col1=RS_190._col0(Inner),Output:["_col0","_col6","_col7","_col8","_col9","_col10"]
+                                        <-Map 10 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_190]
                                             PartitionCols:_col0
-                                            Group By Operator [GBY_29] (rows=158402938 width=135)
-                                              Output:["_col0"],keys:_col1
-                                              Merge Join Operator [MERGEJOIN_177] (rows=158402938 width=135)
-                                                Conds:RS_215._col0=RS_194._col0(Inner),Output:["_col1"]
-                                              <-Map 13 [SIMPLE_EDGE] vectorized
-                                                PARTITION_ONLY_SHUFFLE [RS_194]
-                                                  PartitionCols:_col0
-                                                   Please refer to the previous Select Operator [SEL_191]
-                                              <-Map 21 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_215]
-                                                  PartitionCols:_col0
-                                                  Select Operator [SEL_214] (rows=144002668 width=135)
-                                                    Output:["_col0","_col1"]
-                                                    Filter Operator [FIL_213] (rows=144002668 width=135)
-                                                      predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_26_date_dim_d_date_sk_min) AND DynamicValue(RS_26_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_26_date_dim_d_date_sk_bloom_filter))) and ws_bill_customer_sk is not null and ws_sold_date_sk is not null)
-                                                      TableScan [TS_19] (rows=144002668 width=135)
-                                                        default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_bill_customer_sk"]
-                                                      <-Reducer 17 [BROADCAST_EDGE] vectorized
-                                                        BROADCAST [RS_212]
-                                                          Group By Operator [GBY_211] (rows=1 width=12)
-                                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                          <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                            PARTITION_ONLY_SHUFFLE [RS_202]
-                                                              Group By Operator [GBY_199] (rows=1 width=12)
-                                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                Select Operator [SEL_195] (rows=4058 width=1119)
-                                                                  Output:["_col0"]
-                                                                   Please refer to the previous Select Operator [SEL_191]
+                                            Select Operator [SEL_189] (rows=1861800 width=385)
+                                              Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                              Filter Operator [FIL_188] (rows=1861800 width=385)
+                                                predicate:cd_demo_sk is not null
+                                                TableScan [TS_6] (rows=1861800 width=385)
+                                                  default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_education_status","cd_purchase_estimate","cd_credit_rating"]
+                                        <-Reducer 2 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_36]
+                                            PartitionCols:_col1
+                                            Merge Join Operator [MERGEJOIN_175] (rows=88000001 width=860)
+                                              Conds:RS_184._col2=RS_187._col0(Inner),Output:["_col0","_col1"]
+                                            <-Map 1 [SIMPLE_EDGE] vectorized
+                                              SHUFFLE [RS_184]
+                                                PartitionCols:_col2
+                                                Select Operator [SEL_183] (rows=80000000 width=860)
+                                                  Output:["_col0","_col1","_col2"]
+                                                  Filter Operator [FIL_182] (rows=80000000 width=860)
+                                                    predicate:(c_current_addr_sk is not null and c_current_cdemo_sk is not null and c_customer_sk is not null)
+                                                    TableScan [TS_0] (rows=80000000 width=860)
+                                                      default@customer,c,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_cdemo_sk","c_current_addr_sk"]
+                                            <-Map 9 [SIMPLE_EDGE] vectorized
+                                              SHUFFLE [RS_187]
+                                                PartitionCols:_col0
+                                                Select Operator [SEL_186] (rows=20000000 width=1014)
+                                                  Output:["_col0"]
+                                                  Filter Operator [FIL_185] (rows=20000000 width=1014)
+                                                    predicate:((ca_state) IN ('CO', 'IL', 'MN') and ca_address_sk is not null)
+                                                    TableScan [TS_3] (rows=40000000 width=1014)
+                                                      default@customer_address,ca,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state"]
+                                    <-Reducer 12 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_42]
+                                        PartitionCols:_col0
+                                        Group By Operator [GBY_40] (rows=633595212 width=88)
+                                          Output:["_col0"],keys:_col0
+                                          Select Operator [SEL_18] (rows=633595212 width=88)
+                                            Output:["_col0"]
+                                            Merge Join Operator [MERGEJOIN_177] (rows=633595212 width=88)
+                                              Conds:RS_211._col0=RS_193._col0(Inner),Output:["_col1"]
+                                            <-Map 13 [SIMPLE_EDGE] vectorized
+                                              PARTITION_ONLY_SHUFFLE [RS_193]
+                                                PartitionCols:_col0
+                                                 Please refer to the previous Select Operator [SEL_192]
+                                            <-Map 11 [SIMPLE_EDGE] vectorized
+                                              SHUFFLE [RS_211]
+                                                PartitionCols:_col0
+                                                Select Operator [SEL_210] (rows=575995635 width=88)
+                                                  Output:["_col0","_col1"]
+                                                  Filter Operator [FIL_209] (rows=575995635 width=88)
+                                                    predicate:((ss_customer_sk BETWEEN DynamicValue(RS_41_c_c_customer_sk_min) AND DynamicValue(RS_41_c_c_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_41_c_c_customer_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_16_date_dim_d_date_sk_min) AND DynamicValue(RS_16_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_16_date_dim_d_date_sk_bloom_filter))) and ss_customer_sk is not null and ss_sold_date_sk is not null)
+                                                    TableScan [TS_9] (rows=575995635 width=88)
+                                                      default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_customer_sk"]
+                                                    <-Reducer 14 [BROADCAST_EDGE] vectorized
+                                                      BROADCAST [RS_206]
+                                                        Group By Operator [GBY_205] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                        <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                          PARTITION_ONLY_SHUFFLE [RS_202]
+                                                            Group By Operator [GBY_199] (rows=1 width=12)
+                                                              Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                              Select Operator [SEL_194] (rows=4058 width=1119)
+                                                                Output:["_col0"]
+                                                                 Please refer to the previous Select Operator [SEL_192]
+                                                    <-Reducer 8 [BROADCAST_EDGE] vectorized
+                                                      BROADCAST [RS_208]
+                                                        Group By Operator [GBY_207] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=96800000)"]
+                                                        <-Reducer 3 [CUSTOM_SIMPLE_EDGE]
+                                                          PARTITION_ONLY_SHUFFLE [RS_138]
+                                                            Group By Operator [GBY_137] (rows=1 width=12)
+                                                              Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=96800000)"]
+                                                              Select Operator [SEL_136] (rows=96800003 width=860)
+                                                                Output:["_col0"]
+                                                                 Please refer to the previous Merge Join Operator [MERGEJOIN_176]
+                                    <-Reducer 16 [ONE_TO_ONE_EDGE] vectorized
+                                      FORWARD [RS_219]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_218] (rows=79201469 width=135)
+                                          Output:["_col0","_col1"]
+                                          Group By Operator [GBY_217] (rows=79201469 width=135)
+                                            Output:["_col0"],keys:KEY._col0
+                                          <-Reducer 15 [SIMPLE_EDGE]
+                                            SHUFFLE [RS_30]
+                                              PartitionCols:_col0
+                                              Group By Operator [GBY_29] (rows=158402938 width=135)
+                                                Output:["_col0"],keys:_col1
+                                                Merge Join Operator [MERGEJOIN_178] (rows=158402938 width=135)
+                                                  Conds:RS_216._col0=RS_195._col0(Inner),Output:["_col1"]
+                                                <-Map 13 [SIMPLE_EDGE] vectorized
+                                                  PARTITION_ONLY_SHUFFLE [RS_195]
+                                                    PartitionCols:_col0
+                                                     Please refer to the previous Select Operator [SEL_192]
+                                                <-Map 21 [SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_216]
+                                                    PartitionCols:_col0
+                                                    Select Operator [SEL_215] (rows=144002668 width=135)
+                                                      Output:["_col0","_col1"]
+                                                      Filter Operator [FIL_214] (rows=144002668 width=135)
+                                                        predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_26_date_dim_d_date_sk_min) AND DynamicValue(RS_26_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_26_date_dim_d_date_sk_bloom_filter))) and ws_bill_customer_sk is not null and ws_sold_date_sk is not null)
+                                                        TableScan [TS_19] (rows=144002668 width=135)
+                                                          default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_bill_customer_sk"]
+                                                        <-Reducer 17 [BROADCAST_EDGE] vectorized
+                                                          BROADCAST [RS_213]
+                                                            Group By Operator [GBY_212] (rows=1 width=12)
+                                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                            <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                              PARTITION_ONLY_SHUFFLE [RS_203]
+                                                                Group By Operator [GBY_200] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                  Select Operator [SEL_196] (rows=4058 width=1119)
+                                                                    Output:["_col0"]
+                                                                     Please refer to the previous Select Operator [SEL_192]
 

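The updated plan above shows the new operator in place: Top N Key Operator [TNK_105] carries the grouping keys (_col6.._col10), an ascending sort order, and top n:100, and it sits just below the Group By that produces the grouped result, so rows whose keys cannot reach the top 100 can be dropped before they are aggregated and shuffled. As a rough, hypothetical illustration of the query shape that appears to trigger this (table t and columns k1, k2 are made up here, not the benchmark schema):

    -- Hypothetical example only: an aggregate query that groups and orders by the
    -- same keys and ends in a LIMIT lets the optimizer insert a Top N Key filter
    -- below the shuffle feeding the aggregation.
    SELECT k1, k2, COUNT(*) AS cnt
    FROM t
    GROUP BY k1, k2
    ORDER BY k1, k2
    LIMIT 100;
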
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query7.q.out b/ql/src/test/results/clientpositive/perf/tez/query7.q.out
index 2bb39dd..c78e1e6 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query7.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query7.q.out
@@ -58,126 +58,128 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_139]
-        Limit [LIM_138] (rows=100 width=88)
+      File Output Operator [FS_140]
+        Limit [LIM_139] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_137] (rows=421657640 width=88)
+          Select Operator [SEL_138] (rows=421657640 width=88)
             Output:["_col0","_col1","_col2","_col3","_col4"]
           <-Reducer 6 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_136]
-              Select Operator [SEL_135] (rows=421657640 width=88)
+            SHUFFLE [RS_137]
+              Select Operator [SEL_136] (rows=421657640 width=88)
                 Output:["_col0","_col1","_col2","_col3","_col4"]
-                Group By Operator [GBY_134] (rows=421657640 width=88)
+                Group By Operator [GBY_135] (rows=421657640 width=88)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)","sum(VALUE._col2)","count(VALUE._col3)","sum(VALUE._col4)","count(VALUE._col5)","sum(VALUE._col6)","count(VALUE._col7)"],keys:KEY._col0
                 <-Reducer 5 [SIMPLE_EDGE]
                   SHUFFLE [RS_29]
                     PartitionCols:_col0
                     Group By Operator [GBY_28] (rows=843315281 width=88)
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"],aggregations:["sum(_col4)","count(_col4)","sum(_col5)","count(_col5)","sum(_col7)","count(_col7)","sum(_col6)","count(_col6)"],keys:_col18
-                      Merge Join Operator [MERGEJOIN_98] (rows=843315281 width=88)
-                        Conds:RS_24._col1=RS_125._col0(Inner),Output:["_col4","_col5","_col6","_col7","_col18"]
-                      <-Map 14 [SIMPLE_EDGE] vectorized
-                        SHUFFLE [RS_125]
-                          PartitionCols:_col0
-                          Select Operator [SEL_124] (rows=462000 width=1436)
-                            Output:["_col0","_col1"]
-                            Filter Operator [FIL_123] (rows=462000 width=1436)
-                              predicate:i_item_sk is not null
-                              TableScan [TS_12] (rows=462000 width=1436)
-                                default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id"]
-                      <-Reducer 4 [SIMPLE_EDGE]
-                        SHUFFLE [RS_24]
-                          PartitionCols:_col1
-                          Merge Join Operator [MERGEJOIN_97] (rows=766650239 width=88)
-                            Conds:RS_21._col3=RS_117._col0(Inner),Output:["_col1","_col4","_col5","_col6","_col7"]
-                          <-Map 12 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_117]
-                              PartitionCols:_col0
-                              Select Operator [SEL_116] (rows=2300 width=1179)
-                                Output:["_col0"]
-                                Filter Operator [FIL_115] (rows=2300 width=1179)
-                                  predicate:(((p_channel_email = 'N') or (p_channel_event = 'N')) and p_promo_sk is not null)
-                                  TableScan [TS_9] (rows=2300 width=1179)
-                                    default@promotion,promotion,Tbl:COMPLETE,Col:NONE,Output:["p_promo_sk","p_channel_email","p_channel_event"]
-                          <-Reducer 3 [SIMPLE_EDGE]
-                            SHUFFLE [RS_21]
-                              PartitionCols:_col3
-                              Merge Join Operator [MERGEJOIN_96] (rows=696954748 width=88)
-                                Conds:RS_18._col0=RS_109._col0(Inner),Output:["_col1","_col3","_col4","_col5","_col6","_col7"]
-                              <-Map 10 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_109]
-                                  PartitionCols:_col0
-                                  Select Operator [SEL_108] (rows=36524 width=1119)
-                                    Output:["_col0"]
-                                    Filter Operator [FIL_107] (rows=36524 width=1119)
-                                      predicate:((d_year = 1998) and d_date_sk is not null)
-                                      TableScan [TS_6] (rows=73049 width=1119)
-                                        default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
-                              <-Reducer 2 [SIMPLE_EDGE]
-                                SHUFFLE [RS_18]
-                                  PartitionCols:_col0
-                                  Merge Join Operator [MERGEJOIN_95] (rows=633595212 width=88)
-                                    Conds:RS_133._col2=RS_101._col0(Inner),Output:["_col0","_col1","_col3","_col4","_col5","_col6","_col7"]
-                                  <-Map 8 [SIMPLE_EDGE] vectorized
-                                    PARTITION_ONLY_SHUFFLE [RS_101]
-                                      PartitionCols:_col0
-                                      Select Operator [SEL_100] (rows=232725 width=385)
-                                        Output:["_col0"]
-                                        Filter Operator [FIL_99] (rows=232725 width=385)
-                                          predicate:((cd_education_status = 'Primary') and (cd_gender = 'F') and (cd_marital_status = 'W') and cd_demo_sk is not null)
-                                          TableScan [TS_3] (rows=1861800 width=385)
-                                            default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_education_status"]
-                                  <-Map 1 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_133]
-                                      PartitionCols:_col2
-                                      Select Operator [SEL_132] (rows=575995635 width=88)
-                                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
-                                        Filter Operator [FIL_131] (rows=575995635 width=88)
-                                          predicate:((ss_cdemo_sk BETWEEN DynamicValue(RS_16_customer_demographics_cd_demo_sk_min) AND DynamicValue(RS_16_customer_demographics_cd_demo_sk_max) and in_bloom_filter(ss_cdemo_sk, DynamicValue(RS_16_customer_demographics_cd_demo_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_25_item_i_item_sk_min) AND DynamicValue(RS_25_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_25_item_i_item_sk_bloom_filter))) and (ss_promo_sk BETWEEN DynamicValue(RS_22_promotion_p_promo_sk_min) AND DynamicValue(RS_22_promotion_p_promo_sk_max) and in_bloom_filter(ss_promo_sk, DynamicValue(RS_22_promotion_p_promo_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_19_date_dim_d_date_sk_min) AND DynamicValue(RS_19_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_19_date_dim_d_date_sk_bloom_filter))) and ss_cdemo_sk is not null and ss_item_sk is not null and ss_promo_sk is not null and ss_sold_date_sk is not null)
-                                          TableScan [TS_0] (rows=575995635 width=88)
-                                            default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_cdemo_sk","ss_promo_sk","ss_quantity","ss_list_price","ss_sales_price","ss_coupon_amt"]
-                                          <-Reducer 11 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_114]
-                                              Group By Operator [GBY_113] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                              <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_112]
-                                                  Group By Operator [GBY_111] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                    Select Operator [SEL_110] (rows=36524 width=1119)
-                                                      Output:["_col0"]
-                                                       Please refer to the previous Select Operator [SEL_108]
-                                          <-Reducer 13 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_122]
-                                              Group By Operator [GBY_121] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                              <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_120]
-                                                  Group By Operator [GBY_119] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                    Select Operator [SEL_118] (rows=2300 width=1179)
-                                                      Output:["_col0"]
-                                                       Please refer to the previous Select Operator [SEL_116]
-                                          <-Reducer 15 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_130]
-                                              Group By Operator [GBY_129] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                              <-Map 14 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_128]
-                                                  Group By Operator [GBY_127] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                    Select Operator [SEL_126] (rows=462000 width=1436)
-                                                      Output:["_col0"]
-                                                       Please refer to the previous Select Operator [SEL_124]
-                                          <-Reducer 9 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_106]
-                                              Group By Operator [GBY_105] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                              <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                PARTITION_ONLY_SHUFFLE [RS_104]
-                                                  Group By Operator [GBY_103] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                    Select Operator [SEL_102] (rows=232725 width=385)
-                                                      Output:["_col0"]
-                                                       Please refer to the previous Select Operator [SEL_100]
+                      Top N Key Operator [TNK_55] (rows=843315281 width=88)
+                        keys:_col18,sort order:+,top n:100
+                        Merge Join Operator [MERGEJOIN_99] (rows=843315281 width=88)
+                          Conds:RS_24._col1=RS_126._col0(Inner),Output:["_col4","_col5","_col6","_col7","_col18"]
+                        <-Map 14 [SIMPLE_EDGE] vectorized
+                          SHUFFLE [RS_126]
+                            PartitionCols:_col0
+                            Select Operator [SEL_125] (rows=462000 width=1436)
+                              Output:["_col0","_col1"]
+                              Filter Operator [FIL_124] (rows=462000 width=1436)
+                                predicate:i_item_sk is not null
+                                TableScan [TS_12] (rows=462000 width=1436)
+                                  default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id"]
+                        <-Reducer 4 [SIMPLE_EDGE]
+                          SHUFFLE [RS_24]
+                            PartitionCols:_col1
+                            Merge Join Operator [MERGEJOIN_98] (rows=766650239 width=88)
+                              Conds:RS_21._col3=RS_118._col0(Inner),Output:["_col1","_col4","_col5","_col6","_col7"]
+                            <-Map 12 [SIMPLE_EDGE] vectorized
+                              SHUFFLE [RS_118]
+                                PartitionCols:_col0
+                                Select Operator [SEL_117] (rows=2300 width=1179)
+                                  Output:["_col0"]
+                                  Filter Operator [FIL_116] (rows=2300 width=1179)
+                                    predicate:(((p_channel_email = 'N') or (p_channel_event = 'N')) and p_promo_sk is not null)
+                                    TableScan [TS_9] (rows=2300 width=1179)
+                                      default@promotion,promotion,Tbl:COMPLETE,Col:NONE,Output:["p_promo_sk","p_channel_email","p_channel_event"]
+                            <-Reducer 3 [SIMPLE_EDGE]
+                              SHUFFLE [RS_21]
+                                PartitionCols:_col3
+                                Merge Join Operator [MERGEJOIN_97] (rows=696954748 width=88)
+                                  Conds:RS_18._col0=RS_110._col0(Inner),Output:["_col1","_col3","_col4","_col5","_col6","_col7"]
+                                <-Map 10 [SIMPLE_EDGE] vectorized
+                                  SHUFFLE [RS_110]
+                                    PartitionCols:_col0
+                                    Select Operator [SEL_109] (rows=36524 width=1119)
+                                      Output:["_col0"]
+                                      Filter Operator [FIL_108] (rows=36524 width=1119)
+                                        predicate:((d_year = 1998) and d_date_sk is not null)
+                                        TableScan [TS_6] (rows=73049 width=1119)
+                                          default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+                                <-Reducer 2 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_18]
+                                    PartitionCols:_col0
+                                    Merge Join Operator [MERGEJOIN_96] (rows=633595212 width=88)
+                                      Conds:RS_134._col2=RS_102._col0(Inner),Output:["_col0","_col1","_col3","_col4","_col5","_col6","_col7"]
+                                    <-Map 8 [SIMPLE_EDGE] vectorized
+                                      PARTITION_ONLY_SHUFFLE [RS_102]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_101] (rows=232725 width=385)
+                                          Output:["_col0"]
+                                          Filter Operator [FIL_100] (rows=232725 width=385)
+                                            predicate:((cd_education_status = 'Primary') and (cd_gender = 'F') and (cd_marital_status = 'W') and cd_demo_sk is not null)
+                                            TableScan [TS_3] (rows=1861800 width=385)
+                                              default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_education_status"]
+                                    <-Map 1 [SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_134]
+                                        PartitionCols:_col2
+                                        Select Operator [SEL_133] (rows=575995635 width=88)
+                                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
+                                          Filter Operator [FIL_132] (rows=575995635 width=88)
+                                            predicate:((ss_cdemo_sk BETWEEN DynamicValue(RS_16_customer_demographics_cd_demo_sk_min) AND DynamicValue(RS_16_customer_demographics_cd_demo_sk_max) and in_bloom_filter(ss_cdemo_sk, DynamicValue(RS_16_customer_demographics_cd_demo_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_25_item_i_item_sk_min) AND DynamicValue(RS_25_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_25_item_i_item_sk_bloom_filter))) and (ss_promo_sk BETWEEN DynamicValue(RS_22_promotion_p_promo_sk_min) AND DynamicValue(RS_22_promotion_p_promo_sk_max) and in_bloom_filter(ss_promo_sk, DynamicValue(RS_22_promotion_p_promo_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_19_date_dim_d_date_sk_min) AND DynamicValue(RS_19_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_19_date_dim_d_date_sk_bloom_filter))) and ss_cdemo_sk is not null and ss_item_sk is not null and ss_promo_sk is not null and ss_sold_date_sk is not null)
+                                            TableScan [TS_0] (rows=575995635 width=88)
+                                              default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_cdemo_sk","ss_promo_sk","ss_quantity","ss_list_price","ss_sales_price","ss_coupon_amt"]
+                                            <-Reducer 11 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_115]
+                                                Group By Operator [GBY_114] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_113]
+                                                    Group By Operator [GBY_112] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                      Select Operator [SEL_111] (rows=36524 width=1119)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Select Operator [SEL_109]
+                                            <-Reducer 13 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_123]
+                                                Group By Operator [GBY_122] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_121]
+                                                    Group By Operator [GBY_120] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                      Select Operator [SEL_119] (rows=2300 width=1179)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Select Operator [SEL_117]
+                                            <-Reducer 15 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_131]
+                                                Group By Operator [GBY_130] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                <-Map 14 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_129]
+                                                    Group By Operator [GBY_128] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                      Select Operator [SEL_127] (rows=462000 width=1436)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Select Operator [SEL_125]
+                                            <-Reducer 9 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_107]
+                                                Group By Operator [GBY_106] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                  PARTITION_ONLY_SHUFFLE [RS_105]
+                                                    Group By Operator [GBY_104] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                      Select Operator [SEL_103] (rows=232725 width=385)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Select Operator [SEL_101]
 

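In the query7 plan, Top N Key Operator [TNK_55] is keyed on the single grouping column (_col18) with sort order:+ and top n:100, and it sits between the final join (MERGEJOIN_99) and the Group By that computes the per-item aggregates, so filtering happens before the averages are evaluated. A hedged sketch of how such a plan can be inspected; the property hive.optimize.topnkey is an assumption about the switch this change adds, hive.vectorized.execution.enabled is the existing vectorization flag, and table t is hypothetical:

    -- Assumed property name for the optimization introduced by HIVE-17896.
    SET hive.optimize.topnkey=true;
    -- Existing vectorization flag.
    SET hive.vectorized.execution.enabled=true;

    EXPLAIN
    SELECT k, AVG(v)
    FROM t
    GROUP BY k
    ORDER BY k
    LIMIT 100;
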

[17/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/explainuser_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/explainuser_2.q.out b/ql/src/test/results/clientpositive/llap/explainuser_2.q.out
index 3675d8d..5f5f5f6 100644
--- a/ql/src/test/results/clientpositive/llap/explainuser_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/explainuser_2.q.out
@@ -303,124 +303,126 @@ Stage-0
     limit:100
     Stage-1
       Reducer 5 vectorized, llap
-      File Output Operator [FS_216]
-        Limit [LIM_215] (rows=12 width=285)
+      File Output Operator [FS_217]
+        Limit [LIM_216] (rows=12 width=285)
           Number of rows:100
-          Select Operator [SEL_214] (rows=12 width=285)
+          Select Operator [SEL_215] (rows=12 width=285)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
           <-Reducer 4 [SIMPLE_EDGE] vectorized, llap
-            SHUFFLE [RS_213]
-              Group By Operator [GBY_212] (rows=12 width=285)
+            SHUFFLE [RS_214]
+              Group By Operator [GBY_213] (rows=12 width=285)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["count(VALUE._col0)","count(VALUE._col1)","count(VALUE._col2)"],keys:KEY._col0, KEY._col1, KEY._col2
               <-Reducer 3 [SIMPLE_EDGE] llap
                 SHUFFLE [RS_49]
                   PartitionCols:_col0, _col1, _col2
                   Group By Operator [GBY_48] (rows=12 width=285)
                     Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["count(_col11)","count(_col21)","count(_col3)"],keys:_col10, _col20, _col2
-                    Merge Join Operator [MERGEJOIN_187] (rows=9275 width=534)
-                      Conds:RS_44._col1, _col3=RS_45._col15, _col17(Inner),Output:["_col2","_col3","_col10","_col11","_col20","_col21"]
-                    <-Reducer 10 [SIMPLE_EDGE] llap
-                      SHUFFLE [RS_45]
-                        PartitionCols:_col15, _col17
-                        Select Operator [SEL_40] (rows=420 width=447)
-                          Output:["_col4","_col5","_col14","_col15","_col17"]
-                          Merge Join Operator [MERGEJOIN_186] (rows=420 width=447)
-                            Conds:RS_37._col4, _col2=RS_38._col4, _col2(Inner),Output:["_col0","_col1","_col14","_col15","_col17"]
-                          <-Reducer 11 [SIMPLE_EDGE] llap
-                            SHUFFLE [RS_38]
-                              PartitionCols:_col4, _col2
-                              Merge Join Operator [MERGEJOIN_185] (rows=10 width=356)
-                                Conds:RS_211._col0=RS_199._col0(Inner),Output:["_col2","_col3","_col4","_col5"]
-                              <-Map 6 [SIMPLE_EDGE] vectorized, llap
-                                SHUFFLE [RS_199]
-                                  PartitionCols:_col0
-                                  Select Operator [SEL_196] (rows=25 width=178)
-                                    Output:["_col0"]
-                                    Filter Operator [FIL_193] (rows=25 width=178)
-                                      predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null)
-                                      TableScan [TS_3] (rows=500 width=178)
-                                        default@src,d3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
-                              <-Map 15 [SIMPLE_EDGE] vectorized, llap
-                                SHUFFLE [RS_211]
-                                  PartitionCols:_col0
-                                  Select Operator [SEL_210] (rows=7 width=531)
-                                    Output:["_col0","_col2","_col3","_col4","_col5"]
-                                    Filter Operator [FIL_209] (rows=7 width=534)
-                                      predicate:((v1 = 'srv1') and k1 is not null and k2 is not null and k3 is not null and v2 is not null and v3 is not null)
-                                      TableScan [TS_18] (rows=85 width=534)
-                                        default@sr,sr,Tbl:COMPLETE,Col:COMPLETE,Output:["k1","v1","k2","v2","k3","v3"]
-                          <-Reducer 9 [SIMPLE_EDGE] llap
-                            SHUFFLE [RS_37]
-                              PartitionCols:_col4, _col2
-                              Merge Join Operator [MERGEJOIN_184] (rows=42 width=352)
-                                Conds:RS_34._col1=RS_208._col1(Inner),Output:["_col0","_col1","_col2","_col4"]
-                              <-Map 14 [SIMPLE_EDGE] vectorized, llap
-                                SHUFFLE [RS_208]
-                                  PartitionCols:_col1
-                                  Select Operator [SEL_207] (rows=2 width=180)
-                                    Output:["_col1"]
-                                    Filter Operator [FIL_206] (rows=2 width=175)
-                                      predicate:((key = 'src1key') and value is not null)
-                                      TableScan [TS_15] (rows=25 width=175)
-                                        default@src1,src1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
-                              <-Reducer 8 [SIMPLE_EDGE] llap
-                                SHUFFLE [RS_34]
-                                  PartitionCols:_col1
-                                  Merge Join Operator [MERGEJOIN_183] (rows=42 width=352)
-                                    Conds:RS_31._col3=RS_205._col1(Inner),Output:["_col0","_col1","_col2","_col4"]
-                                  <-Map 13 [SIMPLE_EDGE] vectorized, llap
-                                    SHUFFLE [RS_205]
-                                      PartitionCols:_col1
-                                      Select Operator [SEL_204] (rows=6 width=185)
-                                        Output:["_col1"]
-                                        Filter Operator [FIL_203] (rows=6 width=178)
-                                          predicate:((key = 'srcpartkey') and value is not null)
-                                          TableScan [TS_12] (rows=2000 width=178)
-                                            default@srcpart,srcpart,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
-                                  <-Reducer 7 [SIMPLE_EDGE] llap
-                                    SHUFFLE [RS_31]
-                                      PartitionCols:_col3
-                                      Merge Join Operator [MERGEJOIN_182] (rows=7 width=443)
-                                        Conds:RS_202._col0=RS_198._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4"]
-                                      <-Map 6 [SIMPLE_EDGE] vectorized, llap
-                                        SHUFFLE [RS_198]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_195] (rows=2 width=178)
-                                            Output:["_col0"]
-                                            Filter Operator [FIL_192] (rows=2 width=178)
-                                              predicate:((value = 'd1value') and key is not null)
-                                               Please refer to the previous TableScan [TS_3]
-                                      <-Map 12 [SIMPLE_EDGE] vectorized, llap
-                                        SHUFFLE [RS_202]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_201] (rows=7 width=531)
-                                            Output:["_col0","_col1","_col2","_col3","_col4"]
-                                            Filter Operator [FIL_200] (rows=7 width=534)
-                                              predicate:((v3 = 'ssv3') and k1 is not null and k2 is not null and k3 is not null and v1 is not null and v2 is not null)
-                                              TableScan [TS_6] (rows=85 width=534)
-                                                default@ss_n1,ss_n1,Tbl:COMPLETE,Col:COMPLETE,Output:["k1","v1","k2","v2","k3","v3"]
-                    <-Reducer 2 [SIMPLE_EDGE] llap
-                      SHUFFLE [RS_44]
-                        PartitionCols:_col1, _col3
-                        Merge Join Operator [MERGEJOIN_181] (rows=265 width=269)
-                          Conds:RS_190._col0=RS_197._col0(Inner),Output:["_col1","_col2","_col3"]
-                        <-Map 6 [SIMPLE_EDGE] vectorized, llap
-                          SHUFFLE [RS_197]
-                            PartitionCols:_col0
-                            Select Operator [SEL_194] (rows=25 width=178)
-                              Output:["_col0"]
-                              Filter Operator [FIL_191] (rows=25 width=178)
-                                predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null)
-                                 Please refer to the previous TableScan [TS_3]
-                        <-Map 1 [SIMPLE_EDGE] vectorized, llap
-                          SHUFFLE [RS_190]
-                            PartitionCols:_col0
-                            Select Operator [SEL_189] (rows=170 width=356)
-                              Output:["_col0","_col1","_col2","_col3"]
-                              Filter Operator [FIL_188] (rows=170 width=356)
-                                predicate:(k1 is not null and v2 is not null and v3 is not null)
-                                TableScan [TS_0] (rows=170 width=356)
-                                  default@cs,cs,Tbl:COMPLETE,Col:COMPLETE,Output:["k1","v2","k3","v3"]
+                    Top N Key Operator [TNK_91] (rows=9275 width=534)
+                      keys:_col10, _col20, _col2,sort order:+++,top n:100
+                      Merge Join Operator [MERGEJOIN_188] (rows=9275 width=534)
+                        Conds:RS_44._col1, _col3=RS_45._col15, _col17(Inner),Output:["_col2","_col3","_col10","_col11","_col20","_col21"]
+                      <-Reducer 10 [SIMPLE_EDGE] llap
+                        SHUFFLE [RS_45]
+                          PartitionCols:_col15, _col17
+                          Select Operator [SEL_40] (rows=420 width=447)
+                            Output:["_col4","_col5","_col14","_col15","_col17"]
+                            Merge Join Operator [MERGEJOIN_187] (rows=420 width=447)
+                              Conds:RS_37._col4, _col2=RS_38._col4, _col2(Inner),Output:["_col0","_col1","_col14","_col15","_col17"]
+                            <-Reducer 11 [SIMPLE_EDGE] llap
+                              SHUFFLE [RS_38]
+                                PartitionCols:_col4, _col2
+                                Merge Join Operator [MERGEJOIN_186] (rows=10 width=356)
+                                  Conds:RS_212._col0=RS_200._col0(Inner),Output:["_col2","_col3","_col4","_col5"]
+                                <-Map 6 [SIMPLE_EDGE] vectorized, llap
+                                  SHUFFLE [RS_200]
+                                    PartitionCols:_col0
+                                    Select Operator [SEL_197] (rows=25 width=178)
+                                      Output:["_col0"]
+                                      Filter Operator [FIL_194] (rows=25 width=178)
+                                        predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null)
+                                        TableScan [TS_3] (rows=500 width=178)
+                                          default@src,d3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
+                                <-Map 15 [SIMPLE_EDGE] vectorized, llap
+                                  SHUFFLE [RS_212]
+                                    PartitionCols:_col0
+                                    Select Operator [SEL_211] (rows=7 width=531)
+                                      Output:["_col0","_col2","_col3","_col4","_col5"]
+                                      Filter Operator [FIL_210] (rows=7 width=534)
+                                        predicate:((v1 = 'srv1') and k1 is not null and k2 is not null and k3 is not null and v2 is not null and v3 is not null)
+                                        TableScan [TS_18] (rows=85 width=534)
+                                          default@sr,sr,Tbl:COMPLETE,Col:COMPLETE,Output:["k1","v1","k2","v2","k3","v3"]
+                            <-Reducer 9 [SIMPLE_EDGE] llap
+                              SHUFFLE [RS_37]
+                                PartitionCols:_col4, _col2
+                                Merge Join Operator [MERGEJOIN_185] (rows=42 width=352)
+                                  Conds:RS_34._col1=RS_209._col1(Inner),Output:["_col0","_col1","_col2","_col4"]
+                                <-Map 14 [SIMPLE_EDGE] vectorized, llap
+                                  SHUFFLE [RS_209]
+                                    PartitionCols:_col1
+                                    Select Operator [SEL_208] (rows=2 width=180)
+                                      Output:["_col1"]
+                                      Filter Operator [FIL_207] (rows=2 width=175)
+                                        predicate:((key = 'src1key') and value is not null)
+                                        TableScan [TS_15] (rows=25 width=175)
+                                          default@src1,src1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
+                                <-Reducer 8 [SIMPLE_EDGE] llap
+                                  SHUFFLE [RS_34]
+                                    PartitionCols:_col1
+                                    Merge Join Operator [MERGEJOIN_184] (rows=42 width=352)
+                                      Conds:RS_31._col3=RS_206._col1(Inner),Output:["_col0","_col1","_col2","_col4"]
+                                    <-Map 13 [SIMPLE_EDGE] vectorized, llap
+                                      SHUFFLE [RS_206]
+                                        PartitionCols:_col1
+                                        Select Operator [SEL_205] (rows=6 width=185)
+                                          Output:["_col1"]
+                                          Filter Operator [FIL_204] (rows=6 width=178)
+                                            predicate:((key = 'srcpartkey') and value is not null)
+                                            TableScan [TS_12] (rows=2000 width=178)
+                                              default@srcpart,srcpart,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
+                                    <-Reducer 7 [SIMPLE_EDGE] llap
+                                      SHUFFLE [RS_31]
+                                        PartitionCols:_col3
+                                        Merge Join Operator [MERGEJOIN_183] (rows=7 width=443)
+                                          Conds:RS_203._col0=RS_199._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4"]
+                                        <-Map 6 [SIMPLE_EDGE] vectorized, llap
+                                          SHUFFLE [RS_199]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_196] (rows=2 width=178)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_193] (rows=2 width=178)
+                                                predicate:((value = 'd1value') and key is not null)
+                                                 Please refer to the previous TableScan [TS_3]
+                                        <-Map 12 [SIMPLE_EDGE] vectorized, llap
+                                          SHUFFLE [RS_203]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_202] (rows=7 width=531)
+                                              Output:["_col0","_col1","_col2","_col3","_col4"]
+                                              Filter Operator [FIL_201] (rows=7 width=534)
+                                                predicate:((v3 = 'ssv3') and k1 is not null and k2 is not null and k3 is not null and v1 is not null and v2 is not null)
+                                                TableScan [TS_6] (rows=85 width=534)
+                                                  default@ss_n1,ss_n1,Tbl:COMPLETE,Col:COMPLETE,Output:["k1","v1","k2","v2","k3","v3"]
+                      <-Reducer 2 [SIMPLE_EDGE] llap
+                        SHUFFLE [RS_44]
+                          PartitionCols:_col1, _col3
+                          Merge Join Operator [MERGEJOIN_182] (rows=265 width=269)
+                            Conds:RS_191._col0=RS_198._col0(Inner),Output:["_col1","_col2","_col3"]
+                          <-Map 6 [SIMPLE_EDGE] vectorized, llap
+                            SHUFFLE [RS_198]
+                              PartitionCols:_col0
+                              Select Operator [SEL_195] (rows=25 width=178)
+                                Output:["_col0"]
+                                Filter Operator [FIL_192] (rows=25 width=178)
+                                  predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null)
+                                   Please refer to the previous TableScan [TS_3]
+                          <-Map 1 [SIMPLE_EDGE] vectorized, llap
+                            SHUFFLE [RS_191]
+                              PartitionCols:_col0
+                              Select Operator [SEL_190] (rows=170 width=356)
+                                Output:["_col0","_col1","_col2","_col3"]
+                                Filter Operator [FIL_189] (rows=170 width=356)
+                                  predicate:(k1 is not null and v2 is not null and v3 is not null)
+                                  TableScan [TS_0] (rows=170 width=356)
+                                    default@cs,cs,Tbl:COMPLETE,Col:COMPLETE,Output:["k1","v2","k3","v3"]
 
 PREHOOK: query: explain
 SELECT x.key, z.value, y.value
@@ -992,105 +994,107 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7 vectorized, llap
-      File Output Operator [FS_232]
-        Limit [LIM_231] (rows=100 width=10)
+      File Output Operator [FS_234]
+        Limit [LIM_233] (rows=100 width=10)
           Number of rows:100
-          Select Operator [SEL_230] (rows=732 width=10)
+          Select Operator [SEL_232] (rows=732 width=10)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
           <-Reducer 6 [SIMPLE_EDGE] vectorized, llap
-            SHUFFLE [RS_229]
-              Group By Operator [GBY_228] (rows=732 width=10)
+            SHUFFLE [RS_231]
+              Group By Operator [GBY_230] (rows=732 width=10)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["count(VALUE._col0)","count(VALUE._col1)","count(VALUE._col2)"],keys:KEY._col0, KEY._col1, KEY._col2
               <-Map 5 [SIMPLE_EDGE] vectorized, llap
-                SHUFFLE [RS_227]
+                SHUFFLE [RS_229]
                   PartitionCols:_col0, _col1, _col2
-                  Group By Operator [GBY_226] (rows=1464 width=10)
+                  Group By Operator [GBY_228] (rows=1464 width=10)
                     Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["count(_col11)","count(_col21)","count(_col3)"],keys:_col10, _col20, _col2
-                    Map Join Operator [MAPJOIN_225] (rows=1464 width=10)
-                      Conds:RS_208._col1, _col3=SEL_224._col15, _col17(Inner),Output:["_col2","_col3","_col10","_col11","_col20","_col21"]
-                    <-Map 2 [BROADCAST_EDGE] vectorized, llap
-                      BROADCAST [RS_208]
-                        PartitionCols:_col1, _col3
-                        Map Join Operator [MAPJOIN_207] (rows=275 width=10)
-                          Conds:RS_204._col0=SEL_206._col0(Inner),Output:["_col1","_col2","_col3"]
-                        <-Map 1 [BROADCAST_EDGE] vectorized, llap
-                          BROADCAST [RS_204]
-                            PartitionCols:_col0
-                            Select Operator [SEL_203] (rows=170 width=34)
-                              Output:["_col0","_col1","_col2","_col3"]
-                              Filter Operator [FIL_202] (rows=170 width=34)
-                                predicate:(k1 is not null and v2 is not null and v3 is not null)
-                                TableScan [TS_0] (rows=170 width=34)
-                                  default@cs,cs,Tbl:COMPLETE,Col:NONE,Output:["k1","v2","k3","v3"]
-                        <-Select Operator [SEL_206] (rows=250 width=10)
-                            Output:["_col0"]
-                            Filter Operator [FIL_205] (rows=250 width=10)
-                              predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null)
-                              TableScan [TS_3] (rows=500 width=10)
-                                default@src,d3,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
-                    <-Select Operator [SEL_224] (rows=1331 width=10)
-                        Output:["_col4","_col5","_col14","_col15","_col17"]
-                        Map Join Operator [MAPJOIN_223] (rows=1331 width=10)
-                          Conds:MAPJOIN_222._col4, _col2=RS_218._col4, _col2(Inner),Output:["_col0","_col1","_col14","_col15","_col17"]
-                        <-Map 10 [BROADCAST_EDGE] vectorized, llap
-                          BROADCAST [RS_218]
-                            PartitionCols:_col4, _col2
-                            Map Join Operator [MAPJOIN_217] (rows=275 width=10)
-                              Conds:RS_214._col0=SEL_216._col0(Inner),Output:["_col2","_col3","_col4","_col5"]
-                            <-Map 9 [BROADCAST_EDGE] vectorized, llap
-                              BROADCAST [RS_214]
-                                PartitionCols:_col0
-                                Select Operator [SEL_213] (rows=42 width=34)
-                                  Output:["_col0","_col2","_col3","_col4","_col5"]
-                                  Filter Operator [FIL_212] (rows=42 width=34)
-                                    predicate:((v1 = 'srv1') and k1 is not null and k2 is not null and k3 is not null and v2 is not null and v3 is not null)
-                                    TableScan [TS_18] (rows=85 width=34)
-                                      default@sr,sr,Tbl:COMPLETE,Col:NONE,Output:["k1","v1","k2","v2","k3","v3"]
-                            <-Select Operator [SEL_216] (rows=250 width=10)
-                                Output:["_col0"]
-                                Filter Operator [FIL_215] (rows=250 width=10)
-                                  predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null)
-                                  TableScan [TS_21] (rows=500 width=10)
-                                    default@src,d2,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
-                        <-Map Join Operator [MAPJOIN_222] (rows=1210 width=10)
-                            Conds:MAPJOIN_221._col1=RS_211._col1(Inner),Output:["_col0","_col1","_col2","_col4"]
-                          <-Map 8 [BROADCAST_EDGE] vectorized, llap
-                            BROADCAST [RS_211]
-                              PartitionCols:_col1
-                              Select Operator [SEL_210] (rows=12 width=7)
-                                Output:["_col1"]
-                                Filter Operator [FIL_209] (rows=12 width=7)
-                                  predicate:((key = 'src1key') and value is not null)
-                                  TableScan [TS_15] (rows=25 width=7)
-                                    default@src1,src1,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
-                          <-Map Join Operator [MAPJOIN_221] (rows=1100 width=10)
-                              Conds:RS_201._col3=SEL_220._col1(Inner),Output:["_col0","_col1","_col2","_col4"]
-                            <-Map 4 [BROADCAST_EDGE] vectorized, llap
-                              BROADCAST [RS_201]
-                                PartitionCols:_col3
-                                Map Join Operator [MAPJOIN_200] (rows=275 width=10)
-                                  Conds:RS_197._col0=SEL_199._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4"]
-                                <-Map 3 [BROADCAST_EDGE] vectorized, llap
-                                  BROADCAST [RS_197]
-                                    PartitionCols:_col0
-                                    Select Operator [SEL_196] (rows=42 width=34)
-                                      Output:["_col0","_col1","_col2","_col3","_col4"]
-                                      Filter Operator [FIL_195] (rows=42 width=34)
-                                        predicate:((v3 = 'ssv3') and k1 is not null and k2 is not null and k3 is not null and v1 is not null and v2 is not null)
-                                        TableScan [TS_6] (rows=85 width=34)
-                                          default@ss_n1,ss_n1,Tbl:COMPLETE,Col:NONE,Output:["k1","v1","k2","v2","k3","v3"]
-                                <-Select Operator [SEL_199] (rows=250 width=10)
-                                    Output:["_col0"]
-                                    Filter Operator [FIL_198] (rows=250 width=10)
-                                      predicate:((value = 'd1value') and key is not null)
-                                      TableScan [TS_9] (rows=500 width=10)
-                                        default@src,d1,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
-                            <-Select Operator [SEL_220] (rows=1000 width=10)
-                                Output:["_col1"]
-                                Filter Operator [FIL_219] (rows=1000 width=10)
-                                  predicate:((key = 'srcpartkey') and value is not null)
-                                  TableScan [TS_12] (rows=2000 width=10)
-                                    default@srcpart,srcpart,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
+                    Top N Key Operator [TNK_227] (rows=1464 width=10)
+                      keys:_col10, _col20, _col2,sort order:+++,top n:100
+                      Map Join Operator [MAPJOIN_226] (rows=1464 width=10)
+                        Conds:RS_209._col1, _col3=SEL_225._col15, _col17(Inner),Output:["_col2","_col3","_col10","_col11","_col20","_col21"]
+                      <-Map 2 [BROADCAST_EDGE] vectorized, llap
+                        BROADCAST [RS_209]
+                          PartitionCols:_col1, _col3
+                          Map Join Operator [MAPJOIN_208] (rows=275 width=10)
+                            Conds:RS_205._col0=SEL_207._col0(Inner),Output:["_col1","_col2","_col3"]
+                          <-Map 1 [BROADCAST_EDGE] vectorized, llap
+                            BROADCAST [RS_205]
+                              PartitionCols:_col0
+                              Select Operator [SEL_204] (rows=170 width=34)
+                                Output:["_col0","_col1","_col2","_col3"]
+                                Filter Operator [FIL_203] (rows=170 width=34)
+                                  predicate:(k1 is not null and v2 is not null and v3 is not null)
+                                  TableScan [TS_0] (rows=170 width=34)
+                                    default@cs,cs,Tbl:COMPLETE,Col:NONE,Output:["k1","v2","k3","v3"]
+                          <-Select Operator [SEL_207] (rows=250 width=10)
+                              Output:["_col0"]
+                              Filter Operator [FIL_206] (rows=250 width=10)
+                                predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null)
+                                TableScan [TS_3] (rows=500 width=10)
+                                  default@src,d3,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
+                      <-Select Operator [SEL_225] (rows=1331 width=10)
+                          Output:["_col4","_col5","_col14","_col15","_col17"]
+                          Map Join Operator [MAPJOIN_224] (rows=1331 width=10)
+                            Conds:MAPJOIN_223._col4, _col2=RS_219._col4, _col2(Inner),Output:["_col0","_col1","_col14","_col15","_col17"]
+                          <-Map 10 [BROADCAST_EDGE] vectorized, llap
+                            BROADCAST [RS_219]
+                              PartitionCols:_col4, _col2
+                              Map Join Operator [MAPJOIN_218] (rows=275 width=10)
+                                Conds:RS_215._col0=SEL_217._col0(Inner),Output:["_col2","_col3","_col4","_col5"]
+                              <-Map 9 [BROADCAST_EDGE] vectorized, llap
+                                BROADCAST [RS_215]
+                                  PartitionCols:_col0
+                                  Select Operator [SEL_214] (rows=42 width=34)
+                                    Output:["_col0","_col2","_col3","_col4","_col5"]
+                                    Filter Operator [FIL_213] (rows=42 width=34)
+                                      predicate:((v1 = 'srv1') and k1 is not null and k2 is not null and k3 is not null and v2 is not null and v3 is not null)
+                                      TableScan [TS_18] (rows=85 width=34)
+                                        default@sr,sr,Tbl:COMPLETE,Col:NONE,Output:["k1","v1","k2","v2","k3","v3"]
+                              <-Select Operator [SEL_217] (rows=250 width=10)
+                                  Output:["_col0"]
+                                  Filter Operator [FIL_216] (rows=250 width=10)
+                                    predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null)
+                                    TableScan [TS_21] (rows=500 width=10)
+                                      default@src,d2,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
+                          <-Map Join Operator [MAPJOIN_223] (rows=1210 width=10)
+                              Conds:MAPJOIN_222._col1=RS_212._col1(Inner),Output:["_col0","_col1","_col2","_col4"]
+                            <-Map 8 [BROADCAST_EDGE] vectorized, llap
+                              BROADCAST [RS_212]
+                                PartitionCols:_col1
+                                Select Operator [SEL_211] (rows=12 width=7)
+                                  Output:["_col1"]
+                                  Filter Operator [FIL_210] (rows=12 width=7)
+                                    predicate:((key = 'src1key') and value is not null)
+                                    TableScan [TS_15] (rows=25 width=7)
+                                      default@src1,src1,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
+                            <-Map Join Operator [MAPJOIN_222] (rows=1100 width=10)
+                                Conds:RS_202._col3=SEL_221._col1(Inner),Output:["_col0","_col1","_col2","_col4"]
+                              <-Map 4 [BROADCAST_EDGE] vectorized, llap
+                                BROADCAST [RS_202]
+                                  PartitionCols:_col3
+                                  Map Join Operator [MAPJOIN_201] (rows=275 width=10)
+                                    Conds:RS_198._col0=SEL_200._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4"]
+                                  <-Map 3 [BROADCAST_EDGE] vectorized, llap
+                                    BROADCAST [RS_198]
+                                      PartitionCols:_col0
+                                      Select Operator [SEL_197] (rows=42 width=34)
+                                        Output:["_col0","_col1","_col2","_col3","_col4"]
+                                        Filter Operator [FIL_196] (rows=42 width=34)
+                                          predicate:((v3 = 'ssv3') and k1 is not null and k2 is not null and k3 is not null and v1 is not null and v2 is not null)
+                                          TableScan [TS_6] (rows=85 width=34)
+                                            default@ss_n1,ss_n1,Tbl:COMPLETE,Col:NONE,Output:["k1","v1","k2","v2","k3","v3"]
+                                  <-Select Operator [SEL_200] (rows=250 width=10)
+                                      Output:["_col0"]
+                                      Filter Operator [FIL_199] (rows=250 width=10)
+                                        predicate:((value = 'd1value') and key is not null)
+                                        TableScan [TS_9] (rows=500 width=10)
+                                          default@src,d1,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
+                              <-Select Operator [SEL_221] (rows=1000 width=10)
+                                  Output:["_col1"]
+                                  Filter Operator [FIL_220] (rows=1000 width=10)
+                                    predicate:((key = 'srcpartkey') and value is not null)
+                                    TableScan [TS_12] (rows=2000 width=10)
+                                      default@srcpart,srcpart,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
 
 PREHOOK: query: explain
 SELECT x.key, z.value, y.value

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out b/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out
index 1463b32..252831e 100644
--- a/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out
@@ -199,19 +199,24 @@ STAGE PLANS:
                     expressions: value (type: string), (UDFToDouble(key) + 1.0D) (type: double)
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: sum(_col1)
+                    Top N Key Operator
+                      sort order: +
                       keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 20
+                      Group By Operator
+                        aggregations: sum(_col1)
+                        keys: _col0 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
-                        value expressions: _col1 (type: double)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.3
+                          value expressions: _col1 (type: double)
             Execution mode: vectorized, llap
             LLAP IO: no inputs
         Reducer 2 
@@ -295,19 +300,24 @@ STAGE PLANS:
                     expressions: value (type: string), (UDFToDouble(key) + 1.0D) (type: double)
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: sum(_col1), count(_col1)
+                    Top N Key Operator
+                      sort order: +
                       keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 20
+                      Group By Operator
+                        aggregations: sum(_col1), count(_col1)
+                        keys: _col0 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2
                         Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
-                        value expressions: _col1 (type: double), _col2 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.3
+                          value expressions: _col1 (type: double), _col2 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: no inputs
         Reducer 2 
@@ -395,17 +405,22 @@ STAGE PLANS:
                     expressions: cdouble (type: double)
                     outputColumnNames: cdouble
                     Statistics: Num rows: 12288 Data size: 73400 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
+                    Top N Key Operator
+                      sort order: +
                       keys: cdouble (type: double)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 5528 Data size: 33024 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: double)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: double)
-                        Statistics: Num rows: 5528 Data size: 33024 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
+                      Statistics: Num rows: 12288 Data size: 73400 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 20
+                      Group By Operator
+                        keys: cdouble (type: double)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 5528 Data size: 21816 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: double)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: double)
+                          Statistics: Num rows: 5528 Data size: 21816 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.3
             Execution mode: vectorized, llap
             LLAP IO: all inputs
         Reducer 2 
@@ -415,13 +430,13 @@ STAGE PLANS:
                 keys: KEY._col0 (type: double)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 5528 Data size: 33024 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 5528 Data size: 21816 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 20
-                  Statistics: Num rows: 20 Data size: 128 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 20 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 20 Data size: 128 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 20 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -921,26 +936,42 @@ STAGE PLANS:
                     expressions: key (type: string)
                     outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
+                    Top N Key Operator
+                      sort order: +
                       keys: key (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 2
+                      Group By Operator
+                        aggregations: count()
+                        keys: key (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
-                        value expressions: _col1 (type: bigint)
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.3
+                          value expressions: _col1 (type: bigint)
+                    Top N Key Operator
+                      sort order: +
+                      keys: key (type: string)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 3
+                      Group By Operator
+                        aggregations: count()
+                        keys: key (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
-                        value expressions: _col1 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.3
+                          value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: no inputs
         Reducer 2 
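
The limit_pushdown.q.out hunks above all follow the same pattern introduced by this patch: a Top N Key Operator is inserted between the map-side Select and the hash-mode Group By, carrying the same keys and sort order as the downstream Reduce Output Operator and a "top n" taken from the query's LIMIT. As a rough sketch, a query of the following shape (reconstructed from the expressions in the first hunk; not the literal test query, which lives in limit_pushdown.q) would produce that kind of plan:

    -- Sketch only: matches the plan's expressions (value, UDFToDouble(key) + 1.0D),
    -- the sum aggregation, and "top n: 20"; the real test queries are not shown here.
    EXPLAIN
    SELECT value, SUM(key + 1.0)
    FROM src
    GROUP BY value
    ORDER BY value
    LIMIT 20;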

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/limit_pushdown3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/limit_pushdown3.q.out b/ql/src/test/results/clientpositive/llap/limit_pushdown3.q.out
index 260ce3d..1027bfe 100644
--- a/ql/src/test/results/clientpositive/llap/limit_pushdown3.q.out
+++ b/ql/src/test/results/clientpositive/llap/limit_pushdown3.q.out
@@ -200,19 +200,24 @@ STAGE PLANS:
                     expressions: value (type: string), (UDFToDouble(key) + 1.0D) (type: double)
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: sum(_col1)
+                    Top N Key Operator
+                      sort order: +
                       keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 20
+                      Group By Operator
+                        aggregations: sum(_col1)
+                        keys: _col0 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
-                        value expressions: _col1 (type: double)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.3
+                          value expressions: _col1 (type: double)
             Execution mode: vectorized, llap
             LLAP IO: no inputs
         Reducer 2 
@@ -310,19 +315,24 @@ STAGE PLANS:
                     expressions: value (type: string), (UDFToDouble(key) + 1.0D) (type: double)
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: sum(_col1), count(_col1)
+                    Top N Key Operator
+                      sort order: +
                       keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 20
+                      Group By Operator
+                        aggregations: sum(_col1), count(_col1)
+                        keys: _col0 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2
                         Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
-                        value expressions: _col1 (type: double), _col2 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.3
+                          value expressions: _col1 (type: double), _col2 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: no inputs
         Reducer 2 
@@ -424,17 +434,22 @@ STAGE PLANS:
                     expressions: cdouble (type: double)
                     outputColumnNames: cdouble
                     Statistics: Num rows: 12288 Data size: 73400 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
+                    Top N Key Operator
+                      sort order: +
                       keys: cdouble (type: double)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 5528 Data size: 33024 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: double)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: double)
-                        Statistics: Num rows: 5528 Data size: 33024 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
+                      Statistics: Num rows: 12288 Data size: 73400 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 20
+                      Group By Operator
+                        keys: cdouble (type: double)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 5528 Data size: 21816 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: double)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: double)
+                          Statistics: Num rows: 5528 Data size: 21816 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.3
             Execution mode: vectorized, llap
             LLAP IO: all inputs
         Reducer 2 
@@ -444,11 +459,11 @@ STAGE PLANS:
                 keys: KEY._col0 (type: double)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 5528 Data size: 33024 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 5528 Data size: 21816 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: double)
                   sort order: +
-                  Statistics: Num rows: 5528 Data size: 33024 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 5528 Data size: 21816 Basic stats: COMPLETE Column stats: COMPLETE
                   TopN Hash Memory Usage: 0.3
         Reducer 3 
             Execution mode: vectorized, llap
@@ -456,13 +471,13 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: double)
                 outputColumnNames: _col0
-                Statistics: Num rows: 5528 Data size: 33024 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 5528 Data size: 21816 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 20
-                  Statistics: Num rows: 20 Data size: 128 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 20 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 20 Data size: 128 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 20 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/llap_decimal64_reader.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llap_decimal64_reader.q.out b/ql/src/test/results/clientpositive/llap/llap_decimal64_reader.q.out
index 0041206..88ddd9c 100644
--- a/ql/src/test/results/clientpositive/llap/llap_decimal64_reader.q.out
+++ b/ql/src/test/results/clientpositive/llap/llap_decimal64_reader.q.out
@@ -131,17 +131,22 @@ STAGE PLANS:
                   Filter Operator
                     predicate: ((cdecimal1 = 3.35) or (cdecimal1 = 4.46)) (type: boolean)
                     Statistics: Num rows: 24576 Data size: 5505024 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
+                    Top N Key Operator
+                      sort order: ++
                       keys: cdecimal1 (type: decimal(10,2)), cdecimal2 (type: decimal(38,5))
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 2 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: decimal(10,2)), _col1 (type: decimal(38,5))
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: decimal(10,2)), _col1 (type: decimal(38,5))
+                      Statistics: Num rows: 24576 Data size: 5505024 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 2
+                      Group By Operator
+                        keys: cdecimal1 (type: decimal(10,2)), cdecimal2 (type: decimal(38,5))
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 2 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
+                        Reduce Output Operator
+                          key expressions: _col0 (type: decimal(10,2)), _col1 (type: decimal(38,5))
+                          sort order: ++
+                          Map-reduce partition columns: _col0 (type: decimal(10,2)), _col1 (type: decimal(38,5))
+                          Statistics: Num rows: 2 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -227,17 +232,22 @@ STAGE PLANS:
                   Filter Operator
                     predicate: ((cdecimal1 = 3.35) or (cdecimal1 = 4.46)) (type: boolean)
                     Statistics: Num rows: 24576 Data size: 5505024 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
+                    Top N Key Operator
+                      sort order: ++
                       keys: cdecimal1 (type: decimal(10,2)), cdecimal2 (type: decimal(38,5))
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 2 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: decimal(10,2)), _col1 (type: decimal(38,5))
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: decimal(10,2)), _col1 (type: decimal(38,5))
+                      Statistics: Num rows: 24576 Data size: 5505024 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 2
+                      Group By Operator
+                        keys: cdecimal1 (type: decimal(10,2)), cdecimal2 (type: decimal(38,5))
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 2 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
+                        Reduce Output Operator
+                          key expressions: _col0 (type: decimal(10,2)), _col1 (type: decimal(38,5))
+                          sort order: ++
+                          Map-reduce partition columns: _col0 (type: decimal(10,2)), _col1 (type: decimal(38,5))
+                          Statistics: Num rows: 2 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
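
The llap_decimal64_reader.q.out hunks show the same Top N Key placement for a query that groups on two decimal columns, with sort order ++ and "top n: 2". A query of roughly this shape would yield such a plan; the table name below is a placeholder, since the scanned table is not visible in these hunks:

    -- Sketch only: the filter, grouping keys, and "top n: 2" match the plan above;
    -- decimal64_tbl is a placeholder, not the actual test table name.
    EXPLAIN
    SELECT cdecimal1, cdecimal2
    FROM decimal64_tbl
    WHERE cdecimal1 = 3.35 OR cdecimal1 = 4.46
    GROUP BY cdecimal1, cdecimal2
    ORDER BY cdecimal1, cdecimal2
    LIMIT 2;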

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/offset_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/offset_limit.q.out b/ql/src/test/results/clientpositive/llap/offset_limit.q.out
index 0e718fd..97d2ac2 100644
--- a/ql/src/test/results/clientpositive/llap/offset_limit.q.out
+++ b/ql/src/test/results/clientpositive/llap/offset_limit.q.out
@@ -26,19 +26,24 @@ STAGE PLANS:
                     expressions: key (type: string), substr(value, 5) (type: string)
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: sum(_col1)
+                    Top N Key Operator
+                      sort order: +
                       keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 20
+                      Group By Operator
+                        aggregations: sum(_col1)
+                        keys: _col0 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col1 (type: double)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: double)
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/offset_limit_ppd_optimizer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/offset_limit_ppd_optimizer.q.out b/ql/src/test/results/clientpositive/llap/offset_limit_ppd_optimizer.q.out
index 4c62d7f..2e8d5f3 100644
--- a/ql/src/test/results/clientpositive/llap/offset_limit_ppd_optimizer.q.out
+++ b/ql/src/test/results/clientpositive/llap/offset_limit_ppd_optimizer.q.out
@@ -201,19 +201,24 @@ STAGE PLANS:
                     expressions: value (type: string), (UDFToDouble(key) + 1.0D) (type: double)
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: sum(_col1)
+                    Top N Key Operator
+                      sort order: +
                       keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 30
+                      Group By Operator
+                        aggregations: sum(_col1)
+                        keys: _col0 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
-                        value expressions: _col1 (type: double)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.3
+                          value expressions: _col1 (type: double)
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -298,19 +303,24 @@ STAGE PLANS:
                     expressions: value (type: string), (UDFToDouble(key) + 1.0D) (type: double)
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: sum(_col1), count(_col1)
+                    Top N Key Operator
+                      sort order: +
                       keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 30
+                      Group By Operator
+                        aggregations: sum(_col1), count(_col1)
+                        keys: _col0 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2
                         Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
-                        value expressions: _col1 (type: double), _col2 (type: bigint)
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.3
+                          value expressions: _col1 (type: double), _col2 (type: bigint)
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -399,17 +409,22 @@ STAGE PLANS:
                     expressions: cdouble (type: double)
                     outputColumnNames: cdouble
                     Statistics: Num rows: 12288 Data size: 73400 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
+                    Top N Key Operator
+                      sort order: +
                       keys: cdouble (type: double)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 5528 Data size: 33024 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: double)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: double)
-                        Statistics: Num rows: 5528 Data size: 33024 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
+                      Statistics: Num rows: 12288 Data size: 73400 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 30
+                      Group By Operator
+                        keys: cdouble (type: double)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 5528 Data size: 21816 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: double)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: double)
+                          Statistics: Num rows: 5528 Data size: 21816 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.3
             Execution mode: llap
             LLAP IO: all inputs
         Reducer 2 
@@ -419,14 +434,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: double)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 5528 Data size: 33024 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 5528 Data size: 21816 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 20
                   Offset of rows: 10
-                  Statistics: Num rows: 20 Data size: 128 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 20 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 20 Data size: 128 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 20 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
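
The plan diff above shows the new Top N Key Operator inserted between the Select and the map-side hash Group By: it carries the sort key (keys: _col0, sort order: +) and the limit (top n: 30), and forwards only rows whose key can still land inside the final top-n, so fewer distinct keys reach the Group By and the shuffle. The following standalone Java sketch illustrates that filtering idea only; it is not Hive's operator code, and the class and method names are invented for illustration.

    import java.util.TreeSet;

    /** Minimal, hypothetical sketch of a map-side top-n key filter (not Hive's implementation). */
    public class TopNKeyFilterSketch<K extends Comparable<K>> {
      private final int topN;                                   // e.g. "top n: 30" in the plan
      private final TreeSet<K> smallestKeys = new TreeSet<>();  // ascending keys, like "sort order: +"

      public TopNKeyFilterSketch(int topN) {
        this.topN = topN;
      }

      /** Returns true if a row with this key could still belong to the first topN keys. */
      public boolean canForward(K key) {
        if (smallestKeys.size() < topN || key.compareTo(smallestKeys.last()) <= 0) {
          smallestKeys.add(key);
          if (smallestKeys.size() > topN) {
            smallestKeys.pollLast();                            // evict the largest tracked key
          }
          return true;                                          // forward to Group By / Reduce Sink
        }
        return false;                                           // cannot reach the limit, drop early
      }

      public static void main(String[] args) {
        TopNKeyFilterSketch<String> filter = new TopNKeyFilterSketch<>(2);
        for (String key : new String[] {"val_0", "val_9", "val_5", "val_1"}) {
          System.out.println(key + " forwarded=" + filter.canForward(key));
        }
      }
    }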

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/orc_struct_type_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_struct_type_vectorization.q.out b/ql/src/test/results/clientpositive/llap/orc_struct_type_vectorization.q.out
index 5c6b481..a804e3c 100644
--- a/ql/src/test/results/clientpositive/llap/orc_struct_type_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_struct_type_vectorization.q.out
@@ -238,31 +238,40 @@ STAGE PLANS:
                           projectedOutputColumnNums: [4]
                           selectExpressions: VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 0:int) -> 4:int
                       Statistics: Num rows: 341 Data size: 76542 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: sum(_col0)
-                        Group By Vectorization:
-                            aggregators: VectorUDAFSumLong(col 4:int) -> bigint
-                            className: VectorGroupByOperator
-                            groupByMode: HASH
-                            keyExpressions: col 4:int
-                            native: false
-                            vectorProcessingMode: HASH
-                            projectedOutputColumnNums: [0]
+                      Top N Key Operator
+                        sort order: +
                         keys: _col0 (type: int)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 341 Data size: 76542 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: int)
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkLongOperator
-                              native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        top n: 10
+                        Top N Key Vectorization:
+                            className: VectorTopNKeyOperator
+                            keyExpressions: col 4:int
+                            native: true
+                        Group By Operator
+                          aggregations: sum(_col0)
+                          Group By Vectorization:
+                              aggregators: VectorUDAFSumLong(col 4:int) -> bigint
+                              className: VectorGroupByOperator
+                              groupByMode: HASH
+                              keyExpressions: col 4:int
+                              native: false
+                              vectorProcessingMode: HASH
+                              projectedOutputColumnNums: [0]
+                          keys: _col0 (type: int)
+                          mode: hash
+                          outputColumnNames: _col0, _col1
                           Statistics: Num rows: 341 Data size: 76542 Basic stats: COMPLETE Column stats: NONE
-                          TopN Hash Memory Usage: 0.1
-                          value expressions: _col1 (type: bigint)
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Reduce Sink Vectorization:
+                                className: VectorReduceSinkLongOperator
+                                native: true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            Statistics: Num rows: 341 Data size: 76542 Basic stats: COMPLETE Column stats: NONE
+                            TopN Hash Memory Usage: 0.1
+                            value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
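
In the vectorized plan above the same check runs as VectorTopNKeyOperator (native: true) directly over the key column of each batch (keyExpressions: col 4:int), so rows are filtered a batch at a time before the vectorized Group By. Below is a rough, hypothetical batch-oriented sketch of that idea; it does not use Hive's VectorizedRowBatch classes, and all names are invented.

    import java.util.Arrays;

    /** Hypothetical batch-at-a-time variant of the top-n key check (not Hive's vectorized operator). */
    public class VectorTopNKeySketch {
      /**
       * Keeps only the rows whose int key is at most the current top-n boundary,
       * writes their indexes into 'selected', and returns how many were kept.
       */
      static int filterBatch(int[] keyColumn, int batchSize, int boundary, int[] selected) {
        int selectedCount = 0;
        for (int row = 0; row < batchSize; row++) {
          if (keyColumn[row] <= boundary) {     // same comparison as the row-mode sketch, per batch
            selected[selectedCount++] = row;
          }
        }
        return selectedCount;
      }

      public static void main(String[] args) {
        int[] keys = {7, 2, 9, 4};
        int[] selected = new int[keys.length];
        int kept = filterBatch(keys, keys.length, 5, selected);
        System.out.println("kept rows " + Arrays.toString(Arrays.copyOf(selected, kept)));
      }
    }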

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/parquet_complex_types_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/parquet_complex_types_vectorization.q.out b/ql/src/test/results/clientpositive/llap/parquet_complex_types_vectorization.q.out
index da9dd4e..baeb60e 100644
--- a/ql/src/test/results/clientpositive/llap/parquet_complex_types_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/llap/parquet_complex_types_vectorization.q.out
@@ -214,31 +214,40 @@ STAGE PLANS:
                           projectedOutputColumnNums: [6, 7]
                           selectExpressions: ListIndexColScalar(col 2:array<int>, col 1:int) -> 6:int, ListIndexColScalar(col 2:array<int>, col 0:int) -> 7:int
                       Statistics: Num rows: 341 Data size: 38920 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: sum(_col1)
-                        Group By Vectorization:
-                            aggregators: VectorUDAFSumLong(col 7:int) -> bigint
-                            className: VectorGroupByOperator
-                            groupByMode: HASH
-                            keyExpressions: col 6:int
-                            native: false
-                            vectorProcessingMode: HASH
-                            projectedOutputColumnNums: [0]
+                      Top N Key Operator
+                        sort order: +
                         keys: _col0 (type: int)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 341 Data size: 38920 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: int)
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkLongOperator
-                              native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        top n: 10
+                        Top N Key Vectorization:
+                            className: VectorTopNKeyOperator
+                            keyExpressions: col 6:int
+                            native: true
+                        Group By Operator
+                          aggregations: sum(_col1)
+                          Group By Vectorization:
+                              aggregators: VectorUDAFSumLong(col 7:int) -> bigint
+                              className: VectorGroupByOperator
+                              groupByMode: HASH
+                              keyExpressions: col 6:int
+                              native: false
+                              vectorProcessingMode: HASH
+                              projectedOutputColumnNums: [0]
+                          keys: _col0 (type: int)
+                          mode: hash
+                          outputColumnNames: _col0, _col1
                           Statistics: Num rows: 341 Data size: 38920 Basic stats: COMPLETE Column stats: NONE
-                          TopN Hash Memory Usage: 0.1
-                          value expressions: _col1 (type: bigint)
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Reduce Sink Vectorization:
+                                className: VectorReduceSinkLongOperator
+                                native: true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            Statistics: Num rows: 341 Data size: 38920 Basic stats: COMPLETE Column stats: NONE
+                            TopN Hash Memory Usage: 0.1
+                            value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs (cache only)
             Map Vectorization:
@@ -503,31 +512,40 @@ STAGE PLANS:
                           projectedOutputColumnNums: [6, 7]
                           selectExpressions: ListIndexColScalar(col 2:array<int>, col 1:int) -> 6:int, ListIndexColScalar(col 2:array<int>, col 0:int) -> 7:int
                       Statistics: Num rows: 341 Data size: 38921 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: sum(_col1)
-                        Group By Vectorization:
-                            aggregators: VectorUDAFSumLong(col 7:int) -> bigint
-                            className: VectorGroupByOperator
-                            groupByMode: HASH
-                            keyExpressions: col 6:int
-                            native: false
-                            vectorProcessingMode: HASH
-                            projectedOutputColumnNums: [0]
+                      Top N Key Operator
+                        sort order: +
                         keys: _col0 (type: int)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 341 Data size: 38921 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: int)
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkLongOperator
-                              native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        top n: 10
+                        Top N Key Vectorization:
+                            className: VectorTopNKeyOperator
+                            keyExpressions: col 6:int
+                            native: true
+                        Group By Operator
+                          aggregations: sum(_col1)
+                          Group By Vectorization:
+                              aggregators: VectorUDAFSumLong(col 7:int) -> bigint
+                              className: VectorGroupByOperator
+                              groupByMode: HASH
+                              keyExpressions: col 6:int
+                              native: false
+                              vectorProcessingMode: HASH
+                              projectedOutputColumnNums: [0]
+                          keys: _col0 (type: int)
+                          mode: hash
+                          outputColumnNames: _col0, _col1
                           Statistics: Num rows: 341 Data size: 38921 Basic stats: COMPLETE Column stats: NONE
-                          TopN Hash Memory Usage: 0.1
-                          value expressions: _col1 (type: bigint)
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Reduce Sink Vectorization:
+                                className: VectorReduceSinkLongOperator
+                                native: true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            Statistics: Num rows: 341 Data size: 38921 Basic stats: COMPLETE Column stats: NONE
+                            TopN Hash Memory Usage: 0.1
+                            value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs (cache only)
             Map Vectorization:
@@ -792,31 +810,40 @@ STAGE PLANS:
                           projectedOutputColumnNums: [6, 7]
                           selectExpressions: ListIndexColScalar(col 2:array<int>, col 1:int) -> 6:int, ListIndexColScalar(col 2:array<int>, col 0:int) -> 7:int
                       Statistics: Num rows: 341 Data size: 38923 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: sum(_col1)
-                        Group By Vectorization:
-                            aggregators: VectorUDAFSumLong(col 7:int) -> bigint
-                            className: VectorGroupByOperator
-                            groupByMode: HASH
-                            keyExpressions: col 6:int
-                            native: false
-                            vectorProcessingMode: HASH
-                            projectedOutputColumnNums: [0]
+                      Top N Key Operator
+                        sort order: +
                         keys: _col0 (type: int)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 341 Data size: 38923 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: int)
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkLongOperator
-                              native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        top n: 10
+                        Top N Key Vectorization:
+                            className: VectorTopNKeyOperator
+                            keyExpressions: col 6:int
+                            native: true
+                        Group By Operator
+                          aggregations: sum(_col1)
+                          Group By Vectorization:
+                              aggregators: VectorUDAFSumLong(col 7:int) -> bigint
+                              className: VectorGroupByOperator
+                              groupByMode: HASH
+                              keyExpressions: col 6:int
+                              native: false
+                              vectorProcessingMode: HASH
+                              projectedOutputColumnNums: [0]
+                          keys: _col0 (type: int)
+                          mode: hash
+                          outputColumnNames: _col0, _col1
                           Statistics: Num rows: 341 Data size: 38923 Basic stats: COMPLETE Column stats: NONE
-                          TopN Hash Memory Usage: 0.1
-                          value expressions: _col1 (type: bigint)
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Reduce Sink Vectorization:
+                                className: VectorReduceSinkLongOperator
+                                native: true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            Statistics: Num rows: 341 Data size: 38923 Basic stats: COMPLETE Column stats: NONE
+                            TopN Hash Memory Usage: 0.1
+                            value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs (cache only)
             Map Vectorization:
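
The expected-plan updates for these vectorized queries can be reproduced by running EXPLAIN on the corresponding statements once the operator is available. A minimal JDBC snippet for printing such a plan is sketched below; the connection URL, table name, and query are placeholders rather than values taken from these tests, and the Hive JDBC driver is assumed to be on the classpath.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    /** Prints an EXPLAIN plan over Hive JDBC; URL, table, and query are illustrative placeholders. */
    public class ExplainTopNKeySketch {
      public static void main(String[] args) throws Exception {
        // Assumes a reachable HiveServer2 and hive-jdbc on the classpath; adjust the URL as needed.
        try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                 "EXPLAIN SELECT value, sum(cast(key AS double) + 1.0) FROM src "
                     + "GROUP BY value ORDER BY value LIMIT 30")) {
          while (rs.next()) {
            System.out.println(rs.getString(1));  // plan text, one line per row of the result set
          }
        }
      }
    }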


[42/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
index 0000000,2bae133..6fcfbce
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
@@@ -1,0 -1,514 +1,621 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.txn;
+ 
+ import java.sql.Connection;
+ import java.sql.Driver;
+ import java.sql.PreparedStatement;
+ import java.sql.ResultSet;
+ import java.sql.ResultSetMetaData;
+ import java.sql.SQLException;
+ import java.sql.SQLTransactionRollbackException;
+ import java.sql.Statement;
+ import java.util.Properties;
+ 
+ import com.google.common.annotations.VisibleForTesting;
++import jline.internal.Log;
+ import org.apache.hadoop.conf.Configuration;
++import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
++import org.apache.zookeeper.txn.TxnHeader;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ /**
+  * Utility methods for creating and destroying txn database/schema, plus methods for
+  * querying against metastore tables.
+  * Placed here in a separate class so it can be shared across unit tests.
+  */
+ public final class TxnDbUtil {
+ 
+   static final private Logger LOG = LoggerFactory.getLogger(TxnDbUtil.class.getName());
+   private static final String TXN_MANAGER = "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager";
+ 
+   private static int deadlockCnt = 0;
+ 
+   private TxnDbUtil() {
+     throw new UnsupportedOperationException("Can't initialize class");
+   }
+ 
+   /**
+    * Set up the configuration so it will use the DbTxnManager, concurrency will be set to true,
+    * and the JDBC configs will be set for putting the transaction and lock info in the embedded
+    * metastore.
+    *
+    * @param conf HiveConf to add these values to
+    */
+   public static void setConfValues(Configuration conf) {
+     MetastoreConf.setVar(conf, ConfVars.HIVE_TXN_MANAGER, TXN_MANAGER);
+     MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
+   }
+ 
+   public static void prepDb(Configuration conf) throws Exception {
+     // This is a bogus hack because it copies the contents of the SQL file
+     // intended for creating derby databases, and thus will inexorably get
+     // out of date with it.  I'm open to any suggestions on how to make this
+     // read the file in a build friendly way.
+ 
+     Connection conn = null;
+     Statement stmt = null;
+     try {
+       conn = getConnection(conf);
+       stmt = conn.createStatement();
+       stmt.execute("CREATE TABLE TXNS (" +
+           "  TXN_ID bigint PRIMARY KEY," +
+           "  TXN_STATE char(1) NOT NULL," +
+           "  TXN_STARTED bigint NOT NULL," +
+           "  TXN_LAST_HEARTBEAT bigint NOT NULL," +
+           "  TXN_USER varchar(128) NOT NULL," +
+           "  TXN_HOST varchar(128) NOT NULL," +
+           "  TXN_TYPE integer)");
+ 
+       stmt.execute("CREATE TABLE TXN_COMPONENTS (" +
+           "  TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID)," +
+           "  TC_DATABASE varchar(128) NOT NULL," +
+           "  TC_TABLE varchar(128)," +
+           "  TC_PARTITION varchar(767)," +
+           "  TC_OPERATION_TYPE char(1) NOT NULL," +
+           "  TC_WRITEID bigint)");
+       stmt.execute("CREATE TABLE COMPLETED_TXN_COMPONENTS (" +
+           "  CTC_TXNID bigint NOT NULL," +
+           "  CTC_DATABASE varchar(128) NOT NULL," +
+           "  CTC_TABLE varchar(128)," +
+           "  CTC_PARTITION varchar(767)," +
+           "  CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL," +
+           "  CTC_WRITEID bigint," +
+           "  CTC_UPDATE_DELETE char(1) NOT NULL)");
+       stmt.execute("CREATE TABLE NEXT_TXN_ID (" + "  NTXN_NEXT bigint NOT NULL)");
+       stmt.execute("INSERT INTO NEXT_TXN_ID VALUES(1)");
+ 
+       stmt.execute("CREATE TABLE TXN_TO_WRITE_ID (" +
+           " T2W_TXNID bigint NOT NULL," +
+           " T2W_DATABASE varchar(128) NOT NULL," +
+           " T2W_TABLE varchar(256) NOT NULL," +
+           " T2W_WRITEID bigint NOT NULL)");
+       stmt.execute("CREATE TABLE NEXT_WRITE_ID (" +
+           " NWI_DATABASE varchar(128) NOT NULL," +
+           " NWI_TABLE varchar(256) NOT NULL," +
+           " NWI_NEXT bigint NOT NULL)");
+ 
+       stmt.execute("CREATE TABLE MIN_HISTORY_LEVEL (" +
+           " MHL_TXNID bigint NOT NULL," +
+           " MHL_MIN_OPEN_TXNID bigint NOT NULL," +
+           " PRIMARY KEY(MHL_TXNID))");
+ 
+       stmt.execute("CREATE TABLE HIVE_LOCKS (" +
+           " HL_LOCK_EXT_ID bigint NOT NULL," +
+           " HL_LOCK_INT_ID bigint NOT NULL," +
+           " HL_TXNID bigint NOT NULL," +
+           " HL_DB varchar(128) NOT NULL," +
+           " HL_TABLE varchar(128)," +
+           " HL_PARTITION varchar(767)," +
+           " HL_LOCK_STATE char(1) NOT NULL," +
+           " HL_LOCK_TYPE char(1) NOT NULL," +
+           " HL_LAST_HEARTBEAT bigint NOT NULL," +
+           " HL_ACQUIRED_AT bigint," +
+           " HL_USER varchar(128) NOT NULL," +
+           " HL_HOST varchar(128) NOT NULL," +
+           " HL_HEARTBEAT_COUNT integer," +
+           " HL_AGENT_INFO varchar(128)," +
+           " HL_BLOCKEDBY_EXT_ID bigint," +
+           " HL_BLOCKEDBY_INT_ID bigint," +
+         " PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))");
+       stmt.execute("CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID)");
+ 
+       stmt.execute("CREATE TABLE NEXT_LOCK_ID (" + " NL_NEXT bigint NOT NULL)");
+       stmt.execute("INSERT INTO NEXT_LOCK_ID VALUES(1)");
+ 
+       stmt.execute("CREATE TABLE COMPACTION_QUEUE (" +
+           " CQ_ID bigint PRIMARY KEY," +
+           " CQ_DATABASE varchar(128) NOT NULL," +
+           " CQ_TABLE varchar(128) NOT NULL," +
+           " CQ_PARTITION varchar(767)," +
+           " CQ_STATE char(1) NOT NULL," +
+           " CQ_TYPE char(1) NOT NULL," +
+           " CQ_TBLPROPERTIES varchar(2048)," +
+           " CQ_WORKER_ID varchar(128)," +
+           " CQ_START bigint," +
+           " CQ_RUN_AS varchar(128)," +
+           " CQ_HIGHEST_WRITE_ID bigint," +
+           " CQ_META_INFO varchar(2048) for bit data," +
+           " CQ_HADOOP_JOB_ID varchar(32))");
+ 
+       stmt.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)");
+       stmt.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)");
+ 
+       stmt.execute("CREATE TABLE COMPLETED_COMPACTIONS (" +
+           " CC_ID bigint PRIMARY KEY," +
+           " CC_DATABASE varchar(128) NOT NULL," +
+           " CC_TABLE varchar(128) NOT NULL," +
+           " CC_PARTITION varchar(767)," +
+           " CC_STATE char(1) NOT NULL," +
+           " CC_TYPE char(1) NOT NULL," +
+           " CC_TBLPROPERTIES varchar(2048)," +
+           " CC_WORKER_ID varchar(128)," +
+           " CC_START bigint," +
+           " CC_END bigint," +
+           " CC_RUN_AS varchar(128)," +
+           " CC_HIGHEST_WRITE_ID bigint," +
+           " CC_META_INFO varchar(2048) for bit data," +
+           " CC_HADOOP_JOB_ID varchar(32))");
+ 
+       stmt.execute("CREATE TABLE AUX_TABLE (" +
+         " MT_KEY1 varchar(128) NOT NULL," +
+         " MT_KEY2 bigint NOT NULL," +
+         " MT_COMMENT varchar(255)," +
+         " PRIMARY KEY(MT_KEY1, MT_KEY2))");
+ 
+       stmt.execute("CREATE TABLE WRITE_SET (" +
+         " WS_DATABASE varchar(128) NOT NULL," +
+         " WS_TABLE varchar(128) NOT NULL," +
+         " WS_PARTITION varchar(767)," +
+         " WS_TXNID bigint NOT NULL," +
+         " WS_COMMIT_ID bigint NOT NULL," +
+         " WS_OPERATION_TYPE char(1) NOT NULL)"
+       );
+ 
+       stmt.execute("CREATE TABLE REPL_TXN_MAP (" +
+           " RTM_REPL_POLICY varchar(256) NOT NULL, " +
+           " RTM_SRC_TXN_ID bigint NOT NULL, " +
+           " RTM_TARGET_TXN_ID bigint NOT NULL, " +
+           " PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID))"
+       );
+ 
+       stmt.execute("CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (" +
+           "  MRL_TXN_ID BIGINT NOT NULL, " +
+           "  MRL_DB_NAME VARCHAR(128) NOT NULL, " +
+           "  MRL_TBL_NAME VARCHAR(256) NOT NULL, " +
+           "  MRL_LAST_HEARTBEAT BIGINT NOT NULL, " +
+           "  PRIMARY KEY(MRL_TXN_ID))"
+       );
+ 
+       try {
++        stmt.execute("CREATE TABLE \"APP\".\"TBLS\" (\"TBL_ID\" BIGINT NOT NULL, " +
++            " \"CREATE_TIME\" INTEGER NOT NULL, \"DB_ID\" BIGINT, \"LAST_ACCESS_TIME\" INTEGER NOT NULL, " +
++            " \"OWNER\" VARCHAR(767), \"OWNER_TYPE\" VARCHAR(10), \"RETENTION\" INTEGER NOT NULL, " +
++            " \"SD_ID\" BIGINT, \"TBL_NAME\" VARCHAR(256), \"TBL_TYPE\" VARCHAR(128), " +
++            " \"VIEW_EXPANDED_TEXT\" LONG VARCHAR, \"VIEW_ORIGINAL_TEXT\" LONG VARCHAR, " +
++            " \"IS_REWRITE_ENABLED\" CHAR(1) NOT NULL DEFAULT \'N\', " +
++            " \"WRITE_ID\" BIGINT DEFAULT 0, " +
++            " PRIMARY KEY (TBL_ID))"
++        );
++      } catch (SQLException e) {
++        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
++          LOG.info("TBLS table already exists, ignoring");
++        } else {
++          throw e;
++        }
++      }
++
++      try {
++        stmt.execute("CREATE TABLE \"APP\".\"DBS\" (\"DB_ID\" BIGINT NOT NULL, \"DESC\" " +
++            "VARCHAR(4000), \"DB_LOCATION_URI\" VARCHAR(4000) NOT NULL, \"NAME\" VARCHAR(128), " +
++            "\"OWNER_NAME\" VARCHAR(128), \"OWNER_TYPE\" VARCHAR(10), " +
++            "\"CTLG_NAME\" VARCHAR(256) NOT NULL, PRIMARY KEY (DB_ID))");
++      } catch (SQLException e) {
++        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
++          LOG.info("DBS table already exists, ignoring");
++        } else {
++          throw e;
++        }
++      }
++
++      try {
++        stmt.execute("CREATE TABLE \"APP\".\"PARTITIONS\" (" +
++            " \"PART_ID\" BIGINT NOT NULL, \"CREATE_TIME\" INTEGER NOT NULL, " +
++            " \"LAST_ACCESS_TIME\" INTEGER NOT NULL, \"PART_NAME\" VARCHAR(767), " +
++            " \"SD_ID\" BIGINT, \"TBL_ID\" BIGINT, " +
++            " \"WRITE_ID\" BIGINT DEFAULT 0, " +
++            " PRIMARY KEY (PART_ID))"
++        );
++      } catch (SQLException e) {
++        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
++          LOG.info("PARTITIONS table already exists, ignoring");
++        } else {
++          throw e;
++        }
++      }
++
++      try {
++        stmt.execute("CREATE TABLE \"APP\".\"TABLE_PARAMS\" (" +
++            " \"TBL_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " +
++            " \"PARAM_VALUE\" CLOB, " +
++            " PRIMARY KEY (TBL_ID, PARAM_KEY))"
++        );
++      } catch (SQLException e) {
++        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
++          LOG.info("TABLE_PARAMS table already exists, ignoring");
++        } else {
++          throw e;
++        }
++      }
++
++      try {
++        stmt.execute("CREATE TABLE \"APP\".\"PARTITION_PARAMS\" (" +
++            " \"PART_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " +
++            " \"PARAM_VALUE\" VARCHAR(4000), " +
++            " PRIMARY KEY (PART_ID, PARAM_KEY))"
++        );
++      } catch (SQLException e) {
++        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
++          LOG.info("PARTITION_PARAMS table already exists, ignoring");
++        } else {
++          throw e;
++        }
++      }
++
++      try {
+         stmt.execute("CREATE TABLE \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\" VARCHAR(256) NOT " +
+ 
+                 "NULL, \"NEXT_VAL\" BIGINT NOT NULL)"
+         );
+       } catch (SQLException e) {
+         if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+           LOG.info("SEQUENCE_TABLE table already exists, ignoring");
+         } else {
+           throw e;
+         }
+       }
+ 
+       try {
+         stmt.execute("CREATE TABLE \"APP\".\"NOTIFICATION_SEQUENCE\" (\"NNI_ID\" BIGINT NOT NULL, " +
+ 
+                 "\"NEXT_EVENT_ID\" BIGINT NOT NULL)"
+         );
+       } catch (SQLException e) {
+         if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+           LOG.info("NOTIFICATION_SEQUENCE table already exists, ignoring");
+         } else {
+           throw e;
+         }
+       }
+ 
+       try {
+         stmt.execute("CREATE TABLE \"APP\".\"NOTIFICATION_LOG\" (\"NL_ID\" BIGINT NOT NULL, " +
+                 "\"DB_NAME\" VARCHAR(128), \"EVENT_ID\" BIGINT NOT NULL, \"EVENT_TIME\" INTEGER NOT" +
+ 
+                 " NULL, \"EVENT_TYPE\" VARCHAR(32) NOT NULL, \"MESSAGE\" CLOB, \"TBL_NAME\" " +
+                 "VARCHAR" +
+                 "(256), \"MESSAGE_FORMAT\" VARCHAR(16))"
+         );
+       } catch (SQLException e) {
+         if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+           LOG.info("NOTIFICATION_LOG table already exists, ignoring");
+         } else {
+           throw e;
+         }
+       }
+ 
+       stmt.execute("INSERT INTO \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\", \"NEXT_VAL\") " +
+               "SELECT * FROM (VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', " +
+               "1)) tmp_table WHERE NOT EXISTS ( SELECT \"NEXT_VAL\" FROM \"APP\"" +
+               ".\"SEQUENCE_TABLE\" WHERE \"SEQUENCE_NAME\" = 'org.apache.hadoop.hive.metastore" +
+               ".model.MNotificationLog')");
+ 
+       stmt.execute("INSERT INTO \"APP\".\"NOTIFICATION_SEQUENCE\" (\"NNI_ID\", \"NEXT_EVENT_ID\")" +
+               " SELECT * FROM (VALUES (1,1)) tmp_table WHERE NOT EXISTS ( SELECT " +
+               "\"NEXT_EVENT_ID\" FROM \"APP\".\"NOTIFICATION_SEQUENCE\")");
+ 
+       try {
+         stmt.execute("CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (" +
+                 "WNL_ID bigint NOT NULL," +
+                 "WNL_TXNID bigint NOT NULL," +
+                 "WNL_WRITEID bigint NOT NULL," +
+                 "WNL_DATABASE varchar(128) NOT NULL," +
+                 "WNL_TABLE varchar(128) NOT NULL," +
+                 "WNL_PARTITION varchar(1024) NOT NULL," +
+                 "WNL_TABLE_OBJ clob NOT NULL," +
+                 "WNL_PARTITION_OBJ clob," +
+                 "WNL_FILES clob," +
+                 "WNL_EVENT_TIME integer NOT NULL," +
+                 "PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION))"
+         );
+       } catch (SQLException e) {
+         if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+           LOG.info("TXN_WRITE_NOTIFICATION_LOG table already exists, ignoring");
+         } else {
+           throw e;
+         }
+       }
+ 
+       stmt.execute("INSERT INTO \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\", \"NEXT_VAL\") " +
+               "SELECT * FROM (VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', " +
+               "1)) tmp_table WHERE NOT EXISTS ( SELECT \"NEXT_VAL\" FROM \"APP\"" +
+               ".\"SEQUENCE_TABLE\" WHERE \"SEQUENCE_NAME\" = 'org.apache.hadoop.hive.metastore" +
+               ".model.MTxnWriteNotificationLog')");
+     } catch (SQLException e) {
+       try {
+         conn.rollback();
+       } catch (SQLException re) {
+         LOG.error("Error rolling back: " + re.getMessage());
+       }
+ 
+       // Another thread might have already created these tables.
+       if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+         LOG.info("Txn tables already exist, returning");
+         return;
+       }
+ 
+       // This might be a deadlock, if so, let's retry
+       if (e instanceof SQLTransactionRollbackException && deadlockCnt++ < 5) {
+         LOG.warn("Caught deadlock, retrying db creation");
+         prepDb(conf);
+       } else {
+         throw e;
+       }
+     } finally {
+       deadlockCnt = 0;
+       closeResources(conn, stmt, null);
+     }
+   }
+ 
+   public static void cleanDb(Configuration conf) throws Exception {
+     int retryCount = 0;
+     while(++retryCount <= 3) {
+       boolean success = true;
+       Connection conn = null;
+       Statement stmt = null;
+       try {
+         conn = getConnection(conf);
+         stmt = conn.createStatement();
+ 
+         // We want to try these, whether they succeed or fail.
+         try {
+           stmt.execute("DROP INDEX HL_TXNID_INDEX");
+         } catch (SQLException e) {
+           if(!("42X65".equals(e.getSQLState()) && 30000 == e.getErrorCode())) {
+             //42X65/30000 means the index doesn't exist
+             LOG.error("Unable to drop index HL_TXNID_INDEX " + e.getMessage() +
+               "State=" + e.getSQLState() + " code=" + e.getErrorCode() + " retryCount=" + retryCount);
+             success = false;
+           }
+         }
+ 
+         success &= dropTable(stmt, "TXN_COMPONENTS", retryCount);
+         success &= dropTable(stmt, "COMPLETED_TXN_COMPONENTS", retryCount);
+         success &= dropTable(stmt, "TXNS", retryCount);
+         success &= dropTable(stmt, "NEXT_TXN_ID", retryCount);
+         success &= dropTable(stmt, "TXN_TO_WRITE_ID", retryCount);
+         success &= dropTable(stmt, "NEXT_WRITE_ID", retryCount);
+         success &= dropTable(stmt, "MIN_HISTORY_LEVEL", retryCount);
+         success &= dropTable(stmt, "HIVE_LOCKS", retryCount);
+         success &= dropTable(stmt, "NEXT_LOCK_ID", retryCount);
+         success &= dropTable(stmt, "COMPACTION_QUEUE", retryCount);
+         success &= dropTable(stmt, "NEXT_COMPACTION_QUEUE_ID", retryCount);
+         success &= dropTable(stmt, "COMPLETED_COMPACTIONS", retryCount);
+         success &= dropTable(stmt, "AUX_TABLE", retryCount);
+         success &= dropTable(stmt, "WRITE_SET", retryCount);
+         success &= dropTable(stmt, "REPL_TXN_MAP", retryCount);
+         success &= dropTable(stmt, "MATERIALIZATION_REBUILD_LOCKS", retryCount);
+         /*
+          * Don't drop NOTIFICATION_LOG, SEQUENCE_TABLE and NOTIFICATION_SEQUENCE, as they are used by
+          * other, non-txn tables to generate primary keys. If these tables were dropped while the
+          * other tables were not, subsequent inserts into those tables would fail with duplicate key
+          * errors.
+          */
+       } finally {
+         closeResources(conn, stmt, null);
+       }
+       if(success) {
+         return;
+       }
+     }
+     throw new RuntimeException("Failed to clean up txn tables");
+   }
+ 
+   private static boolean dropTable(Statement stmt, String name, int retryCount) throws SQLException {
+     for (int i = 0; i < 3; i++) {
+       try {
+         stmt.execute("DROP TABLE " + name);
+         LOG.debug("Successfully dropped table " + name);
+         return true;
+       } catch (SQLException e) {
+         if ("42Y55".equals(e.getSQLState()) && 30000 == e.getErrorCode()) {
+           LOG.debug("Not dropping " + name + " because it doesn't exist");
+           //failed because object doesn't exist
+           return true;
+         }
+         if ("X0Y25".equals(e.getSQLState()) && 30000 == e.getErrorCode()) {
+           // Intermittent failure
+           LOG.warn("Intermittent drop failure, retrying, try number " + i);
+           continue;
+         }
+         LOG.error("Unable to drop table " + name + ": " + e.getMessage() +
+             " State=" + e.getSQLState() + " code=" + e.getErrorCode() + " retryCount=" + retryCount);
+       }
+     }
+     LOG.error("Failed to drop table " + name + " after multiple retries");
+     return false;
+   }
+ 
+   /**
+    * A tool to count the number of partitions, tables,
+    * and databases locked by a particular lockId.
+    *
+    * @param lockId lock id to look for lock components
+    *
+    * @return number of components, or 0 if there is no lock
+    */
+   public static int countLockComponents(Configuration conf, long lockId) throws Exception {
+     Connection conn = null;
+     PreparedStatement stmt = null;
+     ResultSet rs = null;
+     try {
+       conn = getConnection(conf);
+       stmt = conn.prepareStatement("SELECT count(*) FROM hive_locks WHERE hl_lock_ext_id = ?");
+       stmt.setLong(1, lockId);
+       rs = stmt.executeQuery();
+       if (!rs.next()) {
+         return 0;
+       }
+       return rs.getInt(1);
+     } finally {
+       closeResources(conn, stmt, rs);
+     }
+   }
+ 
+   /**
++   * Return true if a transaction with the given txnId exists in the TXNS table,
++   * i.e. it is currently open or aborted.
++   * @param conf    HiveConf
++   * @param txnId   transaction id to search for
++   * @return true if such a transaction exists, false otherwise
++   * @throws Exception
++   */
++  public static boolean isOpenOrAbortedTransaction(Configuration conf, long txnId) throws Exception {
++    Connection conn = null;
++    PreparedStatement stmt = null;
++    ResultSet rs = null;
++    try {
++      conn = getConnection(conf);
++      conn.setAutoCommit(false);
++      conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
++
++      stmt = conn.prepareStatement("SELECT txn_id FROM TXNS WHERE txn_id = ?");
++      stmt.setLong(1, txnId);
++      rs = stmt.executeQuery();
++      return rs.next();
++    } finally {
++      closeResources(conn, stmt, rs);
++    }
++  }
++
++  /**
+    * Utility method used to run COUNT queries like "select count(*) from ..." against metastore tables.
+    * @param countQuery text of the COUNT query to run
+    * @return the count returned by the query
+    * @throws Exception
+    */
+   public static int countQueryAgent(Configuration conf, String countQuery) throws Exception {
+     Connection conn = null;
+     Statement stmt = null;
+     ResultSet rs = null;
+     try {
+       conn = getConnection(conf);
+       stmt = conn.createStatement();
+       rs = stmt.executeQuery(countQuery);
+       if (!rs.next()) {
+         return 0;
+       }
+       return rs.getInt(1);
+     } finally {
+       closeResources(conn, stmt, rs);
+     }
+   }
+   @VisibleForTesting
+   public static String queryToString(Configuration conf, String query) throws Exception {
+     return queryToString(conf, query, true);
+   }
+   public static String queryToString(Configuration conf, String query, boolean includeHeader)
+       throws Exception {
+     Connection conn = null;
+     Statement stmt = null;
+     ResultSet rs = null;
+     StringBuilder sb = new StringBuilder();
+     try {
+       conn = getConnection(conf);
+       stmt = conn.createStatement();
+       rs = stmt.executeQuery(query);
+       ResultSetMetaData rsmd = rs.getMetaData();
+       if(includeHeader) {
+         for (int colPos = 1; colPos <= rsmd.getColumnCount(); colPos++) {
+           sb.append(rsmd.getColumnName(colPos)).append("   ");
+         }
+         sb.append('\n');
+       }
+       while(rs.next()) {
+         for (int colPos = 1; colPos <= rsmd.getColumnCount(); colPos++) {
+           sb.append(rs.getObject(colPos)).append("   ");
+         }
+         sb.append('\n');
+       }
+     } finally {
+       closeResources(conn, stmt, rs);
+     }
+     return sb.toString();
+   }
+ 
+   static Connection getConnection(Configuration conf) throws Exception {
+     String jdbcDriver = MetastoreConf.getVar(conf, ConfVars.CONNECTION_DRIVER);
+     Driver driver = (Driver) Class.forName(jdbcDriver).newInstance();
+     Properties prop = new Properties();
+     String driverUrl = MetastoreConf.getVar(conf, ConfVars.CONNECT_URL_KEY);
+     String user = MetastoreConf.getVar(conf, ConfVars.CONNECTION_USER_NAME);
+     String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD);
+     prop.setProperty("user", user);
+     prop.setProperty("password", passwd);
+     Connection conn = driver.connect(driverUrl, prop);
+     conn.setAutoCommit(true);
+     return conn;
+   }
+ 
+   static void closeResources(Connection conn, Statement stmt, ResultSet rs) {
+     if (rs != null) {
+       try {
+         rs.close();
+       } catch (SQLException e) {
+         LOG.error("Error closing ResultSet: " + e.getMessage());
+       }
+     }
+ 
+     if (stmt != null) {
+       try {
+         stmt.close();
+       } catch (SQLException e) {
+         System.err.println("Error closing Statement: " + e.getMessage());
+       }
+     }
+ 
+     if (conn != null) {
+       try {
+         conn.rollback();
+       } catch (SQLException e) {
+         System.err.println("Error rolling back: " + e.getMessage());
+       }
+       try {
+         conn.close();
+       } catch (SQLException e) {
+         System.err.println("Error closing Connection: " + e.getMessage());
+       }
+     }
+   }
+ }
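
For context, the methods in the file above are meant to be driven from unit tests against an embedded Derby metastore. A minimal usage sketch follows; it assumes MetastoreConf.newMetastoreConf() (not shown here) supplies the embedded connection settings, and it only exercises calls whose signatures appear above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;

    /** Hypothetical test-style usage of TxnDbUtil against an embedded Derby metastore. */
    public class TxnDbUtilUsageSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = MetastoreConf.newMetastoreConf();  // assumed helper for a test config
        TxnDbUtil.setConfValues(conf);                          // DbTxnManager + concurrency enabled
        TxnDbUtil.prepDb(conf);                                 // create TXNS, HIVE_LOCKS, COMPACTION_QUEUE, ...
        try {
          int txns = TxnDbUtil.countQueryAgent(conf, "SELECT COUNT(*) FROM TXNS");
          System.out.println("open/aborted txns: " + txns);
          System.out.println(TxnDbUtil.queryToString(conf, "SELECT * FROM NEXT_TXN_ID"));
          System.out.println("txn 1 open or aborted: " + TxnDbUtil.isOpenOrAbortedTransaction(conf, 1L));
        } finally {
          TxnDbUtil.cleanDb(conf);                              // drop the txn tables again
        }
      }
    }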


[44/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index 0000000,c2bbba5..7b32c08
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@@ -1,0 -1,1686 +1,1688 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  * <p>
+  * http://www.apache.org/licenses/LICENSE-2.0
+  * <p>
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.conf;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.DefaultStorageSchemaReader;
+ import org.apache.hadoop.hive.metastore.HiveAlterHandler;
+ import org.apache.hadoop.hive.metastore.MaterializationsRebuildLockCleanerTask;
+ import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
+ import org.apache.hadoop.hive.metastore.RuntimeStatsCleanerTask;
+ import org.apache.hadoop.hive.metastore.events.EventCleanerTask;
+ import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager;
+ import org.apache.hadoop.hive.metastore.txn.AcidCompactionHistoryService;
+ import org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService;
+ import org.apache.hadoop.hive.metastore.txn.AcidOpenTxnsCounterService;
+ import org.apache.hadoop.hive.metastore.txn.AcidWriteSetService;
+ import org.apache.hadoop.hive.metastore.utils.StringUtils;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import java.io.File;
+ import java.io.IOException;
+ import java.net.URI;
+ import java.net.URL;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Set;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicBoolean;
+ import java.util.regex.Matcher;
+ import java.util.regex.Pattern;
+ 
+ /**
+  * A set of definitions of config values used by the Metastore.  One of the key aims of this
+  * class is to provide backwards compatibility with existing Hive configuration keys while
+  * allowing the metastore to have its own, Hive independent keys.   For this reason access to the
+  * underlying Configuration object should always be done via the static methods provided here
+  * rather than directly via {@link Configuration#get(String)} and
+  * {@link Configuration#set(String, String)}.  All the methods of this class will handle checking
+  * both the MetastoreConf key and the Hive key.  On reads, the MetastoreConf key is checked first,
+  * then the Hive key, and the default is returned if neither is set.  On writes, only the
+  * Metastore key is set.
+  *
+  * This class does not extend Configuration.  Rather it provides static methods for operating on
+  * a Configuration object.  This allows it to work on HiveConf objects, which otherwise would not
+  * be the case.
+  */
+ public class MetastoreConf {
+ 
+   private static final Logger LOG = LoggerFactory.getLogger(MetastoreConf.class);
+   private static final Pattern TIME_UNIT_SUFFIX = Pattern.compile("([0-9]+)([a-zA-Z]+)");
+ 
+   private static final Map<String, ConfVars> metaConfs = new HashMap<>();
+   private static URL hiveDefaultURL = null;
+   private static URL hiveSiteURL = null;
+   private static URL hiveMetastoreSiteURL = null;
+   private static URL metastoreSiteURL = null;
+   private static AtomicBoolean beenDumped = new AtomicBoolean();
+ 
+   private static Map<String, ConfVars> keyToVars;
+ 
+   @VisibleForTesting
+   static final String TEST_ENV_WORKAROUND = "metastore.testing.env.workaround.dont.ever.set.this.";
+ 
+   public static enum StatsUpdateMode {
+     NONE, EXISTING, ALL
+   }
+ 
+   private static class TimeValue {
+     final long val;
+     final TimeUnit unit;
+ 
+     private TimeValue(long val, TimeUnit unit) {
+       this.val = val;
+       this.unit = unit;
+     }
+ 
+     @Override
+     public String toString() {
+       switch (unit) {
+       case NANOSECONDS: return Long.toString(val) + "ns";
+       case MICROSECONDS: return Long.toString(val) + "us";
+       case MILLISECONDS: return Long.toString(val) + "ms";
+       case SECONDS: return Long.toString(val) + "s";
+       case MINUTES: return Long.toString(val) + "m";
+       case HOURS: return Long.toString(val) + "h";
+       case DAYS: return Long.toString(val) + "d";
+       }
+       throw new RuntimeException("Unknown time unit " + unit);
+     }
+   }
+ 
+   /**
+    * Metastore related options that the db is initialized against. When a conf
+    * var in this list is changed, the metastore instance for the CLI will
+    * be recreated so that the change will take effect.
+    * TODO - I suspect the vast majority of these don't need to be here.  But it requires testing
+    * before just pulling them out.
+    */
+   public static final MetastoreConf.ConfVars[] metaVars = {
+       ConfVars.WAREHOUSE,
+       ConfVars.REPLDIR,
+       ConfVars.THRIFT_URIS,
+       ConfVars.SERVER_PORT,
+       ConfVars.THRIFT_CONNECTION_RETRIES,
+       ConfVars.THRIFT_FAILURE_RETRIES,
+       ConfVars.CLIENT_CONNECT_RETRY_DELAY,
+       ConfVars.CLIENT_SOCKET_TIMEOUT,
+       ConfVars.CLIENT_SOCKET_LIFETIME,
+       ConfVars.PWD,
+       ConfVars.CONNECT_URL_HOOK,
+       ConfVars.CONNECT_URL_KEY,
+       ConfVars.SERVER_MIN_THREADS,
+       ConfVars.SERVER_MAX_THREADS,
+       ConfVars.TCP_KEEP_ALIVE,
+       ConfVars.KERBEROS_KEYTAB_FILE,
+       ConfVars.KERBEROS_PRINCIPAL,
+       ConfVars.USE_THRIFT_SASL,
+       ConfVars.TOKEN_SIGNATURE,
+       ConfVars.CACHE_PINOBJTYPES,
+       ConfVars.CONNECTION_POOLING_TYPE,
+       ConfVars.VALIDATE_TABLES,
+       ConfVars.DATANUCLEUS_INIT_COL_INFO,
+       ConfVars.VALIDATE_COLUMNS,
+       ConfVars.VALIDATE_CONSTRAINTS,
+       ConfVars.STORE_MANAGER_TYPE,
+       ConfVars.AUTO_CREATE_ALL,
+       ConfVars.DATANUCLEUS_TRANSACTION_ISOLATION,
+       ConfVars.DATANUCLEUS_CACHE_LEVEL2,
+       ConfVars.DATANUCLEUS_CACHE_LEVEL2_TYPE,
+       ConfVars.IDENTIFIER_FACTORY,
+       ConfVars.DATANUCLEUS_PLUGIN_REGISTRY_BUNDLE_CHECK,
+       ConfVars.AUTHORIZATION_STORAGE_AUTH_CHECKS,
+       ConfVars.BATCH_RETRIEVE_MAX,
+       ConfVars.EVENT_LISTENERS,
+       ConfVars.TRANSACTIONAL_EVENT_LISTENERS,
+       ConfVars.EVENT_CLEAN_FREQ,
+       ConfVars.EVENT_EXPIRY_DURATION,
+       ConfVars.EVENT_MESSAGE_FACTORY,
+       ConfVars.FILTER_HOOK,
+       ConfVars.RAW_STORE_IMPL,
+       ConfVars.END_FUNCTION_LISTENERS,
+       ConfVars.PART_INHERIT_TBL_PROPS,
+       ConfVars.BATCH_RETRIEVE_OBJECTS_MAX,
+       ConfVars.INIT_HOOKS,
+       ConfVars.PRE_EVENT_LISTENERS,
+       ConfVars.HMS_HANDLER_ATTEMPTS,
+       ConfVars.HMS_HANDLER_INTERVAL,
+       ConfVars.HMS_HANDLER_FORCE_RELOAD_CONF,
+       ConfVars.PARTITION_NAME_WHITELIST_PATTERN,
+       ConfVars.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS,
+       ConfVars.USERS_IN_ADMIN_ROLE,
+       ConfVars.HIVE_TXN_MANAGER,
+       ConfVars.TXN_TIMEOUT,
+       ConfVars.TXN_MAX_OPEN_BATCH,
+       ConfVars.TXN_RETRYABLE_SQLEX_REGEX,
+       ConfVars.STATS_NDV_TUNER,
+       ConfVars.STATS_NDV_DENSITY_FUNCTION,
+       ConfVars.AGGREGATE_STATS_CACHE_ENABLED,
+       ConfVars.AGGREGATE_STATS_CACHE_SIZE,
+       ConfVars.AGGREGATE_STATS_CACHE_MAX_PARTITIONS,
+       ConfVars.AGGREGATE_STATS_CACHE_FPP,
+       ConfVars.AGGREGATE_STATS_CACHE_MAX_VARIANCE,
+       ConfVars.AGGREGATE_STATS_CACHE_TTL,
+       ConfVars.AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT,
+       ConfVars.AGGREGATE_STATS_CACHE_MAX_READER_WAIT,
+       ConfVars.AGGREGATE_STATS_CACHE_MAX_FULL,
+       ConfVars.AGGREGATE_STATS_CACHE_CLEAN_UNTIL,
+       ConfVars.DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES,
+       ConfVars.FILE_METADATA_THREADS
+   };
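
To make the comment above concrete: the point of metaVars is that a change to one of these keys should force the CLI to rebuild its metastore client. The following standalone sketch shows that check using a small hand-copied subset of the keys rather than the array itself, since the visibility of ConfVars internals is not shown in this hunk.

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    // Sketch: decide whether a changed key should trigger recreating the
    // metastore client. Keys are a hand-copied subset of metaVars above.
    public class MetaVarsCheckDemo {
      private static final Set<String> RECREATE_TRIGGERS = new HashSet<>(Arrays.asList(
          "metastore.warehouse.dir",
          "metastore.thrift.uris",
          "metastore.client.socket.timeout"));

      static boolean requiresClientRecreate(String changedKey) {
        return RECREATE_TRIGGERS.contains(changedKey);
      }

      public static void main(String[] args) {
        System.out.println(requiresClientRecreate("metastore.thrift.uris"));     // true
        System.out.println(requiresClientRecreate("metastore.metrics.enabled")); // false
      }
    }
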
+ 
+   /**
+    * User configurable Metastore vars
+    */
+   private static final MetastoreConf.ConfVars[] metaConfVars = {
+       ConfVars.TRY_DIRECT_SQL,
+       ConfVars.TRY_DIRECT_SQL_DDL,
+       ConfVars.CLIENT_SOCKET_TIMEOUT,
+       ConfVars.PARTITION_NAME_WHITELIST_PATTERN,
+       ConfVars.CAPABILITY_CHECK,
+       ConfVars.DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES
+   };
+ 
+   static {
+     for (ConfVars confVar : metaConfVars) {
+       metaConfs.put(confVar.varname, confVar);
+       metaConfs.put(confVar.hiveName, confVar);
+     }
+   }
+ 
+   /**
+    * Variables that we should never print the value of for security reasons.
+    */
+   private static final Set<String> unprintables = StringUtils.asSet(
+       ConfVars.PWD.varname,
+       ConfVars.PWD.hiveName,
+       ConfVars.SSL_KEYSTORE_PASSWORD.varname,
+       ConfVars.SSL_KEYSTORE_PASSWORD.hiveName,
+       ConfVars.SSL_TRUSTSTORE_PASSWORD.varname,
+       ConfVars.SSL_TRUSTSTORE_PASSWORD.hiveName
+   );
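
One way the unprintables set could be honored when dumping configuration (for example by DUMP_CONFIG_ON_CREATION further down) is to mask those keys before logging. A standalone sketch follows, with the key names copied from the set above; the masking helper itself is hypothetical and not part of this patch.

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.Set;

    // Sketch: mask sensitive values before a config dump reaches the log.
    public class MaskedDumpDemo {
      private static final Set<String> UNPRINTABLE_KEYS = new HashSet<>(Arrays.asList(
          "javax.jdo.option.ConnectionPassword",
          "metastore.keystore.password",
          "metastore.truststore.password"));

      static Map<String, String> maskForLogging(Map<String, String> conf) {
        Map<String, String> out = new LinkedHashMap<>();
        conf.forEach((k, v) -> out.put(k, UNPRINTABLE_KEYS.contains(k) ? "***" : v));
        return out;
      }

      public static void main(String[] args) {
        Map<String, String> conf = new LinkedHashMap<>();
        conf.put("metastore.thrift.port", "9083");
        conf.put("javax.jdo.option.ConnectionPassword", "secret");
        System.out.println(maskForLogging(conf));
        // {metastore.thrift.port=9083, javax.jdo.option.ConnectionPassword=***}
      }
    }
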
+ 
+   public static ConfVars getMetaConf(String name) {
+     return metaConfs.get(name);
+   }
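
Because the static block above registers both the metastore.* name and the legacy hive.* name, getMetaConf should resolve either spelling to the same constant. A usage sketch, assuming this class is available as org.apache.hadoop.hive.metastore.conf.MetastoreConf (the package declaration is outside this hunk):

    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

    // Sketch: both key spellings of a user-settable variable resolve to the
    // same ConfVars constant via getMetaConf.
    public class MetaConfLookupDemo {
      public static void main(String[] args) {
        ConfVars byNewName = MetastoreConf.getMetaConf("metastore.try.direct.sql");
        ConfVars byLegacyName = MetastoreConf.getMetaConf("hive.metastore.try.direct.sql");
        System.out.println(byNewName == byLegacyName);             // true
        System.out.println(byNewName == ConfVars.TRY_DIRECT_SQL);  // true
      }
    }
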
+ 
+   public enum ConfVars {
+     // alpha order, PLEASE!
+     ADDED_JARS("metastore.added.jars.path", "hive.added.jars.path", "",
+         "This an internal parameter."),
+     AGGREGATE_STATS_CACHE_CLEAN_UNTIL("metastore.aggregate.stats.cache.clean.until",
+         "hive.metastore.aggregate.stats.cache.clean.until", 0.8,
+         "The cleaner thread cleans until cache reaches this % full size."),
+     AGGREGATE_STATS_CACHE_ENABLED("metastore.aggregate.stats.cache.enabled",
+         "hive.metastore.aggregate.stats.cache.enabled", true,
+         "Whether aggregate stats caching is enabled or not."),
+     AGGREGATE_STATS_CACHE_FPP("metastore.aggregate.stats.cache.fpp",
+         "hive.metastore.aggregate.stats.cache.fpp", 0.01,
+         "Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%)."),
+     AGGREGATE_STATS_CACHE_MAX_FULL("metastore.aggregate.stats.cache.max.full",
+         "hive.metastore.aggregate.stats.cache.max.full", 0.9,
+         "Maximum cache full % after which the cache cleaner thread kicks in."),
+     AGGREGATE_STATS_CACHE_MAX_PARTITIONS("metastore.aggregate.stats.cache.max.partitions",
+         "hive.metastore.aggregate.stats.cache.max.partitions", 10000,
+         "Maximum number of partitions that are aggregated per cache node."),
+     AGGREGATE_STATS_CACHE_MAX_READER_WAIT("metastore.aggregate.stats.cache.max.reader.wait",
+         "hive.metastore.aggregate.stats.cache.max.reader.wait", 1000, TimeUnit.MILLISECONDS,
+         "Number of milliseconds a reader will wait to acquire the readlock before giving up."),
+     AGGREGATE_STATS_CACHE_MAX_VARIANCE("metastore.aggregate.stats.cache.max.variance",
+         "hive.metastore.aggregate.stats.cache.max.variance", 0.01,
+         "Maximum tolerable variance in number of partitions between a cached node and our request (default 1%)."),
+     AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT("metastore.aggregate.stats.cache.max.writer.wait",
+         "hive.metastore.aggregate.stats.cache.max.writer.wait", 5000, TimeUnit.MILLISECONDS,
+         "Number of milliseconds a writer will wait to acquire the writelock before giving up."),
+     AGGREGATE_STATS_CACHE_SIZE("metastore.aggregate.stats.cache.size",
+         "hive.metastore.aggregate.stats.cache.size", 10000,
+         "Maximum number of aggregate stats nodes that we will place in the metastore aggregate stats cache."),
+     AGGREGATE_STATS_CACHE_TTL("metastore.aggregate.stats.cache.ttl",
+         "hive.metastore.aggregate.stats.cache.ttl", 600, TimeUnit.SECONDS,
+         "Number of seconds for a cached node to be active in the cache before they become stale."),
+     ALTER_HANDLER("metastore.alter.handler", "hive.metastore.alter.impl",
+         HiveAlterHandler.class.getName(),
+         "Alter handler.  For now defaults to the Hive one.  Really need a better default option"),
+     ASYNC_LOG_ENABLED("metastore.async.log.enabled", "hive.async.log.enabled", true,
+         "Whether to enable Log4j2's asynchronous logging. Asynchronous logging can give\n" +
+             " significant performance improvement as logging will be handled in separate thread\n" +
+             " that uses LMAX disruptor queue for buffering log messages.\n" +
+             " Refer https://logging.apache.org/log4j/2.x/manual/async.html for benefits and\n" +
+             " drawbacks."),
+     AUTHORIZATION_STORAGE_AUTH_CHECKS("metastore.authorization.storage.checks",
+         "hive.metastore.authorization.storage.checks", false,
+         "Should the metastore do authorization checks against the underlying storage (usually hdfs) \n" +
+             "for operations like drop-partition (disallow the drop-partition if the user in\n" +
+             "question doesn't have permissions to delete the corresponding directory\n" +
+             "on the storage)."),
+     AUTO_CREATE_ALL("datanucleus.schema.autoCreateAll", "datanucleus.schema.autoCreateAll", false,
+         "Auto creates necessary schema on a startup if one doesn't exist. Set this to false, after creating it once."
+             + "To enable auto create also set hive.metastore.schema.verification=false. Auto creation is not "
+             + "recommended for production use cases, run schematool command instead." ),
+     BATCH_RETRIEVE_MAX("metastore.batch.retrieve.max", "hive.metastore.batch.retrieve.max", 300,
+         "Maximum number of objects (tables/partitions) can be retrieved from metastore in one batch. \n" +
+             "The higher the number, the less the number of round trips is needed to the Hive metastore server, \n" +
+             "but it may also cause higher memory requirement at the client side."),
+     BATCH_RETRIEVE_OBJECTS_MAX("metastore.batch.retrieve.table.partition.max",
+         "hive.metastore.batch.retrieve.table.partition.max", 1000,
+         "Maximum number of objects that metastore internally retrieves in one batch."),
+     CACHE_PINOBJTYPES("metastore.cache.pinobjtypes", "hive.metastore.cache.pinobjtypes",
+         "Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order",
+         "List of comma separated metastore object types that should be pinned in the cache"),
+     CACHED_RAW_STORE_IMPL("metastore.cached.rawstore.impl",
+         "hive.metastore.cached.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore",
+         "Name of the wrapped RawStore class"),
+     CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY("metastore.cached.rawstore.cache.update.frequency",
+         "hive.metastore.cached.rawstore.cache.update.frequency", 60, TimeUnit.SECONDS,
+         "The time after which metastore cache is updated from metastore DB."),
+     CACHED_RAW_STORE_CACHED_OBJECTS_WHITELIST("metastore.cached.rawstore.cached.object.whitelist",
+         "hive.metastore.cached.rawstore.cached.object.whitelist", ".*", "Comma separated list of regular expressions \n " +
+         "to select the tables (and its partitions, stats etc) that will be cached by CachedStore. \n" +
+         "This can be used in conjunction with hive.metastore.cached.rawstore.cached.object.blacklist. \n" +
+         "Example: .*, db1.*, db2\\.tbl.*. The last item can potentially override patterns specified before."),
+     CACHED_RAW_STORE_CACHED_OBJECTS_BLACKLIST("metastore.cached.rawstore.cached.object.blacklist",
+          "hive.metastore.cached.rawstore.cached.object.blacklist", "", "Comma separated list of regular expressions \n " +
+          "to filter out the tables (and its partitions, stats etc) that will be cached by CachedStore. \n" +
+          "This can be used in conjunction with hive.metastore.cached.rawstore.cached.object.whitelist. \n" +
+          "Example: db2.*, db3\\.tbl1, db3\\..*. The last item can potentially override patterns specified before. \n" +
+          "The blacklist also overrides the whitelist."),
+     CACHED_RAW_STORE_MAX_CACHE_MEMORY("metastore.cached.rawstore.max.cache.memory",
+         "hive.metastore.cached.rawstore.max.cache.memory", "1Gb", new SizeValidator(),
+         "The maximum memory in bytes that the cached objects can use. "
+         + "Memory used is calculated based on estimated size of tables and partitions in the cache. "
+         + "Setting it to a negative value disables memory estimation."),
+     CAPABILITY_CHECK("metastore.client.capability.check",
+         "hive.metastore.client.capability.check", true,
+         "Whether to check client capabilities for potentially breaking API usage."),
+     CATALOG_DEFAULT("metastore.catalog.default", "metastore.catalog.default", "hive",
+         "The default catalog to use when a catalog is not specified.  Default is 'hive' (the " +
+             "default catalog)."),
+     CATALOGS_TO_CACHE("metastore.cached.rawstore.catalogs", "metastore.cached.rawstore.catalogs",
+         "hive", "Comma separated list of catalogs to cache in the CachedStore. Default is 'hive' " +
+         "(the default catalog).  Empty string means all catalogs will be cached."),
+     CLIENT_CONNECT_RETRY_DELAY("metastore.client.connect.retry.delay",
+         "hive.metastore.client.connect.retry.delay", 1, TimeUnit.SECONDS,
+         "Number of seconds for the client to wait between consecutive connection attempts"),
+     CLIENT_KERBEROS_PRINCIPAL("metastore.client.kerberos.principal",
+         "hive.metastore.client.kerberos.principal",
+         "", // E.g. "hive-metastore/_HOST@EXAMPLE.COM".
+         "The Kerberos principal associated with the HA cluster of hcat_servers."),
+     CLIENT_SOCKET_LIFETIME("metastore.client.socket.lifetime",
+         "hive.metastore.client.socket.lifetime", 0, TimeUnit.SECONDS,
+         "MetaStore Client socket lifetime in seconds. After this time is exceeded, client\n" +
+             "reconnects on the next MetaStore operation. A value of 0s means the connection\n" +
+             "has an infinite lifetime."),
+     CLIENT_SOCKET_TIMEOUT("metastore.client.socket.timeout", "hive.metastore.client.socket.timeout", 600,
+             TimeUnit.SECONDS, "MetaStore Client socket timeout in seconds"),
+     COMPACTOR_HISTORY_REAPER_INTERVAL("metastore.compactor.history.reaper.interval",
+         "hive.compactor.history.reaper.interval", 2, TimeUnit.MINUTES,
+         "Determines how often compaction history reaper runs"),
+     COMPACTOR_HISTORY_RETENTION_ATTEMPTED("metastore.compactor.history.retention.attempted",
+         "hive.compactor.history.retention.attempted", 2,
+         new RangeValidator(0, 100), "Determines how many attempted compaction records will be " +
+         "retained in compaction history for a given table/partition."),
+     COMPACTOR_HISTORY_RETENTION_FAILED("metastore.compactor.history.retention.failed",
+         "hive.compactor.history.retention.failed", 3,
+         new RangeValidator(0, 100), "Determines how many failed compaction records will be " +
+         "retained in compaction history for a given table/partition."),
+     COMPACTOR_HISTORY_RETENTION_SUCCEEDED("metastore.compactor.history.retention.succeeded",
+         "hive.compactor.history.retention.succeeded", 3,
+         new RangeValidator(0, 100), "Determines how many successful compaction records will be " +
+         "retained in compaction history for a given table/partition."),
+     COMPACTOR_INITIATOR_FAILED_THRESHOLD("metastore.compactor.initiator.failed.compacts.threshold",
+         "hive.compactor.initiator.failed.compacts.threshold", 2,
+         new RangeValidator(1, 20), "Number of consecutive compaction failures (per table/partition) " +
+         "after which automatic compactions will not be scheduled any more.  Note that this must be less " +
+         "than hive.compactor.history.retention.failed."),
+     COMPACTOR_INITIATOR_ON("metastore.compactor.initiator.on", "hive.compactor.initiator.on", false,
+         "Whether to run the initiator and cleaner threads on this metastore instance or not.\n" +
+             "Set this to true on one instance of the Thrift metastore service as part of turning\n" +
+             "on Hive transactions. For a complete list of parameters required for turning on\n" +
+             "transactions, see hive.txn.manager."),
+     COMPACTOR_WORKER_THREADS("metastore.compactor.worker.threads",
+         "hive.compactor.worker.threads", 0,
+         "How many compactor worker threads to run on this metastore instance. Set this to a\n" +
+             "positive number on one or more instances of the Thrift metastore service as part of\n" +
+             "turning on Hive transactions. For a complete list of parameters required for turning\n" +
+             "on transactions, see hive.txn.manager.\n" +
+             "Worker threads spawn MapReduce jobs to do compactions. They do not do the compactions\n" +
+             "themselves. Increasing the number of worker threads will decrease the time it takes\n" +
+             "tables or partitions to be compacted once they are determined to need compaction.\n" +
+             "It will also increase the background load on the Hadoop cluster as more MapReduce jobs\n" +
+             "will be running in the background."),
+     CONNECTION_DRIVER("javax.jdo.option.ConnectionDriverName",
+         "javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver",
+         "Driver class name for a JDBC metastore"),
+     CONNECTION_POOLING_MAX_CONNECTIONS("datanucleus.connectionPool.maxPoolSize",
+         "datanucleus.connectionPool.maxPoolSize", 10,
+         "Specify the maximum number of connections in the connection pool. Note: The configured size will be used by\n" +
+             "2 connection pools (TxnHandler and ObjectStore). When configuring the max connection pool size, it is\n" +
+             "recommended to take into account the number of metastore instances and the number of HiveServer2 instances\n" +
+             "configured with embedded metastore. To get optimal performance, set config to meet the following condition\n"+
+             "(2 * pool_size * metastore_instances + 2 * pool_size * HS2_instances_with_embedded_metastore) = \n" +
+             "(2 * physical_core_count + hard_disk_count)."),
+     CONNECT_URL_HOOK("metastore.ds.connection.url.hook",
+         "hive.metastore.ds.connection.url.hook", "",
+         "Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used"),
+     CONNECT_URL_KEY("javax.jdo.option.ConnectionURL",
+         "javax.jdo.option.ConnectionURL",
+         "jdbc:derby:;databaseName=metastore_db;create=true",
+         "JDBC connect string for a JDBC metastore.\n" +
+             "To use SSL to encrypt/authenticate the connection, provide database-specific SSL flag in the connection URL.\n" +
+             "For example, jdbc:postgresql://myhost/db?ssl=true for postgres database."),
+     CONNECTION_POOLING_TYPE("datanucleus.connectionPoolingType",
+         "datanucleus.connectionPoolingType", "HikariCP", new StringSetValidator("BONECP", "DBCP",
+         "HikariCP", "NONE"),
+         "Specify connection pool library for datanucleus"),
+     CONNECTION_USER_NAME("javax.jdo.option.ConnectionUserName",
+         "javax.jdo.option.ConnectionUserName", "APP",
+         "Username to use against metastore database"),
+     CREATE_TABLES_AS_ACID("metastore.create.as.acid", "hive.create.as.acid", false,
+         "Whether the eligible tables should be created as full ACID by default. Does \n" +
+             "not apply to external tables, the ones using storage handlers, etc."),
+     COUNT_OPEN_TXNS_INTERVAL("metastore.count.open.txns.interval", "hive.count.open.txns.interval",
+         1, TimeUnit.SECONDS, "Time in seconds between checks to count open transactions."),
+     DATANUCLEUS_AUTOSTART("datanucleus.autoStartMechanismMode",
+         "datanucleus.autoStartMechanismMode", "ignored", new StringSetValidator("ignored"),
+         "Autostart mechanism for datanucleus.  Currently ignored is the only option supported."),
+     DATANUCLEUS_CACHE_LEVEL2("datanucleus.cache.level2", "datanucleus.cache.level2", false,
+         "Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server"),
+     DATANUCLEUS_CACHE_LEVEL2_TYPE("datanucleus.cache.level2.type",
+         "datanucleus.cache.level2.type", "none", ""),
+     DATANUCLEUS_INIT_COL_INFO("datanucleus.rdbms.initializeColumnInfo",
+         "datanucleus.rdbms.initializeColumnInfo", "NONE",
+         "initializeColumnInfo setting for DataNucleus; set to NONE at least on Postgres."),
+     DATANUCLEUS_PLUGIN_REGISTRY_BUNDLE_CHECK("datanucleus.plugin.pluginRegistryBundleCheck",
+         "datanucleus.plugin.pluginRegistryBundleCheck", "LOG",
+         "Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]"),
+     DATANUCLEUS_TRANSACTION_ISOLATION("datanucleus.transactionIsolation",
+         "datanucleus.transactionIsolation", "read-committed",
+         "Default transaction isolation level for identity generation."),
+     DATANUCLEUS_USE_LEGACY_VALUE_STRATEGY("datanucleus.rdbms.useLegacyNativeValueStrategy",
+         "datanucleus.rdbms.useLegacyNativeValueStrategy", true, ""),
+     DBACCESS_SSL_PROPS("metastore.dbaccess.ssl.properties", "hive.metastore.dbaccess.ssl.properties", "",
+         "Comma-separated SSL properties for metastore to access database when JDO connection URL\n" +
+             "enables SSL access. e.g. javax.net.ssl.trustStore=/tmp/truststore,javax.net.ssl.trustStorePassword=pwd."),
+     DEFAULTPARTITIONNAME("metastore.default.partition.name",
+         "hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__",
+         "The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped. \n" +
+             "This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). \n" +
+             "The user has to be aware that the dynamic partition value should not contain this value to avoid confusions."),
+     DELEGATION_KEY_UPDATE_INTERVAL("metastore.cluster.delegation.key.update-interval",
+         "hive.cluster.delegation.key.update-interval", 1, TimeUnit.DAYS, ""),
+     DELEGATION_TOKEN_GC_INTERVAL("metastore.cluster.delegation.token.gc-interval",
+         "hive.cluster.delegation.token.gc-interval", 1, TimeUnit.HOURS, ""),
+     DELEGATION_TOKEN_MAX_LIFETIME("metastore.cluster.delegation.token.max-lifetime",
+         "hive.cluster.delegation.token.max-lifetime", 7, TimeUnit.DAYS, ""),
+     DELEGATION_TOKEN_RENEW_INTERVAL("metastore.cluster.delegation.token.renew-interval",
+       "hive.cluster.delegation.token.renew-interval", 1, TimeUnit.DAYS, ""),
+     DELEGATION_TOKEN_STORE_CLS("metastore.cluster.delegation.token.store.class",
+         "hive.cluster.delegation.token.store.class", MetastoreDelegationTokenManager.class.getName(),
+         "Class to store delegation tokens"),
+     DETACH_ALL_ON_COMMIT("javax.jdo.option.DetachAllOnCommit",
+         "javax.jdo.option.DetachAllOnCommit", true,
+         "Detaches all objects from session so that they can be used after transaction is committed"),
+     DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE("metastore.direct.sql.max.elements.in.clause",
+         "hive.direct.sql.max.elements.in.clause", 1000,
+         "The maximum number of values in a IN clause. Once exceeded, it will be broken into\n" +
+             " multiple OR separated IN clauses."),
+     DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE("metastore.direct.sql.max.elements.values.clause",
+         "hive.direct.sql.max.elements.values.clause",
+         1000, "The maximum number of values in a VALUES clause for INSERT statement."),
+     DIRECT_SQL_MAX_QUERY_LENGTH("metastore.direct.sql.max.query.length",
+         "hive.direct.sql.max.query.length", 100, "The maximum\n" +
+         " size of a query string (in KB)."),
+     DIRECT_SQL_PARTITION_BATCH_SIZE("metastore.direct.sql.batch.size",
+         "hive.metastore.direct.sql.batch.size", 0,
+         "Batch size for partition and other object retrieval from the underlying DB in direct\n" +
+             "SQL. For some DBs like Oracle and MSSQL, there are hardcoded or perf-based limitations\n" +
+             "that necessitate this. For DBs that can handle the queries, this isn't necessary and\n" +
+             "may impede performance. -1 means no batching, 0 means automatic batching."),
+     DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES("metastore.disallow.incompatible.col.type.changes",
+         "hive.metastore.disallow.incompatible.col.type.changes", true,
+         "If true, ALTER TABLE operations which change the type of a\n" +
+             "column (say STRING) to an incompatible type (say MAP) are disallowed.\n" +
+             "RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the\n" +
+             "datatypes can be converted from string to any type. The map is also serialized as\n" +
+             "a string, which can be read as a string as well. However, with any binary\n" +
+             "serialization, this is not true. Blocking the ALTER TABLE prevents ClassCastExceptions\n" +
+             "when subsequently trying to access old partitions.\n" +
+             "\n" +
+             "Primitive types like INT, STRING, BIGINT, etc., are compatible with each other and are\n" +
+             "not blocked.\n" +
+             "\n" +
+             "See HIVE-4409 for more details."),
+     DUMP_CONFIG_ON_CREATION("metastore.dump.config.on.creation", "metastore.dump.config.on.creation", true,
+         "If true, a printout of the config file (minus sensitive values) will be dumped to the " +
+             "log whenever newMetastoreConf() is called.  Can produce a lot of logs"),
+     END_FUNCTION_LISTENERS("metastore.end.function.listeners",
+         "hive.metastore.end.function.listeners", "",
+         "List of comma separated listeners for the end of metastore functions."),
+     EVENT_CLEAN_FREQ("metastore.event.clean.freq", "hive.metastore.event.clean.freq", 0,
+         TimeUnit.SECONDS, "Frequency at which timer task runs to purge expired events in metastore."),
+     EVENT_EXPIRY_DURATION("metastore.event.expiry.duration", "hive.metastore.event.expiry.duration",
+         0, TimeUnit.SECONDS, "Duration after which events expire from events table"),
+     EVENT_LISTENERS("metastore.event.listeners", "hive.metastore.event.listeners", "",
+         "A comma separated list of Java classes that implement the org.apache.riven.MetaStoreEventListener" +
+             " interface. The metastore event and corresponding listener method will be invoked in separate JDO transactions. " +
+             "Alternatively, configure hive.metastore.transactional.event.listeners to ensure both are invoked in same JDO transaction."),
+     EVENT_MESSAGE_FACTORY("metastore.event.message.factory",
+         "hive.metastore.event.message.factory",
+         "org.apache.hadoop.hive.metastore.messaging.json.JSONMessageFactory",
+         "Factory class for making encoding and decoding messages in the events generated."),
+     EVENT_DB_LISTENER_TTL("metastore.event.db.listener.timetolive",
+         "hive.metastore.event.db.listener.timetolive", 86400, TimeUnit.SECONDS,
+         "time after which events will be removed from the database listener queue"),
+     EVENT_DB_NOTIFICATION_API_AUTH("metastore.metastore.event.db.notification.api.auth",
+         "hive.metastore.event.db.notification.api.auth", true,
+         "Should metastore do authorization against database notification related APIs such as get_next_notification.\n" +
+             "If set to true, then only the superusers in proxy settings have the permission"),
+     EXECUTE_SET_UGI("metastore.execute.setugi", "hive.metastore.execute.setugi", true,
+         "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using \n" +
+             "the client's reported user and group permissions. Note that this property must be set on \n" +
+             "both the client and server sides. Further note that its best effort. \n" +
+             "If client sets its to true and server sets it to false, client setting will be ignored."),
+     EXPRESSION_PROXY_CLASS("metastore.expression.proxy", "hive.metastore.expression.proxy",
+         "org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore",
+         "Class to use to process expressions in partition pruning."),
+     FILE_METADATA_THREADS("metastore.file.metadata.threads",
+         "hive.metastore.hbase.file.metadata.threads", 1,
+         "Number of threads to use to read file metadata in background to cache it."),
+     FILTER_HOOK("metastore.filter.hook", "hive.metastore.filter.hook",
+         org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl.class.getName(),
+         "Metastore hook class for filtering the metadata read results. If hive.security.authorization.manager"
+             + "is set to instance of HiveAuthorizerFactory, then this value is ignored."),
+     FS_HANDLER_CLS("metastore.fs.handler.class", "hive.metastore.fs.handler.class",
+         "org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl", ""),
+     FS_HANDLER_THREADS_COUNT("metastore.fshandler.threads", "hive.metastore.fshandler.threads", 15,
+         "Number of threads to be allocated for metastore handler for fs operations."),
+     HMS_HANDLER_ATTEMPTS("metastore.hmshandler.retry.attempts", "hive.hmshandler.retry.attempts", 10,
+         "The number of times to retry a HMSHandler call if there were a connection error."),
+     HMS_HANDLER_FORCE_RELOAD_CONF("metastore.hmshandler.force.reload.conf",
+         "hive.hmshandler.force.reload.conf", false,
+         "Whether to force reloading of the HMSHandler configuration (including\n" +
+             "the connection URL, before the next metastore query that accesses the\n" +
+             "datastore. Once reloaded, this value is reset to false. Used for\n" +
+             "testing only."),
+     HMS_HANDLER_INTERVAL("metastore.hmshandler.retry.interval", "hive.hmshandler.retry.interval",
+         2000, TimeUnit.MILLISECONDS, "The time between HMSHandler retry attempts on failure."),
+     IDENTIFIER_FACTORY("datanucleus.identifierFactory",
+         "datanucleus.identifierFactory", "datanucleus1",
+         "Name of the identifier factory to use when generating table/column names etc. \n" +
+             "'datanucleus1' is used for backward compatibility with DataNucleus v1"),
+     INIT_HOOKS("metastore.init.hooks", "hive.metastore.init.hooks", "",
+         "A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization. \n" +
+             "An init hook is specified as the name of Java class which extends org.apache.riven.MetaStoreInitListener."),
+     INIT_METADATA_COUNT_ENABLED("metastore.initial.metadata.count.enabled",
+         "hive.metastore.initial.metadata.count.enabled", true,
+         "Enable a metadata count at metastore startup for metrics."),
+     INTEGER_JDO_PUSHDOWN("metastore.integral.jdo.pushdown",
+         "hive.metastore.integral.jdo.pushdown", false,
+         "Allow JDO query pushdown for integral partition columns in metastore. Off by default. This\n" +
+             "improves metastore perf for integral columns, especially if there's a large number of partitions.\n" +
+             "However, it doesn't work correctly with integral values that are not normalized (e.g. have\n" +
+             "leading zeroes, like 0012). If metastore direct SQL is enabled and works, this optimization\n" +
+             "is also irrelevant."),
+     KERBEROS_KEYTAB_FILE("metastore.kerberos.keytab.file",
+         "hive.metastore.kerberos.keytab.file", "",
+         "The path to the Kerberos Keytab file containing the metastore Thrift server's service principal."),
+     KERBEROS_PRINCIPAL("metastore.kerberos.principal", "hive.metastore.kerberos.principal",
+         "hive-metastore/_HOST@EXAMPLE.COM",
+         "The service principal for the metastore Thrift server. \n" +
+             "The special string _HOST will be replaced automatically with the correct host name."),
+     LIMIT_PARTITION_REQUEST("metastore.limit.partition.request",
+         "hive.metastore.limit.partition.request", -1,
+         "This limits the number of partitions (whole partition objects) that can be requested " +
+         "from the metastore for a give table. MetaStore API methods using this are: \n" +
+                 "get_partitions, \n" +
+                 "get_partitions_with_auth, \n" +
+                 "get_partitions_by_filter, \n" +
+                 "get_partitions_by_expr.\n" +
+             "The default value \"-1\" means no limit."),
+     LOG4J_FILE("metastore.log4j.file", "hive.log4j.file", "",
+         "Hive log4j configuration file.\n" +
+             "If the property is not set, then logging will be initialized using metastore-log4j2.properties found on the classpath.\n" +
+             "If the property is set, the value must be a valid URI (java.net.URI, e.g. \"file:///tmp/my-logging.xml\"), \n" +
+             "which you can then extract a URL from and pass to PropertyConfigurator.configure(URL)."),
+     MANAGER_FACTORY_CLASS("javax.jdo.PersistenceManagerFactoryClass",
+         "javax.jdo.PersistenceManagerFactoryClass",
+         "org.datanucleus.api.jdo.JDOPersistenceManagerFactory",
+         "class implementing the jdo persistence"),
+     MATERIALIZATIONS_INVALIDATION_CACHE_IMPL("metastore.materializations.invalidation.impl",
+         "hive.metastore.materializations.invalidation.impl", "DEFAULT",
+         new StringSetValidator("DEFAULT", "DISABLE"),
+         "The implementation that we should use for the materializations invalidation cache. \n" +
+             "  DEFAULT: Default implementation for invalidation cache\n" +
+             "  DISABLE: Disable invalidation cache (debugging purposes)"),
+     MATERIALIZATIONS_INVALIDATION_CACHE_CLEAN_FREQUENCY("metastore.materializations.invalidation.clean.frequency",
+          "hive.metastore.materializations.invalidation.clean.frequency",
+          3600, TimeUnit.SECONDS, "Frequency at which timer task runs to remove unnecessary transaction entries from " +
+           "the materializations invalidation cache."),
+     MATERIALIZATIONS_INVALIDATION_CACHE_EXPIRY_DURATION("metastore.materializations.invalidation.max.duration",
+          "hive.metastore.materializations.invalidation.max.duration",
+          86400, TimeUnit.SECONDS, "Maximum duration for a query producing a materialization. After this time, transaction " +
+          "entries that are not relevant for materializations can be removed from the invalidation cache."),
+ 
+     RUNTIME_STATS_CLEAN_FREQUENCY("runtime.stats.clean.frequency", "hive.metastore.runtime.stats.clean.frequency", 3600,
+         TimeUnit.SECONDS, "Frequency at which timer task runs to remove outdated runtime stat entries."),
+     RUNTIME_STATS_MAX_AGE("runtime.stats.max.age", "hive.metastore.runtime.stats.max.age", 86400 * 3, TimeUnit.SECONDS,
+         "Stat entries which are older than this are removed."),
+ 
+     // Parameters for exporting metadata on table drop (requires the use of the
+     // org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre-event listener)
+     METADATA_EXPORT_LOCATION("metastore.metadata.export.location", "hive.metadata.export.location",
+         "",
+         "When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
+             "it is the location to which the metadata will be exported. The default is an empty string, which results in the \n" +
+             "metadata being exported to the current user's home directory on HDFS."),
+     MOVE_EXPORTED_METADATA_TO_TRASH("metastore.metadata.move.exported.metadata.to.trash",
+         "hive.metadata.move.exported.metadata.to.trash", true,
+         "When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
+             "this setting determines if the metadata that is exported will subsequently be moved to the user's trash directory \n" +
+             "alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data."),
+     METRICS_ENABLED("metastore.metrics.enabled", "hive.metastore.metrics.enabled", false,
+         "Enable metrics on the metastore."),
+     METRICS_HADOOP2_COMPONENT_NAME("metastore.metrics.hadoop2.component", "hive.service.metrics.hadoop2.component", "hivemetastore",
+                     "Component name to provide to Hadoop2 Metrics system."),
+     METRICS_JSON_FILE_INTERVAL("metastore.metrics.file.frequency",
+         "hive.service.metrics.file.frequency", 1, TimeUnit.MINUTES,
+         "For json metric reporter, the frequency of updating JSON metrics file."),
+     METRICS_JSON_FILE_LOCATION("metastore.metrics.file.location",
+         "hive.service.metrics.file.location", "/tmp/report.json",
+         "For metric class json metric reporter, the location of local JSON metrics file.  " +
+             "This file will get overwritten at every interval."),
+     METRICS_REPORTERS("metastore.metrics.reporters", "metastore.metrics.reporters", "json,jmx",
+         new StringSetValidator("json", "jmx", "console", "hadoop"),
+         "A comma separated list of metrics reporters to start"),
+     MULTITHREADED("javax.jdo.option.Multithreaded", "javax.jdo.option.Multithreaded", true,
+         "Set this to true if multiple threads access metastore through JDO concurrently."),
+     MAX_OPEN_TXNS("metastore.max.open.txns", "hive.max.open.txns", 100000,
+         "Maximum number of open transactions. If \n" +
+         "current open transactions reach this limit, future open transaction requests will be \n" +
+         "rejected, until this number goes below the limit."),
+     NON_TRANSACTIONAL_READ("javax.jdo.option.NonTransactionalRead",
+         "javax.jdo.option.NonTransactionalRead", true,
+         "Reads outside of transactions"),
+     NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES("metastore.notification.sequence.lock.max.retries",
+         "hive.notification.sequence.lock.max.retries", 5,
+         "Number of retries required to acquire a lock when getting the next notification sequential ID for entries "
+             + "in the NOTIFICATION_LOG table."),
+     NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL(
+         "metastore.notification.sequence.lock.retry.sleep.interval",
+         "hive.notification.sequence.lock.retry.sleep.interval", 500, TimeUnit.MILLISECONDS,
+         "Sleep interval between retries to acquire a notification lock as described part of property "
+             + NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES.name()),
+     ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("metastore.orm.retrieveMapNullsAsEmptyStrings",
+         "hive.metastore.orm.retrieveMapNullsAsEmptyStrings",false,
+         "Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must " +
+             "either be pruned or converted to empty strings. Some backing dbs such as Oracle persist empty strings " +
+             "as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " +
+             "pruning is the correct behaviour"),
+     PARTITION_NAME_WHITELIST_PATTERN("metastore.partition.name.whitelist.pattern",
+         "hive.metastore.partition.name.whitelist.pattern", "",
+         "Partition names will be checked against this regex pattern and rejected if not matched."),
+     PART_INHERIT_TBL_PROPS("metastore.partition.inherit.table.properties",
+         "hive.metastore.partition.inherit.table.properties", "",
+         "List of comma separated keys occurring in table properties which will get inherited to newly created partitions. \n" +
+             "* implies all the keys will get inherited."),
+     PRE_EVENT_LISTENERS("metastore.pre.event.listeners", "hive.metastore.pre.event.listeners", "",
+         "List of comma separated listeners for metastore events."),
+     PWD("javax.jdo.option.ConnectionPassword", "javax.jdo.option.ConnectionPassword", "mine",
+         "password to use against metastore database"),
+     RAW_STORE_IMPL("metastore.rawstore.impl", "hive.metastore.rawstore.impl",
+         "org.apache.hadoop.hive.metastore.ObjectStore",
+         "Name of the class that implements org.apache.riven.rawstore interface. \n" +
+             "This class is used to store and retrieval of raw metadata objects such as table, database"),
+     REPLCMDIR("metastore.repl.cmrootdir", "hive.repl.cmrootdir", "/user/hive/cmroot/",
+         "Root dir for ChangeManager, used for deleted files."),
+     REPLCMRETIAN("metastore.repl.cm.retain", "hive.repl.cm.retain",  24, TimeUnit.HOURS,
+         "Time to retain removed files in cmrootdir."),
+     REPLCMINTERVAL("metastore.repl.cm.interval", "hive.repl.cm.interval", 3600, TimeUnit.SECONDS,
+         "Inteval for cmroot cleanup thread."),
+     REPLCMENABLED("metastore.repl.cm.enabled", "hive.repl.cm.enabled", false,
+         "Turn on ChangeManager, so delete files will go to cmrootdir."),
+     REPLDIR("metastore.repl.rootdir", "hive.repl.rootdir", "/user/hive/repl/",
+         "HDFS root dir for all replication dumps."),
+     REPL_COPYFILE_MAXNUMFILES("metastore.repl.copyfile.maxnumfiles",
+         "hive.exec.copyfile.maxnumfiles", 1L,
+         "Maximum number of files Hive uses to do sequential HDFS copies between directories." +
+             "Distributed copies (distcp) will be used instead for larger numbers of files so that copies can be done faster."),
+     REPL_COPYFILE_MAXSIZE("metastore.repl.copyfile.maxsize",
+         "hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/,
+         "Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories." +
+             "Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."),
+     SCHEMA_INFO_CLASS("metastore.schema.info.class", "hive.metastore.schema.info.class",
+         "org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo",
+         "Fully qualified class name for the metastore schema information class \n"
+             + "which is used by schematool to fetch the schema information.\n"
+             + " This class should implement the IMetaStoreSchemaInfo interface"),
+     SCHEMA_VERIFICATION("metastore.schema.verification", "hive.metastore.schema.verification", true,
+         "Enforce metastore schema version consistency.\n" +
+         "True: Verify that version information stored in is compatible with one from Hive jars.  Also disable automatic\n" +
+         "      schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures\n" +
+         "      proper metastore schema migration. (Default)\n" +
+         "False: Warn if the version information stored in metastore doesn't match with one from in Hive jars."),
+     SCHEMA_VERIFICATION_RECORD_VERSION("metastore.schema.verification.record.version",
+         "hive.metastore.schema.verification.record.version", false,
+         "When true the current MS version is recorded in the VERSION table. If this is disabled and verification is\n" +
+             " enabled the MS will be unusable."),
+     SERDES_USING_METASTORE_FOR_SCHEMA("metastore.serdes.using.metastore.for.schema",
+         "hive.serdes.using.metastore.for.schema",
+         "org.apache.hadoop.hive.ql.io.orc.OrcSerde," +
+             "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe," +
+             "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe," +
+             "org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe," +
+             "org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe," +
+             "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe," +
+             "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe," +
+             "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe",
+         "SerDes retrieving schema from metastore. This is an internal parameter."),
+     SERVER_MAX_MESSAGE_SIZE("metastore.server.max.message.size",
+         "hive.metastore.server.max.message.size", 100*1024*1024L,
+         "Maximum message size in bytes a HMS will accept."),
+     SERVER_MAX_THREADS("metastore.server.max.threads",
+         "hive.metastore.server.max.threads", 1000,
+         "Maximum number of worker threads in the Thrift server's pool."),
+     SERVER_MIN_THREADS("metastore.server.min.threads", "hive.metastore.server.min.threads", 200,
+         "Minimum number of worker threads in the Thrift server's pool."),
+     SERVER_PORT("metastore.thrift.port", "hive.metastore.port", 9083,
+         "Hive metastore listener port"),
+     SSL_KEYSTORE_PASSWORD("metastore.keystore.password", "hive.metastore.keystore.password", "",
+         "Metastore SSL certificate keystore password."),
+     SSL_KEYSTORE_PATH("metastore.keystore.path", "hive.metastore.keystore.path", "",
+         "Metastore SSL certificate keystore location."),
+     SSL_PROTOCOL_BLACKLIST("metastore.ssl.protocol.blacklist", "hive.ssl.protocol.blacklist",
+         "SSLv2,SSLv3", "SSL Versions to disable for all Hive Servers"),
+     SSL_TRUSTSTORE_PATH("metastore.truststore.path", "hive.metastore.truststore.path", "",
+         "Metastore SSL certificate truststore location."),
+     SSL_TRUSTSTORE_PASSWORD("metastore.truststore.password", "hive.metastore.truststore.password", "",
+         "Metastore SSL certificate truststore password."),
+     STATS_AUTO_GATHER("metastore.stats.autogather", "hive.stats.autogather", true,
+         "A flag to gather statistics (only basic) automatically during the INSERT OVERWRITE command."),
+     STATS_FETCH_BITVECTOR("metastore.stats.fetch.bitvector", "hive.stats.fetch.bitvector", false,
+         "Whether we fetch bitvector when we compute ndv. Users can turn it off if they want to use old schema"),
+     STATS_NDV_TUNER("metastore.stats.ndv.tuner", "hive.metastore.stats.ndv.tuner", 0.0,
+         "Provides a tunable parameter between the lower bound and the higher bound of ndv for aggregate ndv across all the partitions. \n" +
+             "The lower bound is equal to the maximum of ndv of all the partitions. The higher bound is equal to the sum of ndv of all the partitions.\n" +
+             "Its value should be between 0.0 (i.e., choose lower bound) and 1.0 (i.e., choose higher bound)"),
+     STATS_NDV_DENSITY_FUNCTION("metastore.stats.ndv.densityfunction",
+         "hive.metastore.stats.ndv.densityfunction", false,
+         "Whether to use density function to estimate the NDV for the whole table based on the NDV of partitions"),
+     STATS_DEFAULT_AGGREGATOR("metastore.stats.default.aggregator", "hive.stats.default.aggregator",
+         "",
+         "The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type."),
+     STATS_DEFAULT_PUBLISHER("metastore.stats.default.publisher", "hive.stats.default.publisher", "",
+         "The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type."),
+     STATS_AUTO_UPDATE("metastore.stats.auto.analyze", "hive.metastore.stats.auto.analyze", "none",
+         new EnumValidator(StatsUpdateMode.values()),
+         "Whether to update stats in the background; none - no, all - for all tables, existing - only existing, out of date, stats."),
+     STATS_AUTO_UPDATE_NOOP_WAIT("metastore.stats.auto.analyze.noop.wait",
+         "hive.metastore.stats.auto.analyze.noop.wait", 5L, TimeUnit.MINUTES,
+         new TimeValidator(TimeUnit.MINUTES),
+         "How long to sleep if there were no stats needing update during an update iteration.\n" +
+         "This is a setting to throttle table/partition checks when nothing is being changed; not\n" +
+         "the analyze queries themselves."),
+     STATS_AUTO_UPDATE_WORKER_COUNT("metastore.stats.auto.analyze.worker.count",
+         "hive.metastore.stats.auto.analyze.worker.count", 1,
+         "Number of parallel analyze commands to run for background stats update."),
+     STORAGE_SCHEMA_READER_IMPL("metastore.storage.schema.reader.impl", "metastore.storage.schema.reader.impl",
+         DefaultStorageSchemaReader.class.getName(),
+         "The class to use to read schemas from storage.  It must implement " +
+         "org.apache.hadoop.hive.metastore.StorageSchemaReader"),
+     STORE_MANAGER_TYPE("datanucleus.storeManagerType", "datanucleus.storeManagerType", "rdbms", "metadata store type"),
+     STRICT_MANAGED_TABLES("metastore.strict.managed.tables", "hive.strict.managed.tables", false,
+             "Whether strict managed tables mode is enabled. With this mode enabled, " +
+             "only transactional tables (both full and insert-only) are allowed to be created as managed tables"),
+     SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES("metastore.support.special.characters.tablename",
+         "hive.support.special.characters.tablename", true,
+         "This flag should be set to true to enable support for special characters in table names.\n"
+             + "When it is set to false, only [a-zA-Z_0-9]+ are supported.\n"
+             + "The only supported special character right now is '/'. This flag applies only to quoted table names.\n"
+             + "The default value is true."),
+     TASK_THREADS_ALWAYS("metastore.task.threads.always", "metastore.task.threads.always",
+         EventCleanerTask.class.getName() + "," + RuntimeStatsCleanerTask.class.getName() + "," +
+         "org.apache.hadoop.hive.metastore.repl.DumpDirCleanerTask" + "," +
+             "org.apache.hadoop.hive.metastore.HiveProtoEventsCleanerTask",
+         "Comma separated list of tasks that will be started in separate threads.  These will " +
+             "always be started, regardless of whether the metastore is running in embedded mode " +
+             "or in server mode.  They must implement " + MetastoreTaskThread.class.getName()),
+     TASK_THREADS_REMOTE_ONLY("metastore.task.threads.remote", "metastore.task.threads.remote",
+         AcidHouseKeeperService.class.getName() + "," +
+             AcidOpenTxnsCounterService.class.getName() + "," +
+             AcidCompactionHistoryService.class.getName() + "," +
+             AcidWriteSetService.class.getName() + "," +
+             MaterializationsRebuildLockCleanerTask.class.getName(),
+         "Command separated list of tasks that will be started in separate threads.  These will be" +
+             " started only when the metastore is running as a separate service.  They must " +
+             "implement " + MetastoreTaskThread.class.getName()),
+     TCP_KEEP_ALIVE("metastore.server.tcp.keepalive",
+         "hive.metastore.server.tcp.keepalive", true,
+         "Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections."),
+     THREAD_POOL_SIZE("metastore.thread.pool.size", "no.such", 10,
+         "Number of threads in the thread pool.  These will be used to execute all background " +
+             "processes."),
+     THRIFT_CONNECTION_RETRIES("metastore.connect.retries", "hive.metastore.connect.retries", 3,
+         "Number of retries while opening a connection to metastore"),
+     THRIFT_FAILURE_RETRIES("metastore.failure.retries", "hive.metastore.failure.retries", 1,
+         "Number of retries upon failure of Thrift metastore calls"),
+     THRIFT_URIS("metastore.thrift.uris", "hive.metastore.uris", "",
+         "Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore."),
+     THRIFT_URI_SELECTION("metastore.thrift.uri.selection", "hive.metastore.uri.selection", "RANDOM",
+         new StringSetValidator("RANDOM", "SEQUENTIAL"),
+         "Determines the selection mechanism used by metastore client to connect to remote " +
+         "metastore.  SEQUENTIAL implies that the first valid metastore from the URIs specified " +
+         "as part of hive.metastore.uris will be picked.  RANDOM implies that the metastore " +
+         "will be picked randomly"),
+     TIMEDOUT_TXN_REAPER_START("metastore.timedout.txn.reaper.start",
+         "hive.timedout.txn.reaper.start", 100, TimeUnit.SECONDS,
+         "Time delay of 1st reaper run after metastore start"),
+     TIMEDOUT_TXN_REAPER_INTERVAL("metastore.timedout.txn.reaper.interval",
+         "hive.timedout.txn.reaper.interval", 180, TimeUnit.SECONDS,
+         "Time interval describing how often the reaper runs"),
+     TOKEN_SIGNATURE("metastore.token.signature", "hive.metastore.token.signature", "",
+         "The delegation token service name to match when selecting a token from the current user's tokens."),
+     TRANSACTIONAL_EVENT_LISTENERS("metastore.transactional.event.listeners",
+         "hive.metastore.transactional.event.listeners", "",
+         "A comma separated list of Java classes that implement the org.apache.riven.MetaStoreEventListener" +
+             " interface. Both the metastore event and corresponding listener method will be invoked in the same JDO transaction."),
+     TRY_DIRECT_SQL("metastore.try.direct.sql", "hive.metastore.try.direct.sql", true,
+         "Whether the metastore should try to use direct SQL queries instead of the\n" +
+             "DataNucleus for certain read paths. This can improve metastore performance when\n" +
+             "fetching many partitions or column statistics by orders of magnitude; however, it\n" +
+             "is not guaranteed to work on all RDBMS-es and all versions. In case of SQL failures,\n" +
+             "the metastore will fall back to the DataNucleus, so it's safe even if SQL doesn't\n" +
+             "work for all queries on your datastore. If all SQL queries fail (for example, your\n" +
+             "metastore is backed by MongoDB), you might want to disable this to save the\n" +
+             "try-and-fall-back cost."),
+     TRY_DIRECT_SQL_DDL("metastore.try.direct.sql.ddl", "hive.metastore.try.direct.sql.ddl", true,
+         "Same as hive.metastore.try.direct.sql, for read statements within a transaction that\n" +
+             "modifies metastore data. Due to non-standard behavior in Postgres, if a direct SQL\n" +
+             "select query has incorrect syntax or something similar inside a transaction, the\n" +
+             "entire transaction will fail and fall-back to DataNucleus will not be possible. You\n" +
+             "should disable the usage of direct SQL inside transactions if that happens in your case."),
+     TXN_MAX_OPEN_BATCH("metastore.txn.max.open.batch", "hive.txn.max.open.batch", 1000,
+         "Maximum number of transactions that can be fetched in one call to open_txns().\n" +
+             "This controls how many transactions streaming agents such as Flume or Storm open\n" +
+             "simultaneously. The streaming agent then writes that number of entries into a single\n" +
+             "file (per Flume agent or Storm bolt). Thus increasing this value decreases the number\n" +
+             "of delta files created by streaming agents. But it also increases the number of open\n" +
+             "transactions that Hive has to track at any given time, which may negatively affect\n" +
+             "read performance."),
+     TXN_RETRYABLE_SQLEX_REGEX("metastore.txn.retryable.sqlex.regex",
+         "hive.txn.retryable.sqlex.regex", "", "Comma separated list\n" +
+         "of regular expression patterns for SQL state, error code, and error message of\n" +
+         "retryable SQLExceptions, that's suitable for the metastore DB.\n" +
+         "For example: Can't serialize.*,40001$,^Deadlock,.*ORA-08176.*\n" +
+         "The string that the regex will be matched against is of the following form, where ex is a SQLException:\n" +
+         "ex.getMessage() + \" (SQLState=\" + ex.getSQLState() + \", ErrorCode=\" + ex.getErrorCode() + \")\""),
+     TXN_STORE_IMPL("metastore.txn.store.impl", "hive.metastore.txn.store.impl",
+         "org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler",
+         "Name of class that implements org.apache.riven.txn.TxnStore.  This " +
+             "class is used to store and retrieve transactions and locks"),
+     TXN_TIMEOUT("metastore.txn.timeout", "hive.txn.timeout", 300, TimeUnit.SECONDS,
+         "time after which transactions are declared aborted if the client has not sent a heartbeat."),
+     URI_RESOLVER("metastore.uri.resolver", "hive.metastore.uri.resolver", "",
+             "If set, fully qualified class name of resolver for hive metastore uri's"),
+     USERS_IN_ADMIN_ROLE("metastore.users.in.admin.role", "hive.users.in.admin.role", "", false,
+         "Comma separated list of users who are in admin role for bootstrapping.\n" +
+             "More users can be added in ADMIN role later."),
+     USE_SSL("metastore.use.SSL", "hive.metastore.use.SSL", false,
+         "Set this to true for using SSL encryption in HMS server."),
+     USE_THRIFT_SASL("metastore.sasl.enabled", "hive.metastore.sasl.enabled", false,
+         "If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos."),
+     USE_THRIFT_FRAMED_TRANSPORT("metastore.thrift.framed.transport.enabled",
+         "hive.metastore.thrift.framed.transport.enabled", false,
+         "If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used."),
+     USE_THRIFT_COMPACT_PROTOCOL("metastore.thrift.compact.protocol.enabled",
+         "hive.metastore.thrift.compact.protocol.enabled", false,
+         "If true, the metastore Thrift interface will use TCompactProtocol. When false (default) TBinaryProtocol will be used.\n" +
+             "Setting it to true will break compatibility with older clients running TBinaryProtocol."),
+     VALIDATE_COLUMNS("datanucleus.schema.validateColumns", "datanucleus.schema.validateColumns", false,
+         "validates existing schema against code. turn this on if you want to verify existing schema"),
+     VALIDATE_CONSTRAINTS("datanucleus.schema.validateConstraints",
+         "datanucleus.schema.validateConstraints", false,
+         "validates existing schema against code. turn this on if you want to verify existing schema"),
+     VALIDATE_TABLES("datanucleus.schema.validateTables",
+         "datanucleus.schema.validateTables", false,
+         "validates existing schema against code. turn this on if you want to verify existing schema"),
+     WAREHOUSE("metastore.warehouse.dir", "hive.metastore.warehouse.dir", "/user/hive/warehouse",
+         "location of default database for the warehouse"),
+     WAREHOUSE_EXTERNAL("metastore.warehouse.external.dir",
+         "hive.metastore.warehouse.external.dir", "",
+         "Default location for external tables created in the warehouse. " +
+         "If not set or null, then the normal warehouse location will be used as the default location."),
+     WRITE_SET_REAPER_INTERVAL("metastore.writeset.reaper.interval",
+         "hive.writeset.reaper.interval", 60, TimeUnit.SECONDS,
+         "Frequency of WriteSet reaper runs"),
+     WM_DEFAULT_POOL_SIZE("metastore.wm.default.pool.size",
+         "hive.metastore.wm.default.pool.size", 4,
+         "The size of a default pool to create when creating an empty resource plan;\n" +
+         "If not positive, no default pool will be created."),
+     RAWSTORE_PARTITION_BATCH_SIZE("metastore.rawstore.batch.size",
+         "metastore.rawstore.batch.size", -1,
+         "Batch size for partition and other object retrieval from the underlying DB in JDO.\n" +
+         "The JDO implementation such as DataNucleus may run into issues when the generated queries are\n" +
+         "too large. Use this parameter to break the query into multiple batches. -1 means no batching."),
+ 
+     // Hive values we have copied and use as is
+     // These two are used to indicate that we are running tests
+     HIVE_IN_TEST("hive.in.test", "hive.in.test", false, "internal usage only, true in test mode"),
+     HIVE_IN_TEZ_TEST("hive.in.tez.test", "hive.in.tez.test", false,
+         "internal use only, true when in testing tez"),
+     // We need to track this as some listeners pass it through our config and we need to honor
+     // the system properties.
+     HIVE_AUTHORIZATION_MANAGER("hive.security.authorization.manager",
+         "hive.security.authorization.manager",
+         "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory",
+         "The Hive client authorization manager class name. The user defined authorization class should implement \n" +
+             "interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider."),
+     HIVE_METASTORE_AUTHENTICATOR_MANAGER("hive.security.metastore.authenticator.manager",
+         "hive.security.metastore.authenticator.manager",
+         "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
+         "authenticator manager class name to be used in the metastore for authentication. \n" +
+             "The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider."),
+     HIVE_METASTORE_AUTHORIZATION_AUTH_READS("hive.security.metastore.authorization.auth.reads",
+         "hive.security.metastore.authorization.auth.reads", true,
+         "If this is true, metastore authorizer authorizes read actions on database, table"),
+     // The metastore shouldn't care what txn manager Hive is running, but in various tests it
+     // needs to set these values.  We should do the work to detangle this.
+     HIVE_TXN_MANAGER("hive.txn.manager", "hive.txn.manager",
+         "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager",
+         "Set to org.apache.hadoop.hive.ql.lockmgr.DbTxnManager as part of turning on Hive\n" +
+             "transactions, which also requires appropriate settings for hive.compactor.initiator.on,\n" +
+             "hive.compactor.worker.threads, hive.support.concurrency (true),\n" +
+             "and hive.exec.dynamic.partition.mode (nonstrict).\n" +
+             "The default DummyTxnManager replicates pre-Hive-0.13 behavior and provides\n" +
+             "no transactions."),
+     // Metastore always support concurrency, but certain ACID tests depend on this being set.  We
+     // need to do the work to detangle this
+     HIVE_SUPPORT_CONCURRENCY("hive.support.concurrency", "hive.support.concurrency", false,
+         "Whether Hive supports concurrency control or not. \n" +
+             "A ZooKeeper instance must be up and running when using zookeeper Hive lock manager "),
++    HIVE_TXN_STATS_ENABLED("hive.txn.stats.enabled", "hive.txn.stats.enabled", true,
++        "Whether Hive supports transactional stats (accurate stats for transactional tables)"),
+ 
+     // Deprecated Hive values that we are keeping for backwards compatibility.
+     @Deprecated
+     HIVE_CODAHALE_METRICS_REPORTER_CLASSES("hive.service.metrics.codahale.reporter.classes",
+         "hive.service.metrics.codahale.reporter.classes", "",
+         "Use METRICS_REPORTERS instead.  Comma separated list of reporter implementation classes " +
+             "for metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics. Overrides "
+             + "HIVE_METRICS_REPORTER conf if present.  This will be overridden by " +
+             "METRICS_REPORTERS if it is present"),
+     @Deprecated
+     HIVE_METRICS_REPORTER("hive.service.metrics.reporter", "hive.service.metrics.reporter", "",
+         "Reporter implementations for metric class "
+             + "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics;" +
+             "Deprecated, use METRICS_REPORTERS instead. This configuraiton will be"
+             + " overridden by HIVE_CODAHALE_METRICS_REPORTER_CLASSES and METRICS_REPORTERS if " +
+             "present. Comma separated list of JMX, CONSOLE, JSON_FILE, HADOOP2"),
+ 
+     // These are all values that we put here just for testing
+     STR_TEST_ENTRY("test.str", "hive.test.str", "defaultval", "comment"),
+     STR_SET_ENTRY("test.str.set", "hive.test.str.set", "a", new StringSetValidator("a", "b", "c"), ""),
+     STR_LIST_ENTRY("test.str.list", "hive.test.str.list", "a,b,c",
+         "no comment"),
+     LONG_TEST_ENTRY("test.long", "hive.test.long", 42, "comment"),
+     DOUBLE_TEST_ENTRY("test.double", "hive.test.double", 3.141592654, "comment"),
+     TIME_TEST_ENTRY("test.time", "hive.test.time", 1, TimeUnit.SECONDS, "comment"),
+     TIME_VALIDATOR_ENTRY_INCLUSIVE("test.time.validator.inclusive", "hive.test.time.validator.inclusive", 1,
+         TimeUnit.SECONDS,
+         new TimeValidator(TimeUnit.MILLISECONDS, 500L, true, 1500L, true), "comment"),
+     TIME_VALIDATOR_ENTRY_EXCLUSIVE("test.time.validator.exclusive", "hive.test.time.validator.exclusive", 1,
+         TimeUnit.SECONDS,
+         new TimeValidator(TimeUnit.MILLISECONDS, 500L, false, 1500L, false), "comment"),
+     BOOLEAN_TEST_ENTRY("test.bool", "hive.test.bool", true, "comment"),
+     CLASS_TEST_ENTRY("test.class", "hive.test.class", "", "comment");
+ 
+     private final String varname;
+     private final String hiveName;
+     private final Object defaultVal;
+     private final Validator validator;
+     private final boolean caseSensitive;
+     private final String description;
+ 
+     ConfVars(String varname, String hiveName, String defaultVal, String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = defaultVal;
+       validator = null;
+       caseSensitive = false;
+       this.description = description;
+     }
+ 
+     ConfVars(String varname, String hiveName, String defaultVal, Validator validator,
+              String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = defaultVal;
+       this.validator = validator;
+       caseSensitive = false;
+       this.description = description;
+     }
+ 
+     ConfVars(String varname, String hiveName, String defaultVal, boolean caseSensitive,
+              String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = defaultVal;
+       validator = null;
+       this.caseSensitive = caseSensitive;
+       this.description = description;
+     }
+ 
+     ConfVars(String varname, String hiveName, long defaultVal, String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = defaultVal;
+       validator = null;
+       caseSensitive = false;
+       this.description = description;
+     }
+ 
+     ConfVars(String varname, String hiveName, long defaultVal, Validator validator,
+              String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = defaultVal;
+       this.validator = validator;
+       caseSensitive = false;
+       this.description = description;
+     }
+ 
+     ConfVars(String varname, String hiveName, boolean defaultVal, String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = defaultVal;
+       validator = null;
+       caseSensitive = false;
+       this.description = description;
+     }
+ 
+     ConfVars(String varname, String hiveName, double defaultVal, String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = defaultVal;
+       validator = null;
+       caseSensitive = false;
+       this.description = description;
+     }
+ 
+     ConfVars(String varname, String hiveName, long defaultVal, TimeUnit unit, String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = new TimeValue(defaultVal, unit);
+       validator = new TimeValidator(unit);
+       caseSensitive = false;
+       this.description = description;
+     }
+ 
+     ConfVars(String varname, String hiveName, long defaultVal, TimeUnit unit,
+              Validator validator, String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = new TimeValue(defaultVal, unit);
+       this.validator = validator;
+       caseSensitive = false;
+       this.description = description;
+     }
+ 
+     public void validate(String value) throws IllegalArgumentException {
+       if (validator != null) {
+         validator.validate(value);
+       }
+     }
+ 
+     public boolean isCaseSensitive() {
+       return caseSensitive;
+     }
+ 
+     /**
+      * If you are calling this, you're probably doing it wrong.  You shouldn't need to use the
+      * underlying variable name.  Use one of the getVar methods instead.  Only use this if you
+      * are 100% sure you know what you're doing.  The reason for this is that MetastoreConf goes to a
+      * lot of trouble to make sure it checks both Hive and Metastore values for config keys.  If
+      * you call {@link Configuration#get(String)} you are undermining that.
+      * @return variable name
+      */
+     public String getVarname() {
+       return varname;
+     }
+ 
+     /**
+      * Use this method if you need to set a system property and are going to instantiate the
+      * configuration file via HiveConf.  This is because HiveConf only looks for values it knows,
+      * so it will miss all of the metastore.* ones.  Do not use this to explicitly set or get the
+      * underlying config value unless you are 100% sure you know what you're doing.
+      * The reason for this is that MetastoreConf goes to a
+      * lot of trouble to make sure it checks both Hive and Metastore values for config keys.  If
+      * you call {@link Configuration#get(String)} you are undermining that.
+      * @return hive.* configuration key
+      */
+     public String getHiveName() {
+       return hiveName;
+     }
+ 
+     public Object getDefaultVal() {
+       return defaultVal;
+     }
+ 
+     public String getDescription() {
+       return description;
+     }
+ 
+     /**
+      * This is useful if you need the variable name for a LOG message or
+      * {@link System#setProperty(String, String)}, beware however that you should only use this
+      * with setProperty if you're going to create a configuration via
+      * {@link MetastoreConf#newMetastoreConf()}.  If you are going to create it with HiveConf,
+      * then use {@link #getHiveName()}.
+      * @return metastore.* configuration key
+      */
+     @Override
+     public String toString() {
+       return varname;
+     }
+   }
+ 
+   public static final ConfVars[] dataNucleusAndJdoConfs = {
+       ConfVars.AUTO_CREATE_ALL,
+       ConfVars.CONNECTION_DRIVER,
+       ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS,
+       ConfVars.CONNECTION_POOLING_TYPE,
+       ConfVars.CONNECT_URL_KEY,
+       ConfVars.CONNECTION_USER_NAME,
+       ConfVars.DATANUCLEUS_AUTOSTART,
+       ConfVars.DATANUCLEUS_CACHE_LEVEL2,
+       ConfVars.DATANUCLEUS_CACHE_LEVEL2_TYPE,
+       ConfVars.DATANUCLEUS_INIT_COL_INFO,
+       ConfVars.DATANUCLEUS_PLUGIN_REGISTRY_BUNDLE_CHECK,
+       ConfVars.DATANUCLEUS_TRANSACTION_ISOLATION,
+       ConfVars.DATANUCLEUS_USE_LEGACY_VALUE_STRATEGY,
+       ConfVars.DETACH_ALL_ON_COMMIT,
+       ConfVars.IDENTIFIER_FACTORY,
+       ConfVars.MANAGER_FACTORY_CLASS,
+       ConfVars.MULTITHREADED,
+       ConfVars.NON_TRANSACTIONAL_READ,
+       ConfVars.PWD,
+       ConfVars.STORE_MANAGER_TYPE,
+       ConfVars.VALIDATE_COLUMNS,
+       ConfVars.VALIDATE_CONSTRAINTS,
+       ConfVars.VALIDATE_TABLES
+   };
+ 
+   // Make sure no one calls this
+   private MetastoreConf() {
+     throw new RuntimeException("You should never be creating one of these!");
+   }
+ 
+   public static void setHiveSiteLocation(URL location) {
+     hiveSiteURL = location;
+   }
+ 
+   public static Configuration newMetastoreConf() {
+     return newMetastoreConf(new Configuration());
+   }
+ 
+   public static Configuration newMetastoreConf(Configuration conf) {
+ 
+     ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+     if (classLoader == null) {
+       classLoader = MetastoreConf.class.getClassLoader();
+     }
+     // We don't add this to the resources because we don't want to read config values from it.
+     // But we do find it because we want to remember where it is for later in case anyone calls
+     // getHiveDefaultLocation().
+     hiveDefaultURL = classLoader.getResource("hive-default.xml");
+ 
+     // Add in hive-site.xml.  We add this first so that it gets overridden by the new metastore
+     // specific files if they exist.
+     if(hiveSiteURL == null) {
+       /*
+        * this 'if' is pretty lame - QTestUtil.QTestUtil() uses hiveSiteURL to load a specific
+        * hive-site.xml from data/conf/<subdir> so this makes it follow the same logic - otherwise
+        * HiveConf and MetastoreConf may load different hive-site.xml  ( For example,
+        * HiveConf uses data/conf/spark/hive-site.xml and MetastoreConf data/conf/hive-site.xml)
+        */
+       hiveSiteURL = findConfigFile(classLoader, "hive-site.xml");
+     }
+     if (hiveSiteURL != null) {
+       conf.addResource(hiveSiteURL);
+     }
+ 
+     // Now add hivemetastore-site.xml.  Again we add this before our own config files so that the
+     // newer overrides the older.
+     hiveMetastoreSiteURL = findConfigFile(classLoader, "hivemetastore-site.xml");
+     if (hiveMetastoreSiteURL != null) {
+       conf.addResource(hiveMetastoreSiteURL);
+     }
+ 
+     // Add in our conf file
+     metastoreSiteURL = findConfigFile(classLoader, "metastore-site.xml");
+     if (metastoreSiteURL !=  null) {
+       conf.addResource(metastoreSiteURL);
+     }
+ 
+     // If a system property that matches one of our conf value names is set then use the value
+     // it's set to to set our own conf value.
+     for (ConfVars var : ConfVars.values()) {
+       if (System.getProperty(var.varname) != null) {
+         LOG.debug("Setting conf value " + var.varname + " using value " +
+             System.getProperty(var.varname));
+         conf.set(var.varname, System.getProperty(var.varname));
+       }
+     }
+ 
+     // Pick up any system properties that start with "hive." and set them in our config.  This
+     // way we can properly pull any Hive values from the environment without needing to know all
+     // of the Hive config values.
+     System.getProperties().stringPropertyNames().stream()
+         .filter(s -> s.startsWith("hive."))
+         .forEach(s -> {
+           String v = System.getProperty(s);
+           LOG.debug("Picking up system property " + s + " with value " + v);
+           conf.set(s, v);
+         });
+ 
+     // If we are going to validate the schema, make sure we don't create it
+     if (getBoolVar(conf, ConfVars.SCHEMA_VERIFICATION)) {
+       setBoolVar(conf, ConfVars.AUTO_CREATE_ALL, false);
+     }
+ 
+     if (!beenDumped.getAndSet(true) && getBoolVar(conf, ConfVars.DUMP_CONFIG_ON_CREATION) &&
+         LOG.isDebugEnabled()) {
+       LOG.debug(dumpConfig(conf));
+     }
+     return conf;
+   }
+ 
+   private static URL findConfigFile(ClassLoader classLoader, String name) {
+     // First, look in the classpath
+     URL result = classLoader.getResource(name);
+     if (result == null) {
+       // Nope, so look to see if our conf dir has been explicitly set
+       result = seeIfConfAtThisLocation("METASTORE_CONF_DIR", name, false);
+       if (result == null) {
+         // Nope, so look to see if our home dir has been explicitly set
+         result = seeIfConfAtThisLocation("METASTORE_HOME", name, true);
+         if (result == null) {
+           // Nope, so look to see if Hive's conf dir has been explicitly set
+           result = seeIfConfAtThisLocation("HIVE_CONF_DIR", name, false);
+           if (result == null) {
+             // Nope, so look to see if Hive's home dir has been explicitly set
+             result = seeIfConfAtThisLocation("HIVE_HOME", name, true);
+             if (result == null) {
+               // Nope, so look to see if we can find a conf file by finding our jar, going up one
+               // directory, and looking for a conf directory.
+               URI jarUri = null;
+               try {
+                 jarUri = MetastoreConf.class.getProtectionDomain().getCodeSource().getLocation().toURI();
+               } catch (Throwable e) {
+                 LOG.warn("Cannot get jar URI", e);
+               }
+               result = seeIfConfAtThisLocation(new File(jarUri).getParent(), name, true);
+               // At this point if we haven't found it, screw it, we don't know where it is
+               if (result == null) {
+                 LOG.info("Unable to find config file " + name);
+               }
+             }
+           }
+         }
+       }
+     }
+     LOG.info("Found configuration file " + result);
+     return result;
+   }
+ 
+   private static URL seeIfConfAtThisLocation(String envVar, String name, boolean inConfDir) {
+     String path = System.getenv(envVar);
+     if (path == null) {
+       // Workaround for testing since tests can't set the env vars.
+       path = System.getProperty(TEST_ENV_WORKAROUND + envVar);
+     }
+     if (path != null) {
+       String suffix = inConfDir ? "conf" + File.separatorChar + name : name;
+       return checkConfigFile(new File(path, suffix));
+     }
+     return null;
+   }
+ 
+   private static URL checkConfigFile(File f) {
+     try {
+       return (f.exists() && f.isFile()) ? f.toURI().toURL() : null;
+     } catch (Throwable e) {
+       LOG.warn("Error looking for config " + f, e);
+       return null;
+     }
+   }
+ 
+   // In all of the getters, we try the metastore value name first.  If it is not set we try the
+   // Hive value name.
+ 
+   /**
+    * Get the variable as a string
+    * @param conf configuration to retrieve it from
+    * @param var variable to retrieve
+    * @return value, or default value if value not in config file
+    */
+   public static String getVar(Configuration conf, ConfVars var) {
+     assert var.defaultVal.getClass() == String.class;
+     String val = conf.get(var.varname);
+     return val == null ? conf.get(var.hiveName, (String)var.defaultVal) : val;
+   }
+ 
+   /**
+    * Get the variable as a string
+    * @param conf configuration to retrieve it from
+    * @param var variable to retrieve
+    * @param defaultVal default to return if the variable is unset
+    * @return value, or default value passed in if the value is not in the config file
+    */
+   public static String getVar(Configuration conf, ConfVars var, String defaultVal) {
+     assert var.defaultVal.getClass() == String.class;
+     String val = conf.get(var.varname);
+     return val == null ? conf.get(var.hiveName, defaultVal) : val;
+   }
+ 
+   /**
+    * Treat a configuration value as a comma separated list.
+    * @param conf configuration to retrieve it from
+    * @param var variable to retrieve
+    * @return collection of strings.  If the value is unset it will return an empty collection.
+    */
+   public static Collection<String> getStringCollection(Configuration conf, ConfVars var) {
+     assert var.defaultVal.getClass() == String.class;
+     String val = conf.get(var.varname);
+     if (val == null) {
+       val = conf.get(var.hiveName, (String)var.defaultVal);
+     }
+     if (val == null) {
+       return Collections.emptySet();
+     }
+     return StringUtils.asSet(val.split(","));
+   }
+ 
+   /**
+    * Set the variable as a string
+    * @param conf configuration file to set it in
+    * @param var variable to set
+    * @param val value to set it to
+    */
+   public static void setVar(Configuration conf, ConfVars var, String val) {
+     assert var.defaultVal.getClass() == String.class;
+     conf.set(var.varname, val);
+   }
+ 
+   /**
+    * Get the variable as an int.  Note that all integer-valued variables are stored as longs, thus
+    * this downcasts from a long to an int.
+    * @param conf configuration to retrieve it from
+    * @param var variable to retrieve
+    * @return value, or default value if value not in config file
+    */
+   public static int getIntVar(Configuration conf, ConfVars var) {
+     long val = getLongVar(conf, var);
+     assert val <= Integer.MAX_VALUE;
+     return (int)val;
+   }
+ 
+   /**
+    * Get the variable as a long
+    * @param conf configuration to retrieve it from
+    * @param var variable to retrieve
+    * @return value, or default value if value not in config file
+    */
+   public static long getLongVar(Configuration conf, ConfVars var) {
+     assert var.defaultVal.getClass() == Long.class;
+     String val = conf.get(var.varname);
+     return val == null ? conf.getLong(var.hiveName, (Long)var.defaultVal) : Long.valueOf(val);
+   }
+ 
+   /**
+    * Set the variable as a long
+    * @param conf configuration file to set it in
+    * @param var variable to set
+    * @param val value to set it to
+    */
+   public static void setLongVar(Configuration conf, ConfVars var, long val) {
+     assert var.defaultVal.getClass() == Long.class;
+     conf.setLong(var.varname, val);
+   }
+ 
+   /**
+    * Get the variable as a boolean
+    * @param conf configuration to retrieve it from
+    * @param var variable to retrieve
+    * @return value, or default value if value not in config file
+    */
+   public static boolean getBoolVar(Configuration conf, ConfVars var) {
+     assert var.defaultVal.getClass() == Boolean.class;
+     String val = conf.get(var.varname);
+     return val == null ? conf.getBoolean(var.hiveName, (Boolean)var.defaultVal) : Boolean.valueOf(val);
+   }
+ 
+   /**
+    * Set the variable as a boolean
+    * @param conf configuration file to set it in
+    * @param var variable to set
+    * @param val value to set it to
+    */
+   public static void setBoolVar(Configuration conf, ConfVars var, boolean val) {
+     assert var.defaultVal.getClass() == Boolean.class;
+     conf.setBoolean(var.varname, val);
+   }
+ 
+   /**
+    * Get the variable as a double
+    * @param conf configuration to retrieve it from
+    * @param var variable to retrieve
+    * @return value, or default value if value not in config file
+    */
+   public static double getDoubleVar(Configuration conf, ConfVars var) {
+     assert var.defaultVal.getClass() == Double.class;
+     String val = conf.get(var.varname);
+     return val == null ? conf.getDouble(var.hiveName, (Double)var.defaultVal) : Double.valueOf(val);
+   }
+ 
+   /**
+    * Set the variable as a double
+    * @param conf configuration file to set it in
+    * @param var variable to set
+    * @param val value to set it to
+    */
+   public static void setDoubleVar(Configuration conf, ConfVars var, double val) {
+     assert var.defaultVal.getClass() == Double.class;
+     conf.setDouble(var.varname, val);
+   }
+ 
+   public static long getSizeVar(Configuration conf, ConfVars var) {
+     return SizeValidator.toSizeBytes(getVar(conf, var));
+   }
+ 
+   /**
+    * Get a class instance based on a configuration value
+    * @param conf configuration file to retrieve it from
+    * @param var variable to retrieve
+    * @param defaultValue default class to return if the value isn't set
+    * @param xface interface that class must implement
+    * @param <I> interface that class implements
+    * @return instance of the class
+    */
+   public static <I> Class<? extends I> getClass(Configuration conf, ConfVars var,
+                                                 Class<? extends I> defaultValue,
+                                                 Class<I> xface) {
+     assert var.defaultVal.getClass() == String.class;
+     String val = conf.get(var.varname);
+     return val == null ? conf.getClass(var.hiveName, defaultValue, xface) :
+         conf.getClass(var.varname, def

<TRUNCATED>
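For readers skimming the patch above, here is a minimal usage sketch of the getter pattern it introduces (the example class name is hypothetical; the ConfVars entries and getter signatures are the ones shown in the diff). Each getter looks up the metastore.* key first, falls back to the corresponding hive.* key, and finally to the compiled-in default.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

    public class MetastoreConfExample {                       // hypothetical example class
      public static void main(String[] args) {
        // Layers in hive-site.xml, hivemetastore-site.xml and metastore-site.xml
        // (in that order), then any matching system properties.
        Configuration conf = MetastoreConf.newMetastoreConf();

        // Tries metastore.warehouse.dir, then hive.metastore.warehouse.dir,
        // then the default "/user/hive/warehouse".
        String warehouse = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE);

        // Same fallback logic for the typed getters.
        boolean useSsl = MetastoreConf.getBoolVar(conf, ConfVars.USE_SSL);
        long batchSize = MetastoreConf.getLongVar(conf, ConfVars.RAWSTORE_PARTITION_BATCH_SIZE);

        System.out.println(warehouse + ", ssl=" + useSsl + ", batchSize=" + batchSize);
      }
    }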

[31/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
index 0000000,abbcda3..c5977b2
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
@@@ -1,0 -1,211 +1,218 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import java.util.List;
++
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ 
+ import static org.junit.Assert.assertEquals;
+ 
+ 
+ /**
+  * A wrapper around {@link ObjectStore} that allows us to inject custom behaviour
+  * on to some of the methods for testing.
+  */
+ public class InjectableBehaviourObjectStore extends ObjectStore {
+   public InjectableBehaviourObjectStore() {
+     super();
+   }
+ 
+   /**
+    * A utility class that allows callers injecting behaviour to determine whether their injections occurred.
+    */
+   public static abstract class BehaviourInjection<T, F>
+       implements com.google.common.base.Function<T, F>{
+     protected boolean injectionPathCalled = false;
+     protected boolean nonInjectedPathCalled = false;
+ 
+     public void assertInjectionsPerformed(
+         boolean expectedInjectionCalled, boolean expectedNonInjectedPathCalled){
+       assertEquals(expectedInjectionCalled, injectionPathCalled);
+       assertEquals(expectedNonInjectedPathCalled, nonInjectedPathCalled);
+     }
+   }
+ 
+   /**
+    * A utility class to pass the arguments of the caller to the stub method.
+    */
+   public class CallerArguments {
+     public String dbName;
+     public String tblName;
+     public String funcName;
+     public String constraintTblName;
+ 
+     public CallerArguments(String dbName) {
+       this.dbName = dbName;
+     }
+   }
+ 
+   private static com.google.common.base.Function<Table, Table> getTableModifier =
+       com.google.common.base.Functions.identity();
+   private static com.google.common.base.Function<Partition, Partition> getPartitionModifier =
+           com.google.common.base.Functions.identity();
+   private static com.google.common.base.Function<List<String>, List<String>> listPartitionNamesModifier =
+           com.google.common.base.Functions.identity();
+   private static com.google.common.base.Function<NotificationEventResponse, NotificationEventResponse>
+           getNextNotificationModifier = com.google.common.base.Functions.identity();
+ 
+   private static com.google.common.base.Function<CallerArguments, Boolean> callerVerifier = null;
+ 
+   // Methods to set/reset getTable modifier
+   public static void setGetTableBehaviour(com.google.common.base.Function<Table, Table> modifier){
+     getTableModifier = (modifier == null) ? com.google.common.base.Functions.identity() : modifier;
+   }
+ 
+   public static void resetGetTableBehaviour(){
+     setGetTableBehaviour(null);
+   }
+ 
+   // Methods to set/reset getPartition modifier
+   public static void setGetPartitionBehaviour(com.google.common.base.Function<Partition, Partition> modifier){
+     getPartitionModifier = (modifier == null) ? com.google.common.base.Functions.identity() : modifier;
+   }
+ 
+   public static void resetGetPartitionBehaviour(){
+     setGetPartitionBehaviour(null);
+   }
+ 
+   // Methods to set/reset listPartitionNames modifier
+   public static void setListPartitionNamesBehaviour(com.google.common.base.Function<List<String>, List<String>> modifier){
+     listPartitionNamesModifier = (modifier == null)? com.google.common.base.Functions.identity() : modifier;
+   }
+ 
+   public static void resetListPartitionNamesBehaviour(){
+     setListPartitionNamesBehaviour(null);
+   }
+ 
+   // Methods to set/reset getNextNotification modifier
+   public static void setGetNextNotificationBehaviour(
+           com.google.common.base.Function<NotificationEventResponse,NotificationEventResponse> modifier){
+     getNextNotificationModifier = (modifier == null)? com.google.common.base.Functions.identity() : modifier;
+   }
+ 
+   public static void resetGetNextNotificationBehaviour(){
+     setGetNextNotificationBehaviour(null);
+   }
+ 
+   // Methods to set/reset caller checker
+   public static void setCallerVerifier(com.google.common.base.Function<CallerArguments, Boolean> verifier){
+     callerVerifier = verifier;
+   }
+ 
+   public static void resetCallerVerifier(){
+     setCallerVerifier(null);
+   }
+ 
+   // ObjectStore methods to be overridden with injected behavior
+   @Override
+   public Table getTable(String catName, String dbName, String tableName) throws MetaException {
+     return getTableModifier.apply(super.getTable(catName, dbName, tableName));
+   }
+ 
+   @Override
++  public Table getTable(String catName, String dbName, String tableName,
++      long txnId, String writeIdList) throws MetaException {
++    return getTableModifier.apply(super.getTable(catName, dbName, tableName, txnId, writeIdList));
++  }
++
++  @Override
+   public Partition getPartition(String catName, String dbName, String tableName,
+                                 List<String> partVals) throws NoSuchObjectException, MetaException {
+     return getPartitionModifier.apply(super.getPartition(catName, dbName, tableName, partVals));
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String dbName, String tableName, short max)
+           throws MetaException {
+     return listPartitionNamesModifier.apply(super.listPartitionNames(catName, dbName, tableName, max));
+   }
+ 
+   @Override
+   public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
+     return getNextNotificationModifier.apply(super.getNextNotification(rqst));
+   }
+ 
+   @Override
+   public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+     if (callerVerifier != null) {
+       CallerArguments args = new CallerArguments(tbl.getDbName());
+       args.tblName = tbl.getTableName();
+       Boolean success = callerVerifier.apply(args);
+       if ((success != null) && !success) {
+         throw new MetaException("InjectableBehaviourObjectStore: Invalid Create Table operation on DB: "
+                 + args.dbName + " table: " + args.tblName);
+       }
+     }
+     super.createTable(tbl);
+   }
+ 
+   @Override
+   public void createFunction(Function func) throws InvalidObjectException, MetaException {
+     if (callerVerifier != null) {
+       CallerArguments args = new CallerArguments(func.getDbName());
+       args.funcName = func.getFunctionName();
+       Boolean success = callerVerifier.apply(args);
+       if ((success != null) && !success) {
+         throw new MetaException("InjectableBehaviourObjectStore: Invalid Create Function operation on DB: "
+                 + args.dbName + " function: " + args.funcName);
+       }
+     }
+     super.createFunction(func);
+   }
+ 
+   @Override
+   public List<String> addPrimaryKeys(List<SQLPrimaryKey> pks) throws InvalidObjectException,
+           MetaException {
+     if (callerVerifier != null) {
+       CallerArguments args = new CallerArguments(pks.get(0).getTable_db());
+       args.constraintTblName = pks.get(0).getTable_name();
+       Boolean success = callerVerifier.apply(args);
+       if ((success != null) && !success) {
+         throw new MetaException("InjectableBehaviourObjectStore: Invalid Add Primary Key operation on DB: "
+                 + args.dbName + " table: " + args.constraintTblName);
+       }
+     }
+     return super.addPrimaryKeys(pks);
+   }
+ 
+   @Override
+   public List<String> addForeignKeys(List<SQLForeignKey> fks) throws InvalidObjectException,
+           MetaException {
+     if (callerVerifier != null) {
+       CallerArguments args = new CallerArguments(fks.get(0).getFktable_db());
+       args.constraintTblName = fks.get(0).getFktable_name();
+       Boolean success = callerVerifier.apply(args);
+       if ((success != null) && !success) {
+         throw new MetaException("InjectableBehaviourObjectStore: Invalid Add Foreign Key operation on DB: "
+                 + args.dbName + " table: " + args.constraintTblName);
+       }
+     }
+     return super.addForeignKeys(fks);
+   }
+ }
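As a rough illustration of how a test might drive these hooks (a sketch only: the test method name, the elided call under test, and the JUnit wiring are assumptions, not part of this patch), a modifier flips the injection flags that assertInjectionsPerformed() later checks:

    // Hypothetical test method; assumes the same imports as the class above plus JUnit's @Test.
    @Test
    public void simulateMissingTable() throws Exception {
      InjectableBehaviourObjectStore.BehaviourInjection<Table, Table> tableNuller =
          new InjectableBehaviourObjectStore.BehaviourInjection<Table, Table>() {
            @Override
            public Table apply(Table table) {
              if (table != null) {
                injectionPathCalled = true;
                return null;                 // pretend the table lookup found nothing
              }
              nonInjectedPathCalled = true;
              return table;
            }
          };

      InjectableBehaviourObjectStore.setGetTableBehaviour(tableNuller);
      try {
        // ... exercise code here that reads the table through the metastore ...
      } finally {
        InjectableBehaviourObjectStore.resetGetTableBehaviour();
      }
      tableNuller.assertInjectionsPerformed(true, false);
    }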

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
index 0000000,adc82b0..533cabb
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
@@@ -1,0 -1,121 +1,121 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.mockito.Mockito;
+ 
+ import java.util.Arrays;
+ 
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ 
+ @Category(MetastoreUnitTest.class)
+ public class TestHiveAlterHandler {
+ 
+   private Configuration conf = MetastoreConf.newMetastoreConf();
+ 
+   @Test
+   public void testAlterTableAddColNotUpdateStats() throws MetaException, InvalidObjectException, NoSuchObjectException {
+     FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
+     FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
+     FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
+     FieldSchema col4 = new FieldSchema("col4", "string", "col4 comment");
+ 
+     StorageDescriptor oldSd = new StorageDescriptor();
+     oldSd.setCols(Arrays.asList(col1, col2, col3));
+     Table oldTable = new Table();
+     oldTable.setDbName("default");
+     oldTable.setTableName("test_table");
+     oldTable.setSd(oldSd);
+ 
+     StorageDescriptor newSd = new StorageDescriptor(oldSd);
+     newSd.setCols(Arrays.asList(col1, col2, col3, col4));
+     Table newTable = new Table(oldTable);
+     newTable.setSd(newSd);
+ 
+     RawStore msdb = Mockito.mock(RawStore.class);
+     Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics(
+         getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3"));
+     HiveAlterHandler handler = new HiveAlterHandler();
+     handler.setConf(conf);
 -    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable);
++    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, -1, null);
+   }
+ 
+   @Test
+   public void testAlterTableDelColUpdateStats() throws MetaException, InvalidObjectException, NoSuchObjectException {
+     FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
+     FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
+     FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
+     FieldSchema col4 = new FieldSchema("col4", "string", "col4 comment");
+ 
+     StorageDescriptor oldSd = new StorageDescriptor();
+     oldSd.setCols(Arrays.asList(col1, col2, col3, col4));
+     Table oldTable = new Table();
+     oldTable.setDbName("default");
+     oldTable.setTableName("test_table");
+     oldTable.setSd(oldSd);
+ 
+     StorageDescriptor newSd = new StorageDescriptor(oldSd);
+     newSd.setCols(Arrays.asList(col1, col2, col3));
+     Table newTable = new Table(oldTable);
+     newTable.setSd(newSd);
+ 
+     RawStore msdb = Mockito.mock(RawStore.class);
+     HiveAlterHandler handler = new HiveAlterHandler();
+     handler.setConf(conf);
 -    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable);
++    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, -1, null);
+     Mockito.verify(msdb, Mockito.times(1)).getTableColumnStatistics(
+         getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4")
+     );
+   }
+ 
+   @Test
+   public void testAlterTableChangePosNotUpdateStats() throws MetaException, InvalidObjectException, NoSuchObjectException {
+     FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
+     FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
+     FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
+     FieldSchema col4 = new FieldSchema("col4", "string", "col4 comment");
+ 
+     StorageDescriptor oldSd = new StorageDescriptor();
+     oldSd.setCols(Arrays.asList(col1, col2, col3, col4));
+     Table oldTable = new Table();
+     oldTable.setDbName("default");
+     oldTable.setTableName("test_table");
+     oldTable.setSd(oldSd);
+ 
+     StorageDescriptor newSd = new StorageDescriptor(oldSd);
+     newSd.setCols(Arrays.asList(col1, col4, col2, col3));
+     Table newTable = new Table(oldTable);
+     newTable.setSd(newSd);
+ 
+     RawStore msdb = Mockito.mock(RawStore.class);
+     Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics(
+         getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4"));
+     HiveAlterHandler handler = new HiveAlterHandler();
+     handler.setConf(conf);
 -    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable);
++    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, -1, null);
+   }
+ 
+ }


[12/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query25.q.out b/ql/src/test/results/clientpositive/perf/tez/query25.q.out
index 77a9a07..b68c54a 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query25.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query25.q.out
@@ -118,195 +118,197 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_269]
-        Limit [LIM_268] (rows=100 width=88)
+      File Output Operator [FS_270]
+        Limit [LIM_269] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_267] (rows=421657640 width=88)
+          Select Operator [SEL_268] (rows=421657640 width=88)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
           <-Reducer 6 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_266]
-              Group By Operator [GBY_265] (rows=421657640 width=88)
+            SHUFFLE [RS_267]
+              Group By Operator [GBY_266] (rows=421657640 width=88)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3
               <-Reducer 5 [SIMPLE_EDGE]
                 SHUFFLE [RS_49]
                   PartitionCols:_col0, _col1, _col2, _col3
                   Group By Operator [GBY_48] (rows=843315281 width=88)
                     Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col20)","sum(_col12)"],keys:_col25, _col26, _col28, _col29
-                    Merge Join Operator [MERGEJOIN_213] (rows=843315281 width=88)
-                      Conds:RS_44._col3=RS_251._col0(Inner),Output:["_col5","_col12","_col20","_col25","_col26","_col28","_col29"]
-                    <-Map 20 [SIMPLE_EDGE] vectorized
-                      SHUFFLE [RS_251]
-                        PartitionCols:_col0
-                        Select Operator [SEL_250] (rows=1704 width=1910)
-                          Output:["_col0","_col1","_col2"]
-                          Filter Operator [FIL_249] (rows=1704 width=1910)
-                            predicate:s_store_sk is not null
-                            TableScan [TS_32] (rows=1704 width=1910)
-                              default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_id","s_store_name"]
-                    <-Reducer 4 [SIMPLE_EDGE]
-                      SHUFFLE [RS_44]
-                        PartitionCols:_col3
-                        Merge Join Operator [MERGEJOIN_212] (rows=766650239 width=88)
-                          Conds:RS_41._col1=RS_242._col0(Inner),Output:["_col3","_col5","_col12","_col20","_col25","_col26"]
-                        <-Map 18 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_242]
-                            PartitionCols:_col0
-                            Select Operator [SEL_241] (rows=462000 width=1436)
-                              Output:["_col0","_col1","_col2"]
-                              Filter Operator [FIL_240] (rows=462000 width=1436)
-                                predicate:i_item_sk is not null
-                                TableScan [TS_29] (rows=462000 width=1436)
-                                  default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_item_desc"]
-                        <-Reducer 3 [SIMPLE_EDGE]
-                          SHUFFLE [RS_41]
-                            PartitionCols:_col1
-                            Merge Join Operator [MERGEJOIN_211] (rows=696954748 width=88)
-                              Conds:RS_38._col1, _col2, _col4=RS_39._col8, _col9, _col10(Inner),Output:["_col1","_col3","_col5","_col12","_col20"]
-                            <-Reducer 11 [SIMPLE_EDGE]
-                              SHUFFLE [RS_39]
-                                PartitionCols:_col8, _col9, _col10
-                                Merge Join Operator [MERGEJOIN_210] (rows=348467716 width=135)
-                                  Conds:RS_25._col2, _col1=RS_26._col1, _col2(Inner),Output:["_col3","_col8","_col9","_col10","_col11"]
-                                <-Reducer 13 [SIMPLE_EDGE]
-                                  PARTITION_ONLY_SHUFFLE [RS_26]
-                                    PartitionCols:_col1, _col2
-                                    Merge Join Operator [MERGEJOIN_209] (rows=63350266 width=77)
-                                      Conds:RS_233._col0=RS_224._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
-                                    <-Map 8 [SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_224]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_219] (rows=4058 width=1119)
-                                          Output:["_col0"]
-                                          Filter Operator [FIL_216] (rows=4058 width=1119)
-                                            predicate:((d_year = 2000) and d_date_sk is not null and d_moy BETWEEN 4 AND 10)
-                                            TableScan [TS_3] (rows=73049 width=1119)
-                                              default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
-                                    <-Map 17 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_233]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_232] (rows=57591150 width=77)
-                                          Output:["_col0","_col1","_col2","_col3","_col4"]
-                                          Filter Operator [FIL_231] (rows=57591150 width=77)
-                                            predicate:(sr_customer_sk is not null and sr_item_sk is not null and sr_returned_date_sk is not null and sr_ticket_number is not null)
-                                            TableScan [TS_12] (rows=57591150 width=77)
-                                              default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_returned_date_sk","sr_item_sk","sr_customer_sk","sr_ticket_number","sr_net_loss"]
-                                <-Reducer 10 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_25]
-                                    PartitionCols:_col2, _col1
-                                    Merge Join Operator [MERGEJOIN_208] (rows=316788826 width=135)
-                                      Conds:RS_264._col0=RS_222._col0(Inner),Output:["_col1","_col2","_col3"]
-                                    <-Map 8 [SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_222]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_218] (rows=4058 width=1119)
-                                          Output:["_col0"]
-                                          Filter Operator [FIL_215] (rows=4058 width=1119)
-                                            predicate:((d_year = 2000) and d_date_sk is not null and d_moy BETWEEN 4 AND 10)
-                                             Please refer to the previous TableScan [TS_3]
-                                    <-Map 16 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_264]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_263] (rows=287989836 width=135)
-                                          Output:["_col0","_col1","_col2","_col3"]
-                                          Filter Operator [FIL_262] (rows=287989836 width=135)
-                                            predicate:((cs_bill_customer_sk BETWEEN DynamicValue(RS_26_store_returns_sr_customer_sk_min) AND DynamicValue(RS_26_store_returns_sr_customer_sk_max) and in_bloom_filter(cs_bill_customer_sk, DynamicValue(RS_26_store_returns_sr_customer_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_26_store_returns_sr_item_sk_min) AND DynamicValue(RS_26_store_returns_sr_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_26_store_returns_sr_item_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_42_item_i_item_sk_min) AND DynamicValue(RS_42_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_42_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_23_d3_d_date_sk_min) AND DynamicValue(RS_23_d3_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_23_d3_d_date_sk_bloom_filter))) and cs_bill_customer_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
-                                            TableScan [TS_6] (rows=287989836 width=135)
-                                              default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_item_sk","cs_net_profit"]
-                                            <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_236]
-                                                Group By Operator [GBY_234] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
-                                                <-Reducer 13 [CUSTOM_SIMPLE_EDGE]
-                                                  PARTITION_ONLY_SHUFFLE [RS_107]
-                                                    Group By Operator [GBY_106] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
-                                                      Select Operator [SEL_105] (rows=63350266 width=77)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Merge Join Operator [MERGEJOIN_209]
-                                            <-Reducer 15 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_239]
-                                                Group By Operator [GBY_237] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
-                                                <-Reducer 13 [CUSTOM_SIMPLE_EDGE]
-                                                  PARTITION_ONLY_SHUFFLE [RS_122]
-                                                    Group By Operator [GBY_121] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
-                                                      Select Operator [SEL_120] (rows=63350266 width=77)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Merge Join Operator [MERGEJOIN_209]
-                                            <-Reducer 19 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_248]
-                                                Group By Operator [GBY_246] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 18 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_245]
-                                                    Group By Operator [GBY_244] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_243] (rows=462000 width=1436)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_241]
-                                            <-Reducer 12 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_261]
-                                                Group By Operator [GBY_260] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  PARTITION_ONLY_SHUFFLE [RS_228]
-                                                    Group By Operator [GBY_226] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_223] (rows=4058 width=1119)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_218]
-                            <-Reducer 2 [SIMPLE_EDGE]
-                              SHUFFLE [RS_38]
-                                PartitionCols:_col1, _col2, _col4
-                                Merge Join Operator [MERGEJOIN_207] (rows=633595212 width=88)
-                                  Conds:RS_259._col0=RS_220._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5"]
-                                <-Map 8 [SIMPLE_EDGE] vectorized
-                                  PARTITION_ONLY_SHUFFLE [RS_220]
-                                    PartitionCols:_col0
-                                    Select Operator [SEL_217] (rows=18262 width=1119)
-                                      Output:["_col0"]
-                                      Filter Operator [FIL_214] (rows=18262 width=1119)
-                                        predicate:((d_moy = 4) and (d_year = 2000) and d_date_sk is not null)
-                                         Please refer to the previous TableScan [TS_3]
-                                <-Map 1 [SIMPLE_EDGE] vectorized
-                                  SHUFFLE [RS_259]
-                                    PartitionCols:_col0
-                                    Select Operator [SEL_258] (rows=575995635 width=88)
-                                      Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                      Filter Operator [FIL_257] (rows=575995635 width=88)
-                                        predicate:((ss_customer_sk BETWEEN DynamicValue(RS_26_store_returns_sr_customer_sk_min) AND DynamicValue(RS_26_store_returns_sr_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_26_store_returns_sr_customer_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_26_store_returns_sr_item_sk_min) AND DynamicValue(RS_26_store_returns_sr_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_26_store_returns_sr_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_42_item_i_item_sk_min) AND DynamicValue(RS_42_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_42_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_36_d1_d_date_sk_min) AND DynamicValue(RS_36_d1_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_36_d1_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_45_store_s_store_sk_min) AND DynamicValue(RS_45_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_45_store_s_store_sk_bloom_filter))) and ss_customer_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_ticket_number is not null)
-                                        TableScan [TS_0] (rows=575995635 width=88)
-                                          default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_store_sk","ss_ticket_number","ss_net_profit"]
-                                        <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                          BROADCAST [RS_235]
-                                             Please refer to the previous Group By Operator [GBY_234]
-                                        <-Reducer 15 [BROADCAST_EDGE] vectorized
-                                          BROADCAST [RS_238]
-                                             Please refer to the previous Group By Operator [GBY_237]
-                                        <-Reducer 19 [BROADCAST_EDGE] vectorized
-                                          BROADCAST [RS_247]
-                                             Please refer to the previous Group By Operator [GBY_246]
-                                        <-Reducer 21 [BROADCAST_EDGE] vectorized
-                                          BROADCAST [RS_256]
-                                            Group By Operator [GBY_255] (rows=1 width=12)
-                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                            <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
-                                              SHUFFLE [RS_254]
-                                                Group By Operator [GBY_253] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                  Select Operator [SEL_252] (rows=1704 width=1910)
-                                                    Output:["_col0"]
-                                                     Please refer to the previous Select Operator [SEL_250]
-                                        <-Reducer 9 [BROADCAST_EDGE] vectorized
-                                          BROADCAST [RS_230]
-                                            Group By Operator [GBY_229] (rows=1 width=12)
-                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                            <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
-                                              PARTITION_ONLY_SHUFFLE [RS_227]
-                                                Group By Operator [GBY_225] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                  Select Operator [SEL_221] (rows=18262 width=1119)
-                                                    Output:["_col0"]
-                                                     Please refer to the previous Select Operator [SEL_217]
+                    Top N Key Operator [TNK_95] (rows=843315281 width=88)
+                      keys:_col25, _col26, _col28, _col29,sort order:++++,top n:100
+                      Merge Join Operator [MERGEJOIN_214] (rows=843315281 width=88)
+                        Conds:RS_44._col3=RS_252._col0(Inner),Output:["_col5","_col12","_col20","_col25","_col26","_col28","_col29"]
+                      <-Map 20 [SIMPLE_EDGE] vectorized
+                        SHUFFLE [RS_252]
+                          PartitionCols:_col0
+                          Select Operator [SEL_251] (rows=1704 width=1910)
+                            Output:["_col0","_col1","_col2"]
+                            Filter Operator [FIL_250] (rows=1704 width=1910)
+                              predicate:s_store_sk is not null
+                              TableScan [TS_32] (rows=1704 width=1910)
+                                default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_id","s_store_name"]
+                      <-Reducer 4 [SIMPLE_EDGE]
+                        SHUFFLE [RS_44]
+                          PartitionCols:_col3
+                          Merge Join Operator [MERGEJOIN_213] (rows=766650239 width=88)
+                            Conds:RS_41._col1=RS_243._col0(Inner),Output:["_col3","_col5","_col12","_col20","_col25","_col26"]
+                          <-Map 18 [SIMPLE_EDGE] vectorized
+                            SHUFFLE [RS_243]
+                              PartitionCols:_col0
+                              Select Operator [SEL_242] (rows=462000 width=1436)
+                                Output:["_col0","_col1","_col2"]
+                                Filter Operator [FIL_241] (rows=462000 width=1436)
+                                  predicate:i_item_sk is not null
+                                  TableScan [TS_29] (rows=462000 width=1436)
+                                    default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_item_desc"]
+                          <-Reducer 3 [SIMPLE_EDGE]
+                            SHUFFLE [RS_41]
+                              PartitionCols:_col1
+                              Merge Join Operator [MERGEJOIN_212] (rows=696954748 width=88)
+                                Conds:RS_38._col1, _col2, _col4=RS_39._col8, _col9, _col10(Inner),Output:["_col1","_col3","_col5","_col12","_col20"]
+                              <-Reducer 11 [SIMPLE_EDGE]
+                                SHUFFLE [RS_39]
+                                  PartitionCols:_col8, _col9, _col10
+                                  Merge Join Operator [MERGEJOIN_211] (rows=348467716 width=135)
+                                    Conds:RS_25._col2, _col1=RS_26._col1, _col2(Inner),Output:["_col3","_col8","_col9","_col10","_col11"]
+                                  <-Reducer 13 [SIMPLE_EDGE]
+                                    PARTITION_ONLY_SHUFFLE [RS_26]
+                                      PartitionCols:_col1, _col2
+                                      Merge Join Operator [MERGEJOIN_210] (rows=63350266 width=77)
+                                        Conds:RS_234._col0=RS_225._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
+                                      <-Map 8 [SIMPLE_EDGE] vectorized
+                                        PARTITION_ONLY_SHUFFLE [RS_225]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_220] (rows=4058 width=1119)
+                                            Output:["_col0"]
+                                            Filter Operator [FIL_217] (rows=4058 width=1119)
+                                              predicate:((d_year = 2000) and d_date_sk is not null and d_moy BETWEEN 4 AND 10)
+                                              TableScan [TS_3] (rows=73049 width=1119)
+                                                default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
+                                      <-Map 17 [SIMPLE_EDGE] vectorized
+                                        SHUFFLE [RS_234]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_233] (rows=57591150 width=77)
+                                            Output:["_col0","_col1","_col2","_col3","_col4"]
+                                            Filter Operator [FIL_232] (rows=57591150 width=77)
+                                              predicate:(sr_customer_sk is not null and sr_item_sk is not null and sr_returned_date_sk is not null and sr_ticket_number is not null)
+                                              TableScan [TS_12] (rows=57591150 width=77)
+                                                default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_returned_date_sk","sr_item_sk","sr_customer_sk","sr_ticket_number","sr_net_loss"]
+                                  <-Reducer 10 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_25]
+                                      PartitionCols:_col2, _col1
+                                      Merge Join Operator [MERGEJOIN_209] (rows=316788826 width=135)
+                                        Conds:RS_265._col0=RS_223._col0(Inner),Output:["_col1","_col2","_col3"]
+                                      <-Map 8 [SIMPLE_EDGE] vectorized
+                                        PARTITION_ONLY_SHUFFLE [RS_223]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_219] (rows=4058 width=1119)
+                                            Output:["_col0"]
+                                            Filter Operator [FIL_216] (rows=4058 width=1119)
+                                              predicate:((d_year = 2000) and d_date_sk is not null and d_moy BETWEEN 4 AND 10)
+                                               Please refer to the previous TableScan [TS_3]
+                                      <-Map 16 [SIMPLE_EDGE] vectorized
+                                        SHUFFLE [RS_265]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_264] (rows=287989836 width=135)
+                                            Output:["_col0","_col1","_col2","_col3"]
+                                            Filter Operator [FIL_263] (rows=287989836 width=135)
+                                              predicate:((cs_bill_customer_sk BETWEEN DynamicValue(RS_26_store_returns_sr_customer_sk_min) AND DynamicValue(RS_26_store_returns_sr_customer_sk_max) and in_bloom_filter(cs_bill_customer_sk, DynamicValue(RS_26_store_returns_sr_customer_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_26_store_returns_sr_item_sk_min) AND DynamicValue(RS_26_store_returns_sr_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_26_store_returns_sr_item_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_42_item_i_item_sk_min) AND DynamicValue(RS_42_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_42_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_23_d3_d_date_sk_min) AND DynamicValue(RS_23_d3_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_23_d3_d_date_sk_bloom_filter))) and cs_bill_customer_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
+                                              TableScan [TS_6] (rows=287989836 width=135)
+                                                default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_item_sk","cs_net_profit"]
+                                              <-Reducer 14 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_237]
+                                                  Group By Operator [GBY_235] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
+                                                  <-Reducer 13 [CUSTOM_SIMPLE_EDGE]
+                                                    PARTITION_ONLY_SHUFFLE [RS_108]
+                                                      Group By Operator [GBY_107] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
+                                                        Select Operator [SEL_106] (rows=63350266 width=77)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_210]
+                                              <-Reducer 15 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_240]
+                                                  Group By Operator [GBY_238] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
+                                                  <-Reducer 13 [CUSTOM_SIMPLE_EDGE]
+                                                    PARTITION_ONLY_SHUFFLE [RS_123]
+                                                      Group By Operator [GBY_122] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
+                                                        Select Operator [SEL_121] (rows=63350266 width=77)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_210]
+                                              <-Reducer 19 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_249]
+                                                  Group By Operator [GBY_247] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                  <-Map 18 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                    SHUFFLE [RS_246]
+                                                      Group By Operator [GBY_245] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                        Select Operator [SEL_244] (rows=462000 width=1436)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Select Operator [SEL_242]
+                                              <-Reducer 12 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_262]
+                                                  Group By Operator [GBY_261] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                  <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                    PARTITION_ONLY_SHUFFLE [RS_229]
+                                                      Group By Operator [GBY_227] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                        Select Operator [SEL_224] (rows=4058 width=1119)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Select Operator [SEL_219]
+                              <-Reducer 2 [SIMPLE_EDGE]
+                                SHUFFLE [RS_38]
+                                  PartitionCols:_col1, _col2, _col4
+                                  Merge Join Operator [MERGEJOIN_208] (rows=633595212 width=88)
+                                    Conds:RS_260._col0=RS_221._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5"]
+                                  <-Map 8 [SIMPLE_EDGE] vectorized
+                                    PARTITION_ONLY_SHUFFLE [RS_221]
+                                      PartitionCols:_col0
+                                      Select Operator [SEL_218] (rows=18262 width=1119)
+                                        Output:["_col0"]
+                                        Filter Operator [FIL_215] (rows=18262 width=1119)
+                                          predicate:((d_moy = 4) and (d_year = 2000) and d_date_sk is not null)
+                                           Please refer to the previous TableScan [TS_3]
+                                  <-Map 1 [SIMPLE_EDGE] vectorized
+                                    SHUFFLE [RS_260]
+                                      PartitionCols:_col0
+                                      Select Operator [SEL_259] (rows=575995635 width=88)
+                                        Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                        Filter Operator [FIL_258] (rows=575995635 width=88)
+                                          predicate:((ss_customer_sk BETWEEN DynamicValue(RS_26_store_returns_sr_customer_sk_min) AND DynamicValue(RS_26_store_returns_sr_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_26_store_returns_sr_customer_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_26_store_returns_sr_item_sk_min) AND DynamicValue(RS_26_store_returns_sr_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_26_store_returns_sr_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_42_item_i_item_sk_min) AND DynamicValue(RS_42_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_42_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_36_d1_d_date_sk_min) AND DynamicValue(RS_36_d1_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_36_d1_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_45_store_s_store_sk_min) AND DynamicValue(RS_45_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_45_store_s_store_sk_bloom_filter))) and ss_customer_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_ticket_number is not null)
+                                          TableScan [TS_0] (rows=575995635 width=88)
+                                            default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_store_sk","ss_ticket_number","ss_net_profit"]
+                                          <-Reducer 14 [BROADCAST_EDGE] vectorized
+                                            BROADCAST [RS_236]
+                                               Please refer to the previous Group By Operator [GBY_235]
+                                          <-Reducer 15 [BROADCAST_EDGE] vectorized
+                                            BROADCAST [RS_239]
+                                               Please refer to the previous Group By Operator [GBY_238]
+                                          <-Reducer 19 [BROADCAST_EDGE] vectorized
+                                            BROADCAST [RS_248]
+                                               Please refer to the previous Group By Operator [GBY_247]
+                                          <-Reducer 21 [BROADCAST_EDGE] vectorized
+                                            BROADCAST [RS_257]
+                                              Group By Operator [GBY_256] (rows=1 width=12)
+                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                              <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                SHUFFLE [RS_255]
+                                                  Group By Operator [GBY_254] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                    Select Operator [SEL_253] (rows=1704 width=1910)
+                                                      Output:["_col0"]
+                                                       Please refer to the previous Select Operator [SEL_251]
+                                          <-Reducer 9 [BROADCAST_EDGE] vectorized
+                                            BROADCAST [RS_231]
+                                              Group By Operator [GBY_230] (rows=1 width=12)
+                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                              <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                PARTITION_ONLY_SHUFFLE [RS_228]
+                                                  Group By Operator [GBY_226] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                    Select Operator [SEL_222] (rows=18262 width=1119)
+                                                      Output:["_col0"]
+                                                       Please refer to the previous Select Operator [SEL_218]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query26.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query26.q.out b/ql/src/test/results/clientpositive/perf/tez/query26.q.out
index 0c6c0f8..a90e09f 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query26.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query26.q.out
@@ -58,126 +58,128 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_139]
-        Limit [LIM_138] (rows=100 width=135)
+      File Output Operator [FS_140]
+        Limit [LIM_139] (rows=100 width=135)
           Number of rows:100
-          Select Operator [SEL_137] (rows=210822976 width=135)
+          Select Operator [SEL_138] (rows=210822976 width=135)
             Output:["_col0","_col1","_col2","_col3","_col4"]
           <-Reducer 6 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_136]
-              Select Operator [SEL_135] (rows=210822976 width=135)
+            SHUFFLE [RS_137]
+              Select Operator [SEL_136] (rows=210822976 width=135)
                 Output:["_col0","_col1","_col2","_col3","_col4"]
-                Group By Operator [GBY_134] (rows=210822976 width=135)
+                Group By Operator [GBY_135] (rows=210822976 width=135)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)","sum(VALUE._col2)","count(VALUE._col3)","sum(VALUE._col4)","count(VALUE._col5)","sum(VALUE._col6)","count(VALUE._col7)"],keys:KEY._col0
                 <-Reducer 5 [SIMPLE_EDGE]
                   SHUFFLE [RS_29]
                     PartitionCols:_col0
                     Group By Operator [GBY_28] (rows=421645953 width=135)
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"],aggregations:["sum(_col4)","count(_col4)","sum(_col5)","count(_col5)","sum(_col7)","count(_col7)","sum(_col6)","count(_col6)"],keys:_col18
-                      Merge Join Operator [MERGEJOIN_98] (rows=421645953 width=135)
-                        Conds:RS_24._col2=RS_125._col0(Inner),Output:["_col4","_col5","_col6","_col7","_col18"]
-                      <-Map 14 [SIMPLE_EDGE] vectorized
-                        SHUFFLE [RS_125]
-                          PartitionCols:_col0
-                          Select Operator [SEL_124] (rows=462000 width=1436)
-                            Output:["_col0","_col1"]
-                            Filter Operator [FIL_123] (rows=462000 width=1436)
-                              predicate:i_item_sk is not null
-                              TableScan [TS_12] (rows=462000 width=1436)
-                                default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id"]
-                      <-Reducer 4 [SIMPLE_EDGE]
-                        SHUFFLE [RS_24]
-                          PartitionCols:_col2
-                          Merge Join Operator [MERGEJOIN_97] (rows=383314495 width=135)
-                            Conds:RS_21._col3=RS_117._col0(Inner),Output:["_col2","_col4","_col5","_col6","_col7"]
-                          <-Map 12 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_117]
-                              PartitionCols:_col0
-                              Select Operator [SEL_116] (rows=2300 width=1179)
-                                Output:["_col0"]
-                                Filter Operator [FIL_115] (rows=2300 width=1179)
-                                  predicate:(((p_channel_email = 'N') or (p_channel_event = 'N')) and p_promo_sk is not null)
-                                  TableScan [TS_9] (rows=2300 width=1179)
-                                    default@promotion,promotion,Tbl:COMPLETE,Col:NONE,Output:["p_promo_sk","p_channel_email","p_channel_event"]
-                          <-Reducer 3 [SIMPLE_EDGE]
-                            SHUFFLE [RS_21]
-                              PartitionCols:_col3
-                              Merge Join Operator [MERGEJOIN_96] (rows=348467716 width=135)
-                                Conds:RS_18._col0=RS_109._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col6","_col7"]
-                              <-Map 10 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_109]
-                                  PartitionCols:_col0
-                                  Select Operator [SEL_108] (rows=36524 width=1119)
-                                    Output:["_col0"]
-                                    Filter Operator [FIL_107] (rows=36524 width=1119)
-                                      predicate:((d_year = 1998) and d_date_sk is not null)
-                                      TableScan [TS_6] (rows=73049 width=1119)
-                                        default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
-                              <-Reducer 2 [SIMPLE_EDGE]
-                                SHUFFLE [RS_18]
-                                  PartitionCols:_col0
-                                  Merge Join Operator [MERGEJOIN_95] (rows=316788826 width=135)
-                                    Conds:RS_133._col1=RS_101._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6","_col7"]
-                                  <-Map 8 [SIMPLE_EDGE] vectorized
-                                    PARTITION_ONLY_SHUFFLE [RS_101]
-                                      PartitionCols:_col0
-                                      Select Operator [SEL_100] (rows=232725 width=385)
-                                        Output:["_col0"]
-                                        Filter Operator [FIL_99] (rows=232725 width=385)
-                                          predicate:((cd_education_status = 'Primary') and (cd_gender = 'F') and (cd_marital_status = 'W') and cd_demo_sk is not null)
-                                          TableScan [TS_3] (rows=1861800 width=385)
-                                            default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_education_status"]
-                                  <-Map 1 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_133]
-                                      PartitionCols:_col1
-                                      Select Operator [SEL_132] (rows=287989836 width=135)
-                                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
-                                        Filter Operator [FIL_131] (rows=287989836 width=135)
-                                          predicate:((cs_bill_cdemo_sk BETWEEN DynamicValue(RS_16_customer_demographics_cd_demo_sk_min) AND DynamicValue(RS_16_customer_demographics_cd_demo_sk_max) and in_bloom_filter(cs_bill_cdemo_sk, DynamicValue(RS_16_customer_demographics_cd_demo_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_25_item_i_item_sk_min) AND DynamicValue(RS_25_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_25_item_i_item_sk_bloom_filter))) and (cs_promo_sk BETWEEN DynamicValue(RS_22_promotion_p_promo_sk_min) AND DynamicValue(RS_22_promotion_p_promo_sk_max) and in_bloom_filter(cs_promo_sk, DynamicValue(RS_22_promotion_p_promo_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_19_date_dim_d_date_sk_min) AND DynamicValue(RS_19_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_19_date_dim_d_date_sk_bloom_filter))) and cs_bill_cdemo_sk is not null and cs_item_sk is not null and cs_promo_sk is not null and cs_sold_date_sk is not null)
-                                          TableScan [TS_0] (rows=287989836 width=135)
-                                            default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_cdemo_sk","cs_item_sk","cs_promo_sk","cs_quantity","cs_list_price","cs_sales_price","cs_coupon_amt"]
-                                          <-Reducer 11 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_114]
-                                              Group By Operator [GBY_113] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                              <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_112]
-                                                  Group By Operator [GBY_111] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                    Select Operator [SEL_110] (rows=36524 width=1119)
-                                                      Output:["_col0"]
-                                                       Please refer to the previous Select Operator [SEL_108]
-                                          <-Reducer 13 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_122]
-                                              Group By Operator [GBY_121] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                              <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_120]
-                                                  Group By Operator [GBY_119] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                    Select Operator [SEL_118] (rows=2300 width=1179)
-                                                      Output:["_col0"]
-                                                       Please refer to the previous Select Operator [SEL_116]
-                                          <-Reducer 15 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_130]
-                                              Group By Operator [GBY_129] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                              <-Map 14 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_128]
-                                                  Group By Operator [GBY_127] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                    Select Operator [SEL_126] (rows=462000 width=1436)
-                                                      Output:["_col0"]
-                                                       Please refer to the previous Select Operator [SEL_124]
-                                          <-Reducer 9 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_106]
-                                              Group By Operator [GBY_105] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                              <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                PARTITION_ONLY_SHUFFLE [RS_104]
-                                                  Group By Operator [GBY_103] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                    Select Operator [SEL_102] (rows=232725 width=385)
-                                                      Output:["_col0"]
-                                                       Please refer to the previous Select Operator [SEL_100]
+                      Top N Key Operator [TNK_55] (rows=421645953 width=135)
+                        keys:_col18,sort order:+,top n:100
+                        Merge Join Operator [MERGEJOIN_99] (rows=421645953 width=135)
+                          Conds:RS_24._col2=RS_126._col0(Inner),Output:["_col4","_col5","_col6","_col7","_col18"]
+                        <-Map 14 [SIMPLE_EDGE] vectorized
+                          SHUFFLE [RS_126]
+                            PartitionCols:_col0
+                            Select Operator [SEL_125] (rows=462000 width=1436)
+                              Output:["_col0","_col1"]
+                              Filter Operator [FIL_124] (rows=462000 width=1436)
+                                predicate:i_item_sk is not null
+                                TableScan [TS_12] (rows=462000 width=1436)
+                                  default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id"]
+                        <-Reducer 4 [SIMPLE_EDGE]
+                          SHUFFLE [RS_24]
+                            PartitionCols:_col2
+                            Merge Join Operator [MERGEJOIN_98] (rows=383314495 width=135)
+                              Conds:RS_21._col3=RS_118._col0(Inner),Output:["_col2","_col4","_col5","_col6","_col7"]
+                            <-Map 12 [SIMPLE_EDGE] vectorized
+                              SHUFFLE [RS_118]
+                                PartitionCols:_col0
+                                Select Operator [SEL_117] (rows=2300 width=1179)
+                                  Output:["_col0"]
+                                  Filter Operator [FIL_116] (rows=2300 width=1179)
+                                    predicate:(((p_channel_email = 'N') or (p_channel_event = 'N')) and p_promo_sk is not null)
+                                    TableScan [TS_9] (rows=2300 width=1179)
+                                      default@promotion,promotion,Tbl:COMPLETE,Col:NONE,Output:["p_promo_sk","p_channel_email","p_channel_event"]
+                            <-Reducer 3 [SIMPLE_EDGE]
+                              SHUFFLE [RS_21]
+                                PartitionCols:_col3
+                                Merge Join Operator [MERGEJOIN_97] (rows=348467716 width=135)
+                                  Conds:RS_18._col0=RS_110._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col6","_col7"]
+                                <-Map 10 [SIMPLE_EDGE] vectorized
+                                  SHUFFLE [RS_110]
+                                    PartitionCols:_col0
+                                    Select Operator [SEL_109] (rows=36524 width=1119)
+                                      Output:["_col0"]
+                                      Filter Operator [FIL_108] (rows=36524 width=1119)
+                                        predicate:((d_year = 1998) and d_date_sk is not null)
+                                        TableScan [TS_6] (rows=73049 width=1119)
+                                          default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+                                <-Reducer 2 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_18]
+                                    PartitionCols:_col0
+                                    Merge Join Operator [MERGEJOIN_96] (rows=316788826 width=135)
+                                      Conds:RS_134._col1=RS_102._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6","_col7"]
+                                    <-Map 8 [SIMPLE_EDGE] vectorized
+                                      PARTITION_ONLY_SHUFFLE [RS_102]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_101] (rows=232725 width=385)
+                                          Output:["_col0"]
+                                          Filter Operator [FIL_100] (rows=232725 width=385)
+                                            predicate:((cd_education_status = 'Primary') and (cd_gender = 'F') and (cd_marital_status = 'W') and cd_demo_sk is not null)
+                                            TableScan [TS_3] (rows=1861800 width=385)
+                                              default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_education_status"]
+                                    <-Map 1 [SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_134]
+                                        PartitionCols:_col1
+                                        Select Operator [SEL_133] (rows=287989836 width=135)
+                                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
+                                          Filter Operator [FIL_132] (rows=287989836 width=135)
+                                            predicate:((cs_bill_cdemo_sk BETWEEN DynamicValue(RS_16_customer_demographics_cd_demo_sk_min) AND DynamicValue(RS_16_customer_demographics_cd_demo_sk_max) and in_bloom_filter(cs_bill_cdemo_sk, DynamicValue(RS_16_customer_demographics_cd_demo_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_25_item_i_item_sk_min) AND DynamicValue(RS_25_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_25_item_i_item_sk_bloom_filter))) and (cs_promo_sk BETWEEN DynamicValue(RS_22_promotion_p_promo_sk_min) AND DynamicValue(RS_22_promotion_p_promo_sk_max) and in_bloom_filter(cs_promo_sk, DynamicValue(RS_22_promotion_p_promo_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_19_date_dim_d_date_sk_min) AND DynamicValue(RS_19_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_19_date_dim_d_date_sk_bloom_filter))) and cs_bill_cdemo_sk is not null and cs_item_sk is not null and cs_promo_sk is not null and cs_sold_date_sk is not null)
+                                            TableScan [TS_0] (rows=287989836 width=135)
+                                              default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_cdemo_sk","cs_item_sk","cs_promo_sk","cs_quantity","cs_list_price","cs_sales_price","cs_coupon_amt"]
+                                            <-Reducer 11 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_115]
+                                                Group By Operator [GBY_114] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_113]
+                                                    Group By Operator [GBY_112] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                      Select Operator [SEL_111] (rows=36524 width=1119)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Select Operator [SEL_109]
+                                            <-Reducer 13 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_123]
+                                                Group By Operator [GBY_122] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_121]
+                                                    Group By Operator [GBY_120] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                      Select Operator [SEL_119] (rows=2300 width=1179)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Select Operator [SEL_117]
+                                            <-Reducer 15 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_131]
+                                                Group By Operator [GBY_130] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                <-Map 14 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_129]
+                                                    Group By Operator [GBY_128] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                      Select Operator [SEL_127] (rows=462000 width=1436)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Select Operator [SEL_125]
+                                            <-Reducer 9 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_107]
+                                                Group By Operator [GBY_106] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                  PARTITION_ONLY_SHUFFLE [RS_105]
+                                                    Group By Operator [GBY_104] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                      Select Operator [SEL_103] (rows=232725 width=385)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Select Operator [SEL_101]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query27.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query27.q.out b/ql/src/test/results/clientpositive/perf/tez/query27.q.out
index df1e15f..7ea13c8 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query27.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query27.q.out
@@ -62,128 +62,130 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_140]
-        Limit [LIM_139] (rows=100 width=88)
+      File Output Operator [FS_141]
+        Limit [LIM_140] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_138] (rows=1264972921 width=88)
+          Select Operator [SEL_139] (rows=1264972921 width=88)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
           <-Reducer 6 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_137]
-              Select Operator [SEL_136] (rows=1264972921 width=88)
+            SHUFFLE [RS_138]
+              Select Operator [SEL_137] (rows=1264972921 width=88)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                Group By Operator [GBY_135] (rows=1264972921 width=88)
+                Group By Operator [GBY_136] (rows=1264972921 width=88)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)","sum(VALUE._col2)","count(VALUE._col3)","sum(VALUE._col4)","count(VALUE._col5)","sum(VALUE._col6)","count(VALUE._col7)"],keys:KEY._col0, KEY._col1, KEY._col2
                 <-Reducer 5 [SIMPLE_EDGE]
                   SHUFFLE [RS_30]
                     PartitionCols:_col0, _col1, _col2
                     Group By Operator [GBY_29] (rows=2529945843 width=88)
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10"],aggregations:["sum(_col2)","count(_col2)","sum(_col3)","count(_col3)","sum(_col4)","count(_col4)","sum(_col5)","count(_col5)"],keys:_col0, _col1, 0L
-                      Select Operator [SEL_27] (rows=843315281 width=88)
-                        Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                        Merge Join Operator [MERGEJOIN_99] (rows=843315281 width=88)
-                          Conds:RS_24._col1=RS_126._col0(Inner),Output:["_col4","_col5","_col6","_col7","_col15","_col17"]
-                        <-Map 14 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_126]
-                            PartitionCols:_col0
-                            Select Operator [SEL_125] (rows=462000 width=1436)
-                              Output:["_col0","_col1"]
-                              Filter Operator [FIL_124] (rows=462000 width=1436)
-                                predicate:i_item_sk is not null
-                                TableScan [TS_12] (rows=462000 width=1436)
-                                  default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id"]
-                        <-Reducer 4 [SIMPLE_EDGE]
-                          SHUFFLE [RS_24]
-                            PartitionCols:_col1
-                            Merge Join Operator [MERGEJOIN_98] (rows=766650239 width=88)
-                              Conds:RS_21._col3=RS_118._col0(Inner),Output:["_col1","_col4","_col5","_col6","_col7","_col15"]
-                            <-Map 12 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_118]
-                                PartitionCols:_col0
-                                Select Operator [SEL_117] (rows=852 width=1910)
-                                  Output:["_col0","_col1"]
-                                  Filter Operator [FIL_116] (rows=852 width=1910)
-                                    predicate:((s_state) IN ('SD', 'FL', 'MI', 'LA', 'MO', 'SC') and s_store_sk is not null)
-                                    TableScan [TS_9] (rows=1704 width=1910)
-                                      default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_state"]
-                            <-Reducer 3 [SIMPLE_EDGE]
-                              SHUFFLE [RS_21]
-                                PartitionCols:_col3
-                                Merge Join Operator [MERGEJOIN_97] (rows=696954748 width=88)
-                                  Conds:RS_18._col0=RS_110._col0(Inner),Output:["_col1","_col3","_col4","_col5","_col6","_col7"]
-                                <-Map 10 [SIMPLE_EDGE] vectorized
-                                  SHUFFLE [RS_110]
-                                    PartitionCols:_col0
-                                    Select Operator [SEL_109] (rows=36524 width=1119)
-                                      Output:["_col0"]
-                                      Filter Operator [FIL_108] (rows=36524 width=1119)
-                                        predicate:((d_year = 2001) and d_date_sk is not null)
-                                        TableScan [TS_6] (rows=73049 width=1119)
-                                          default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
-                                <-Reducer 2 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_18]
-                                    PartitionCols:_col0
-                                    Merge Join Operator [MERGEJOIN_96] (rows=633595212 width=88)
-                                      Conds:RS_134._col2=RS_102._col0(Inner),Output:["_col0","_col1","_col3","_col4","_col5","_col6","_col7"]
-                                    <-Map 8 [SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_102]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_101] (rows=232725 width=385)
-                                          Output:["_col0"]
-                                          Filter Operator [FIL_100] (rows=232725 width=385)
-                                            predicate:((cd_education_status = '2 yr Degree') and (cd_gender = 'M') and (cd_marital_status = 'U') and cd_demo_sk is not null)
-                                            TableScan [TS_3] (rows=1861800 width=385)
-                                              default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_education_status"]
-                                    <-Map 1 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_134]
-                                        PartitionCols:_col2
-                                        Select Operator [SEL_133] (rows=575995635 width=88)
-                                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
-                                          Filter Operator [FIL_132] (rows=575995635 width=88)
-                                            predicate:((ss_cdemo_sk BETWEEN DynamicValue(RS_16_customer_demographics_cd_demo_sk_min) AND DynamicValue(RS_16_customer_demographics_cd_demo_sk_max) and in_bloom_filter(ss_cdemo_sk, DynamicValue(RS_16_customer_demographics_cd_demo_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_25_item_i_item_sk_min) AND DynamicValue(RS_25_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_25_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_19_date_dim_d_date_sk_min) AND DynamicValue(RS_19_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_19_date_dim_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_22_store_s_store_sk_min) AND DynamicValue(RS_22_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_22_store_s_store_sk_bloom_filter))) and ss_cdemo_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null)
-                                            TableScan [TS_0] (rows=575995635 width=88)
-                                              default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_cdemo_sk","ss_store_sk","ss_quantity","ss_list_price","ss_sales_price","ss_coupon_amt"]
-                                            <-Reducer 11 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_115]
-                                                Group By Operator [GBY_114] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_113]
-                                                    Group By Operator [GBY_112] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_111] (rows=36524 width=1119)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_109]
-                                            <-Reducer 13 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_123]
-                                                Group By Operator [GBY_122] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_121]
-                                                    Group By Operator [GBY_120] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_119] (rows=852 width=1910)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_117]
-                                            <-Reducer 15 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_131]
-                                                Group By Operator [GBY_130] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 14 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_129]
-                                                    Group By Operator [GBY_128] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_127] (rows=462000 width=1436)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_125]
-                                            <-Reducer 9 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_107]
-                                                Group By Operator [GBY_106] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  PARTITION_ONLY_SHUFFLE [RS_105]
-                                                    Group By Operator [GBY_104] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_103] (rows=232725 width=385)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_101]
+                      Top N Key Operator [TNK_56] (rows=843315281 width=88)
+                        keys:_col0, _col1, 0L,sort order:+++,top n:100
+                        Select Operator [SEL_27] (rows=843315281 width=88)
+                          Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                          Merge Join Operator [MERGEJOIN_100] (rows=843315281 width=88)
+                            Conds:RS_24._col1=RS_127._col0(Inner),Output:["_col4","_col5","_col6","_col7","_col15","_col17"]
+                          <-Map 14 [SIMPLE_EDGE] vectorized
+                            SHUFFLE [RS_127]
+                              PartitionCols:_col0
+                              Select Operator [SEL_126] (rows=462000 width=1436)
+                                Output:["_col0","_col1"]
+                                Filter Operator [FIL_125] (rows=462000 width=1436)
+                                  predicate:i_item_sk is not null
+                                  TableScan [TS_12] (rows=462000 width=1436)
+                                    default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id"]
+                          <-Reducer 4 [SIMPLE_EDGE]
+                            SHUFFLE [RS_24]
+                              PartitionCols:_col1
+                              Merge Join Operator [MERGEJOIN_99] (rows=766650239 width=88)
+                                Conds:RS_21._col3=RS_119._col0(Inner),Output:["_col1","_col4","_col5","_col6","_col7","_col15"]
+                              <-Map 12 [SIMPLE_EDGE] vectorized
+                                SHUFFLE [RS_119]
+                                  PartitionCols:_col0
+                                  Select Operator [SEL_118] (rows=852 width=1910)
+                                    Output:["_col0","_col1"]
+                                    Filter Operator [FIL_117] (rows=852 width=1910)
+                                      predicate:((s_state) IN ('SD', 'FL', 'MI', 'LA', 'MO', 'SC') and s_store_sk is not null)
+                                      TableScan [TS_9] (rows=1704 width=1910)
+                                        default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_state"]
+                              <-Reducer 3 [SIMPLE_EDGE]
+                                SHUFFLE [RS_21]
+                                  PartitionCols:_col3
+                                  Merge Join Operator [MERGEJOIN_98] (rows=696954748 width=88)
+                                    Conds:RS_18._col0=RS_111._col0(Inner),Output:["_col1","_col3","_col4","_col5","_col6","_col7"]
+                                  <-Map 10 [SIMPLE_EDGE] vectorized
+                                    SHUFFLE [RS_111]
+                                      PartitionCols:_col0
+                                      Select Operator [SEL_110] (rows=36524 width=1119)
+                                        Output:["_col0"]
+                                        Filter Operator [FIL_109] (rows=36524 width=1119)
+                                          predicate:((d_year = 2001) and d_date_sk is not null)
+                                          TableScan [TS_6] (rows=73049 width=1119)
+                                            default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+                                  <-Reducer 2 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_18]
+                                      PartitionCols:_col0
+                                      Merge Join Operator [MERGEJOIN_97] (rows=633595212 width=88)
+                                        Conds:RS_135._col2=RS_103._col0(Inner),Output:["_col0","_col1","_col3","_col4","_col5","_col6","_col7"]
+                                      <-Map 8 [SIMPLE_EDGE] vectorized
+                                        PARTITION_ONLY_SHUFFLE [RS_103]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_102] (rows=232725 width=385)
+                                            Output:["_col0"]
+                                            Filter Operator [FIL_101] (rows=232725 width=385)
+                                              predicate:((cd_education_status = '2 yr Degree') and (cd_gender = 'M') and (cd_marital_status = 'U') and cd_demo_sk is not null)
+                                              TableScan [TS_3] (rows=1861800 width=385)
+                                                default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_education_status"]
+                                      <-Map 1 [SIMPLE_EDGE] vectorized
+                                        SHUFFLE [RS_135]
+                                          PartitionCols:_col2
+                                          Select Operator [SEL_134] (rows=575995635 width=88)
+                                            Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
+                                            Filter Operator [FIL_133] (rows=575995635 width=88)
+                                              predicate:((ss_cdemo_sk BETWEEN DynamicValue(RS_16_customer_demographics_cd_demo_sk_min) AND DynamicValue(RS_16_customer_demographics_cd_demo_sk_max) and in_bloom_filter(ss_cdemo_sk, DynamicValue(RS_16_customer_demographics_cd_demo_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_25_item_i_item_sk_min) AND DynamicValue(RS_25_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_25_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_19_date_dim_d_date_sk_min) AND DynamicValue(RS_19_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_19_date_dim_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_22_store_s_store_sk_min) AND DynamicValue(RS_22_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_22_store_s_store_sk_bloom_filter))) and ss_cdemo_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null)
+                                              TableScan [TS_0] (rows=575995635 width=88)
+                                                default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_cdemo_sk","ss_store_sk","ss_quantity","ss_list_price","ss_sales_price","ss_coupon_amt"]
+                                              <-Reducer 11 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_116]
+                                                  Group By Operator [GBY_115] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                  <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                    SHUFFLE [RS_114]
+                                                      Group By Operator [GBY_113] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                        Select Operator [SEL_112] (rows=36524 width=1119)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Select Operator [SEL_110]
+                                              <-Reducer 13 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_124]
+                                                  Group By Operator [GBY_123] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                  <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                    SHUFFLE [RS_122]
+                                                      Group By Operator [GBY_121] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                        Select Operator [SEL_120] (rows=852 width=1910)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Select Operator [SEL_118]
+                                              <-Reducer 15 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_132]
+                                                  Group By Operator [GBY_131] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                  <-Map 14 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                    SHUFFLE [RS_130]
+                                                      Group By Operator [GBY_129] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                        Select Operator [SEL_128] (rows=462000 width=1436)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Select Operator [SEL_126]
+                                              <-Reducer 9 [BROADCAST_EDGE] vectorized
+                                                BROADCAST [RS_108]
+                                                  Group By Operator [GBY_107] (rows=1 width=12)
+                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                  <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                    PARTITION_ONLY_SHUFFLE [RS_106]
+                                                      Group By Operator [GBY_105] (rows=1 width=12)
+                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                        Select Operator [SEL_104] (rows=232725 width=385)
+                                                          Output:["_col0"]
+                                                           Please refer to the previous Select Operator [SEL_102]
 


[32/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index 0000000,d91f737..bc04e06
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@@ -1,0 -1,3424 +1,3546 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
++import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName;
+ 
+ import java.io.IOException;
+ import java.lang.reflect.Constructor;
+ import java.lang.reflect.InvocationHandler;
+ import java.lang.reflect.InvocationTargetException;
+ import java.lang.reflect.Method;
+ import java.lang.reflect.Proxy;
+ import java.net.InetAddress;
+ import java.net.URI;
+ import java.net.UnknownHostException;
+ import java.nio.ByteBuffer;
+ import java.security.PrivilegedExceptionAction;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.Iterator;
+ import java.util.LinkedHashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ import java.util.NoSuchElementException;
+ import java.util.Random;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicInteger;
+ 
+ import javax.security.auth.login.LoginException;
+ 
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.common.ValidTxnList;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.hooks.URIResolverHook;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+ import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.ObjectPair;
+ import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+ import org.apache.hadoop.security.UserGroupInformation;
+ import org.apache.hadoop.util.ReflectionUtils;
+ import org.apache.hadoop.util.StringUtils;
+ import org.apache.thrift.TApplicationException;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.protocol.TBinaryProtocol;
+ import org.apache.thrift.protocol.TCompactProtocol;
+ import org.apache.thrift.protocol.TProtocol;
+ import org.apache.thrift.transport.TFramedTransport;
+ import org.apache.thrift.transport.TSocket;
+ import org.apache.thrift.transport.TTransport;
+ import org.apache.thrift.transport.TTransportException;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.collect.Lists;
+ 
+ /**
+  * Hive Metastore Client.
+  * The public implementation of IMetaStoreClient. Methods not inherited from IMetaStoreClient
+  * are not public and can change. Hence this is marked as unstable.
+  * For users who require retry mechanism when the connection between metastore and client is
+  * broken, RetryingMetaStoreClient class should be used.
+  */
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
+ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoCloseable {
+   /**
+    * Capabilities of the current client. If this client talks to a MetaStore server in a manner
+    * implying the usage of some expanded features that require client-side support that this client
+    * doesn't have (e.g. getting a table of a new type), it will get back failures when the
+    * capability checking is enabled (the default).
+    */
+   public final static ClientCapabilities VERSION = new ClientCapabilities(
+       Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES));
+   // Test capability for tests.
+   public final static ClientCapabilities TEST_VERSION = new ClientCapabilities(
+       Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES, ClientCapability.TEST_CAPABILITY));
+ 
+   ThriftHiveMetastore.Iface client = null;
+   private TTransport transport = null;
+   private boolean isConnected = false;
+   private URI metastoreUris[];
+   private final HiveMetaHookLoader hookLoader;
+   protected final Configuration conf;  // Keep a copy of HiveConf so if Session conf changes, we may need to get a new HMS client.
+   protected boolean fastpath = false;
+   private String tokenStrForm;
+   private final boolean localMetaStore;
+   private final MetaStoreFilterHook filterHook;
+   private final URIResolverHook uriResolverHook;
+   private final int fileMetadataBatchSize;
+ 
+   private Map<String, String> currentMetaVars;
+ 
+   private static final AtomicInteger connCount = new AtomicInteger(0);
+ 
+   // for thrift connects
+   private int retries = 5;
+   private long retryDelaySeconds = 0;
+   private final ClientCapabilities version;
+ 
+   static final protected Logger LOG = LoggerFactory.getLogger(HiveMetaStoreClientPreCatalog.class);
+ 
+   public HiveMetaStoreClientPreCatalog(Configuration conf) throws MetaException {
+     this(conf, null, true);
+   }
+ 
+   public HiveMetaStoreClientPreCatalog(Configuration conf, HiveMetaHookLoader hookLoader) throws MetaException {
+     this(conf, hookLoader, true);
+   }
+ 
+   public HiveMetaStoreClientPreCatalog(Configuration conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded)
+     throws MetaException {
+ 
+     this.hookLoader = hookLoader;
+     if (conf == null) {
+       conf = MetastoreConf.newMetastoreConf();
+       this.conf = conf;
+     } else {
+       this.conf = new Configuration(conf);
+     }
+     version = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ? TEST_VERSION : VERSION;
+     filterHook = loadFilterHooks();
+     uriResolverHook = loadUriResolverHook();
+     fileMetadataBatchSize = MetastoreConf.getIntVar(
+         conf, ConfVars.BATCH_RETRIEVE_OBJECTS_MAX);
+ 
+     String msUri = MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS);
+     localMetaStore = MetastoreConf.isEmbeddedMetaStore(msUri);
+     if (localMetaStore) {
+       if (!allowEmbedded) {
+         throw new MetaException("Embedded metastore is not allowed here. Please configure "
+             + ConfVars.THRIFT_URIS.toString() + "; it is currently set to [" + msUri + "]");
+       }
+       // instantiate the metastore server handler directly instead of connecting
+       // through the network
+       client = HiveMetaStore.newRetryingHMSHandler("hive client", this.conf, true);
+       isConnected = true;
+       snapshotActiveConf();
+       return;
+     }
+ 
+     // get the number retries
+     retries = MetastoreConf.getIntVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES);
+     retryDelaySeconds = MetastoreConf.getTimeVar(conf,
+         ConfVars.CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);
+ 
+     // user wants file store based configuration
+     if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS) != null) {
+       resolveUris();
+     } else {
+       LOG.error("NOT getting uris from conf");
+       throw new MetaException("MetaStoreURIs not found in conf file");
+     }
+ 
+     //If HADOOP_PROXY_USER is set in env or property,
+     //then need to create metastore client that proxies as that user.
+     String HADOOP_PROXY_USER = "HADOOP_PROXY_USER";
+     String proxyUser = System.getenv(HADOOP_PROXY_USER);
+     if (proxyUser == null) {
+       proxyUser = System.getProperty(HADOOP_PROXY_USER);
+     }
+     //if HADOOP_PROXY_USER is set, create DelegationToken using real user
+     if(proxyUser != null) {
+       LOG.info(HADOOP_PROXY_USER + " is set. Using delegation "
+           + "token for HiveMetaStore connection.");
+       try {
+         UserGroupInformation.getLoginUser().getRealUser().doAs(
+             new PrivilegedExceptionAction<Void>() {
+               @Override
+               public Void run() throws Exception {
+                 open();
+                 return null;
+               }
+             });
+         String delegationTokenPropString = "DelegationTokenForHiveMetaStoreServer";
+         String delegationTokenStr = getDelegationToken(proxyUser, proxyUser);
+         SecurityUtils.setTokenStr(UserGroupInformation.getCurrentUser(), delegationTokenStr,
+             delegationTokenPropString);
+         MetastoreConf.setVar(this.conf, ConfVars.TOKEN_SIGNATURE, delegationTokenPropString);
+         close();
+       } catch (Exception e) {
+         LOG.error("Error while setting delegation token for " + proxyUser, e);
+         if(e instanceof MetaException) {
+           throw (MetaException)e;
+         } else {
+           throw new MetaException(e.getMessage());
+         }
+       }
+     }
+     // finally open the store
+     open();
+   }
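  /*
   * Editor's illustrative sketch (hypothetical, not taken from the patch): how a caller
   * might construct this pre-catalog client against a remote metastore and close it again.
   * The thrift URI below is a placeholder; only the constructor, MetastoreConf.setVar and
   * close() defined in this file are used.
   */
  private static void exampleOpenAndClose() throws MetaException {
    Configuration conf = MetastoreConf.newMetastoreConf();
    // Hypothetical remote metastore endpoint; leaving THRIFT_URIS unset selects embedded mode.
    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://metastore-host:9083");
    HiveMetaStoreClientPreCatalog msc = new HiveMetaStoreClientPreCatalog(conf);
    try {
      // ... metadata calls, e.g. msc.getMetaConf(...), go here ...
    } finally {
      msc.close();
    }
  }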
+ 
+   private void resolveUris() throws MetaException {
+     String metastoreUrisString[] =  MetastoreConf.getVar(conf,
+             ConfVars.THRIFT_URIS).split(",");
+ 
+     List<URI> metastoreURIArray = new ArrayList<URI>();
+     try {
+       int i = 0;
+       for (String s : metastoreUrisString) {
+         URI tmpUri = new URI(s);
+         if (tmpUri.getScheme() == null) {
+           throw new IllegalArgumentException("URI: " + s
+                   + " does not have a scheme");
+         }
+         if (uriResolverHook != null) {
+           metastoreURIArray.addAll(uriResolverHook.resolveURI(tmpUri));
+         } else {
+           metastoreURIArray.add(new URI(
+                   tmpUri.getScheme(),
+                   tmpUri.getUserInfo(),
+                   HadoopThriftAuthBridge.getBridge().getCanonicalHostName(tmpUri.getHost()),
+                   tmpUri.getPort(),
+                   tmpUri.getPath(),
+                   tmpUri.getQuery(),
+                   tmpUri.getFragment()
+           ));
+         }
+       }
+       metastoreUris = new URI[metastoreURIArray.size()];
+       for (int j = 0; j < metastoreURIArray.size(); j++) {
+         metastoreUris[j] = metastoreURIArray.get(j);
+       }
+ 
+       if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) {
+         List uriList = Arrays.asList(metastoreUris);
+         Collections.shuffle(uriList);
+         metastoreUris = (URI[]) uriList.toArray();
+       }
+     } catch (IllegalArgumentException e) {
+       throw (e);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+   }
+ 
+ 
+   private MetaStoreFilterHook loadFilterHooks() throws IllegalStateException {
+     Class<? extends MetaStoreFilterHook> authProviderClass = MetastoreConf.
+         getClass(conf, ConfVars.FILTER_HOOK, DefaultMetaStoreFilterHookImpl.class,
+             MetaStoreFilterHook.class);
+     String msg = "Unable to create instance of " + authProviderClass.getName() + ": ";
+     try {
+       Constructor<? extends MetaStoreFilterHook> constructor =
+           authProviderClass.getConstructor(Configuration.class);
+       return constructor.newInstance(conf);
+     } catch (NoSuchMethodException | SecurityException | IllegalAccessException | InstantiationException | IllegalArgumentException | InvocationTargetException e) {
+       throw new IllegalStateException(msg + e.getMessage(), e);
+     }
+   }
+ 
+   //multiple clients may initialize the hook at the same time
+   synchronized private URIResolverHook loadUriResolverHook() throws IllegalStateException {
+ 
+     String uriResolverClassName =
+             MetastoreConf.getAsString(conf, ConfVars.URI_RESOLVER);
+     if (uriResolverClassName.equals("")) {
+       return null;
+     } else {
+       LOG.info("Loading uri resolver" + uriResolverClassName);
+       try {
+         Class<?> uriResolverClass = Class.forName(uriResolverClassName, true,
+                 JavaUtils.getClassLoader());
+         return (URIResolverHook) ReflectionUtils.newInstance(uriResolverClass, null);
+       } catch (Exception e) {
+         LOG.error("Exception loading uri resolver hook" + e);
+         return null;
+       }
+     }
+   }
+ 
+   /**
+    * Swaps the first element of the metastoreUris array with a random element from the
+    * remainder of the array.
+    */
+   private void promoteRandomMetaStoreURI() {
+     if (metastoreUris.length <= 1) {
+       return;
+     }
+     Random rng = new Random();
+     int index = rng.nextInt(metastoreUris.length - 1) + 1;
+     URI tmp = metastoreUris[0];
+     metastoreUris[0] = metastoreUris[index];
+     metastoreUris[index] = tmp;
+   }
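  /*
   * Editor's illustrative sketch (hypothetical): configuring several metastore URIs plus the
   * RANDOM selection policy, so that resolveUris() shuffles the list and reconnect(), defined
   * later in this class, can promote a different URI after a failure. Host names are examples;
   * the conf vars are the ones consulted above.
   */
  private static Configuration exampleMultiUriConf() {
    Configuration conf = MetastoreConf.newMetastoreConf();
    // Comma-separated list, as expected by resolveUris().
    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS,
        "thrift://ms1.example.com:9083,thrift://ms2.example.com:9083");
    MetastoreConf.setVar(conf, ConfVars.THRIFT_URI_SELECTION, "RANDOM");
    return conf;
  }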
+ 
+   @VisibleForTesting
+   public TTransport getTTransport() {
+     return transport;
+   }
+ 
+   @Override
+   public boolean isLocalMetaStore() {
+     return localMetaStore;
+   }
+ 
+   @Override
+   public boolean isCompatibleWith(Configuration conf) {
+     // Make a copy of currentMetaVars, there is a race condition that
+     // currentMetaVars might be changed during the execution of the method
+     Map<String, String> currentMetaVarsCopy = currentMetaVars;
+     if (currentMetaVarsCopy == null) {
+       return false; // recreate
+     }
+     boolean compatible = true;
+     for (ConfVars oneVar : MetastoreConf.metaVars) {
+       // Since metaVars are all of different types, use string for comparison
+       String oldVar = currentMetaVarsCopy.get(oneVar.getVarname());
+       String newVar = MetastoreConf.getAsString(conf, oneVar);
+       if (oldVar == null ||
+           (oneVar.isCaseSensitive() ? !oldVar.equals(newVar) : !oldVar.equalsIgnoreCase(newVar))) {
+         LOG.info("Mestastore configuration " + oneVar.toString() +
+             " changed from " + oldVar + " to " + newVar);
+         compatible = false;
+       }
+     }
+     return compatible;
+   }
+ 
+   @Override
+   public void setHiveAddedJars(String addedJars) {
+     MetastoreConf.setVar(conf, ConfVars.ADDED_JARS, addedJars);
+   }
+ 
+   @Override
+   public void reconnect() throws MetaException {
+     if (localMetaStore) {
+       // For direct DB connections we don't yet support reestablishing connections.
+       throw new MetaException("For direct MetaStore DB connections, we don't support retries" +
+           " at the client level.");
+     } else {
+       close();
+ 
+       if (uriResolverHook != null) {
+         //for dynamic uris, re-lookup if there are new metastore locations
+         resolveUris();
+       }
+ 
+       if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) {
+         // Swap the first element of the metastoreUris[] with a random element from the rest
+         // of the array. Rationale being that this method will generally be called when the default
+         // connection has died and the default connection is likely to be the first array element.
+         promoteRandomMetaStoreURI();
+       }
+       open();
+     }
+   }
+ 
+   /**
+    * @param dbname
+    * @param tbl_name
+    * @param new_tbl
+    * @throws InvalidOperationException
+    * @throws MetaException
+    * @throws TException
+    * @see
+    *   org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table(
+    *   java.lang.String, java.lang.String,
+    *   org.apache.hadoop.hive.metastore.api.Table)
+    */
+   @Override
+   public void alter_table(String dbname, String tbl_name, Table new_tbl)
+       throws InvalidOperationException, MetaException, TException {
+     alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null);
+   }
+ 
+   @Override
+   public void alter_table(String defaultDatabaseName, String tblName, Table table,
+       boolean cascade) throws InvalidOperationException, MetaException, TException {
+     EnvironmentContext environmentContext = new EnvironmentContext();
+     if (cascade) {
+       environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
+     }
+     alter_table_with_environmentContext(defaultDatabaseName, tblName, table, environmentContext);
+   }
+ 
+   @Override
+   public void alter_table_with_environmentContext(String dbname, String tbl_name, Table new_tbl,
+       EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException {
+     client.alter_table_with_environment_context(dbname, tbl_name, new_tbl, envContext);
+   }
+ 
+   /**
+    * @param dbname
+    * @param name
+    * @param part_vals
+    * @param newPart
+    * @throws InvalidOperationException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#rename_partition(
+    *      java.lang.String, java.lang.String, java.util.List, org.apache.hadoop.hive.metastore.api.Partition)
+    */
+   @Override
+   public void renamePartition(final String dbname, final String name, final List<String> part_vals, final Partition newPart)
+       throws InvalidOperationException, MetaException, TException {
+     client.rename_partition(dbname, name, part_vals, newPart);
+   }
+ 
+   private void open() throws MetaException {
+     isConnected = false;
+     TTransportException tte = null;
+     boolean useSSL = MetastoreConf.getBoolVar(conf, ConfVars.USE_SSL);
+     boolean useSasl = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_SASL);
+     boolean useFramedTransport = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_FRAMED_TRANSPORT);
+     boolean useCompactProtocol = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_COMPACT_PROTOCOL);
+     int clientSocketTimeout = (int) MetastoreConf.getTimeVar(conf,
+         ConfVars.CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
+ 
+     for (int attempt = 0; !isConnected && attempt < retries; ++attempt) {
+       for (URI store : metastoreUris) {
+         LOG.info("Trying to connect to metastore with URI " + store);
+ 
+         try {
+           if (useSSL) {
+             try {
+               String trustStorePath = MetastoreConf.getVar(conf, ConfVars.SSL_TRUSTSTORE_PATH).trim();
+               if (trustStorePath.isEmpty()) {
+                 throw new IllegalArgumentException(ConfVars.SSL_TRUSTSTORE_PATH.toString()
+                     + " Not configured for SSL connection");
+               }
+               String trustStorePassword =
+                   MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.SSL_TRUSTSTORE_PASSWORD);
+ 
+               // Create an SSL socket and connect
+               transport = SecurityUtils.getSSLSocket(store.getHost(), store.getPort(), clientSocketTimeout,
+                   trustStorePath, trustStorePassword );
+               LOG.info("Opened an SSL connection to metastore, current connections: " + connCount.incrementAndGet());
+             } catch(IOException e) {
+               throw new IllegalArgumentException(e);
+             } catch(TTransportException e) {
+               tte = e;
+               throw new MetaException(e.toString());
+             }
+           } else {
+             transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout);
+           }
+ 
+           if (useSasl) {
+             // Wrap thrift connection with SASL for secure connection.
+             try {
+               HadoopThriftAuthBridge.Client authBridge =
+                 HadoopThriftAuthBridge.getBridge().createClient();
+ 
+               // check if we should use delegation tokens to authenticate
+               // the call below gets hold of the tokens if they are set up by hadoop
+               // this should happen on the map/reduce tasks if the client added the
+               // tokens into hadoop's credential store in the front end during job
+               // submission.
+               String tokenSig = MetastoreConf.getVar(conf, ConfVars.TOKEN_SIGNATURE);
+               // tokenSig could be null
+               tokenStrForm = SecurityUtils.getTokenStrForm(tokenSig);
+ 
+               if(tokenStrForm != null) {
+                 LOG.info("HMSC::open(): Found delegation token. Creating DIGEST-based thrift connection.");
+                 // authenticate using delegation tokens via the "DIGEST" mechanism
+                 transport = authBridge.createClientTransport(null, store.getHost(),
+                     "DIGEST", tokenStrForm, transport,
+                         MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
+               } else {
+                 LOG.info("HMSC::open(): Could not find delegation token. Creating KERBEROS-based thrift connection.");
+                 String principalConfig =
+                     MetastoreConf.getVar(conf, ConfVars.KERBEROS_PRINCIPAL);
+                 transport = authBridge.createClientTransport(
+                     principalConfig, store.getHost(), "KERBEROS", null,
+                     transport, MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
+               }
+             } catch (IOException ioe) {
+               LOG.error("Couldn't create client transport", ioe);
+               throw new MetaException(ioe.toString());
+             }
+           } else {
+             if (useFramedTransport) {
+               transport = new TFramedTransport(transport);
+             }
+           }
+ 
+           final TProtocol protocol;
+           if (useCompactProtocol) {
+             protocol = new TCompactProtocol(transport);
+           } else {
+             protocol = new TBinaryProtocol(transport);
+           }
+           client = new ThriftHiveMetastore.Client(protocol);
+           try {
+             if (!transport.isOpen()) {
+               transport.open();
+               LOG.info("Opened a connection to metastore, current connections: " + connCount.incrementAndGet());
+             }
+             isConnected = true;
+           } catch (TTransportException e) {
+             tte = e;
+             if (LOG.isDebugEnabled()) {
+               LOG.warn("Failed to connect to the MetaStore Server...", e);
+             } else {
+               // Don't print full exception trace if DEBUG is not on.
+               LOG.warn("Failed to connect to the MetaStore Server...");
+             }
+           }
+ 
+           if (isConnected && !useSasl && MetastoreConf.getBoolVar(conf, ConfVars.EXECUTE_SET_UGI)){
+             // Call set_ugi, only in unsecure mode.
+             try {
+               UserGroupInformation ugi = SecurityUtils.getUGI();
+               client.set_ugi(ugi.getUserName(), Arrays.asList(ugi.getGroupNames()));
+             } catch (LoginException e) {
+               LOG.warn("Failed to do login. set_ugi() is not successful, " +
+                        "Continuing without it.", e);
+             } catch (IOException e) {
+               LOG.warn("Failed to find ugi of client set_ugi() is not successful, " +
+                   "Continuing without it.", e);
+             } catch (TException e) {
+               LOG.warn("set_ugi() not successful, Likely cause: new client talking to old server. "
+                   + "Continuing without it.", e);
+             }
+           }
+         } catch (MetaException e) {
+           LOG.error("Unable to connect to metastore with URI " + store
+                     + " in attempt " + attempt, e);
+         }
+         if (isConnected) {
+           break;
+         }
+       }
+       // Wait before launching the next round of connection retries.
+       if (!isConnected && retryDelaySeconds > 0) {
+         try {
+           LOG.info("Waiting " + retryDelaySeconds + " seconds before next connection attempt.");
+           Thread.sleep(retryDelaySeconds * 1000);
+         } catch (InterruptedException ignore) {}
+       }
+     }
+ 
+     if (!isConnected) {
+       throw new MetaException("Could not connect to meta store using any of the URIs provided." +
+         " Most recent failure: " + StringUtils.stringifyException(tte));
+     }
+ 
+     snapshotActiveConf();
+ 
+     LOG.info("Connected to metastore.");
+   }
+ 
+   private void snapshotActiveConf() {
+     currentMetaVars = new HashMap<>(MetastoreConf.metaVars.length);
+     for (ConfVars oneVar : MetastoreConf.metaVars) {
+       currentMetaVars.put(oneVar.getVarname(), MetastoreConf.getAsString(conf, oneVar));
+     }
+   }
+ 
+   @Override
+   public String getTokenStrForm() throws IOException {
+     return tokenStrForm;
+    }
+ 
+   @Override
+   public void close() {
+     isConnected = false;
+     currentMetaVars = null;
+     try {
+       if (null != client) {
+         client.shutdown();
+       }
+     } catch (TException e) {
+       LOG.debug("Unable to shutdown metastore client. Will try closing transport directly.", e);
+     }
+     // Transport would have been closed via client.shutdown(), so we don't need this, but
+     // just in case, we make this call.
+     if ((transport != null) && transport.isOpen()) {
+       transport.close();
+       LOG.info("Closed a connection to metastore, current connections: " + connCount.decrementAndGet());
+     }
+   }
+ 
+   @Override
+   public void setMetaConf(String key, String value) throws TException {
+     client.setMetaConf(key, value);
+   }
+ 
+   @Override
+   public String getMetaConf(String key) throws TException {
+     return client.getMetaConf(key);
+   }
+ 
+   /**
+    * @param new_part
+    * @return the added partition
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partition(org.apache.hadoop.hive.metastore.api.Partition)
+    */
+   @Override
+   public Partition add_partition(Partition new_part) throws TException {
+     return add_partition(new_part, null);
+   }
+ 
+   public Partition add_partition(Partition new_part, EnvironmentContext envContext)
+       throws TException {
+     Partition p = client.add_partition_with_environment_context(new_part, envContext);
+     return fastpath ? p : deepCopy(p);
+   }
+ 
+   /**
+    * @param new_parts
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partitions(List)
+    */
+   @Override
+   public int add_partitions(List<Partition> new_parts) throws TException {
+     return client.add_partitions(new_parts);
+   }
+ 
+   @Override
+   public List<Partition> add_partitions(
+       List<Partition> parts, boolean ifNotExists, boolean needResults) throws TException {
+     if (parts.isEmpty()) {
+       return needResults ? new ArrayList<>() : null;
+     }
+     Partition part = parts.get(0);
+     AddPartitionsRequest req = new AddPartitionsRequest(
+         part.getDbName(), part.getTableName(), parts, ifNotExists);
+     req.setNeedResult(needResults);
+     AddPartitionsResult result = client.add_partitions_req(req);
+     return needResults ? filterHook.filterPartitions(result.getPartitions()) : null;
+   }
+ 
+   @Override
+   public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TException {
+     return client.add_partitions_pspec(partitionSpec.toPartitionSpec());
+   }
+ 
+   /**
+    * @param table_name
+    * @param db_name
+    * @param part_vals
+    * @return the appended partition
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String,
+    *      java.lang.String, java.util.List)
+    */
+   @Override
+   public Partition appendPartition(String db_name, String table_name,
+       List<String> part_vals) throws TException {
+     return appendPartition(db_name, table_name, part_vals, null);
+   }
+ 
+   public Partition appendPartition(String db_name, String table_name, List<String> part_vals,
+       EnvironmentContext envContext) throws TException {
+     Partition p = client.append_partition_with_environment_context(db_name, table_name,
+         part_vals, envContext);
+     return fastpath ? p : deepCopy(p);
+   }
+ 
+   @Override
+   public Partition appendPartition(String dbName, String tableName, String partName)
+       throws TException {
+     return appendPartition(dbName, tableName, partName, (EnvironmentContext)null);
+   }
+ 
+   public Partition appendPartition(String dbName, String tableName, String partName,
+       EnvironmentContext envContext) throws TException {
+     Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName,
+         partName, envContext);
+     return fastpath ? p : deepCopy(p);
+   }
+ 
+   /**
+    * Exchange the partition between two tables
+    * @param partitionSpecs partitions specs of the parent partition to be exchanged
+    * @param destDb the db of the destination table
+    * @param destinationTableName the destination table name
+    * @return new partition after exchanging
+    */
+   @Override
+   public Partition exchange_partition(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destDb,
+       String destinationTableName) throws MetaException,
+       NoSuchObjectException, InvalidObjectException, TException {
+     return client.exchange_partition(partitionSpecs, sourceDb, sourceTable,
+         destDb, destinationTableName);
+   }
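  /*
   * Editor's illustrative sketch (hypothetical): building the partitionSpecs map consumed by
   * exchange_partition(). The database, table and partition names are placeholders.
   */
  private Partition exampleExchangePartition() throws TException {
    Map<String, String> partitionSpecs = new HashMap<>();
    partitionSpecs.put("ds", "2018-07-19"); // partition column -> value to move
    return exchange_partition(partitionSpecs,
        "staging_db", "store_sales_staging", // hypothetical source db/table
        "warehouse_db", "store_sales");      // hypothetical destination db/table
  }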
+ 
+   /**
+    * Exchange the partitions between two tables
+    * @param partitionSpecs partitions specs of the parent partition to be exchanged
+    * @param destDb the db of the destination table
+    * @param destinationTableName the destination table name
+    * @return new partitions after exchanging
+    */
+   @Override
+   public List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destDb,
+       String destinationTableName) throws MetaException,
+       NoSuchObjectException, InvalidObjectException, TException {
+     return client.exchange_partitions(partitionSpecs, sourceDb, sourceTable,
+         destDb, destinationTableName);
+   }
+ 
+   @Override
+   public void validatePartitionNameCharacters(List<String> partVals)
+       throws TException, MetaException {
+     client.partition_name_has_valid_characters(partVals, true);
+   }
+ 
+   /**
+    * Create a new Database
+    * @param db
+    * @throws AlreadyExistsException
+    * @throws InvalidObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(Database)
+    */
+   @Override
+   public void createDatabase(Database db)
+       throws AlreadyExistsException, InvalidObjectException, MetaException, TException {
+     client.create_database(db);
+   }
+ 
+   /**
+    * @param tbl
+    * @throws MetaException
+    * @throws NoSuchObjectException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table)
+    */
+   @Override
+   public void createTable(Table tbl) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, NoSuchObjectException, TException {
+     createTable(tbl, null);
+   }
+ 
+   public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, NoSuchObjectException, TException {
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preCreateTable(tbl);
+     }
+     boolean success = false;
+     try {
+       // Subclasses can override this step (for example, for temporary tables)
+       create_table_with_environment_context(tbl, envContext);
+       if (hook != null) {
+         hook.commitCreateTable(tbl);
+       }
+       success = true;
+     }
+     finally {
+       if (!success && (hook != null)) {
+         try {
+           hook.rollbackCreateTable(tbl);
+         } catch (Exception e){
+           LOG.error("Create rollback failed with", e);
+         }
+       }
+     }
+   }
+ 
+   @Override
+   public void createTableWithConstraints(Table tbl,
+       List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+       List<SQLUniqueConstraint> uniqueConstraints,
+       List<SQLNotNullConstraint> notNullConstraints,
+       List<SQLDefaultConstraint> defaultConstraints,
+       List<SQLCheckConstraint> checkConstraints)
+         throws AlreadyExistsException, InvalidObjectException,
+         MetaException, NoSuchObjectException, TException {
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preCreateTable(tbl);
+     }
+     boolean success = false;
+     try {
+       // Subclasses can override this step (for example, for temporary tables)
+       client.create_table_with_constraints(tbl, primaryKeys, foreignKeys,
+           uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
+       if (hook != null) {
+         hook.commitCreateTable(tbl);
+       }
+       success = true;
+     } finally {
+       if (!success && (hook != null)) {
+         hook.rollbackCreateTable(tbl);
+       }
+     }
+   }
+ 
+   @Override
+   public void dropConstraint(String dbName, String tableName, String constraintName) throws
+     NoSuchObjectException, MetaException, TException {
+     client.drop_constraint(new DropConstraintRequest(dbName, tableName, constraintName));
+   }
+ 
+   @Override
+   public void addPrimaryKey(List<SQLPrimaryKey> primaryKeyCols) throws
+     NoSuchObjectException, MetaException, TException {
+     client.add_primary_key(new AddPrimaryKeyRequest(primaryKeyCols));
+   }
+ 
+   @Override
+   public void addForeignKey(List<SQLForeignKey> foreignKeyCols) throws
+     NoSuchObjectException, MetaException, TException {
+     client.add_foreign_key(new AddForeignKeyRequest(foreignKeyCols));
+   }
+ 
+   @Override
+   public void addUniqueConstraint(List<SQLUniqueConstraint> uniqueConstraintCols) throws
+     NoSuchObjectException, MetaException, TException {
+     client.add_unique_constraint(new AddUniqueConstraintRequest(uniqueConstraintCols));
+   }
+ 
+   @Override
+   public void addNotNullConstraint(List<SQLNotNullConstraint> notNullConstraintCols) throws
+     NoSuchObjectException, MetaException, TException {
+     client.add_not_null_constraint(new AddNotNullConstraintRequest(notNullConstraintCols));
+   }
+ 
+   @Override
+   public void addDefaultConstraint(List<SQLDefaultConstraint> defaultConstraints) throws
+       NoSuchObjectException, MetaException, TException {
+     client.add_default_constraint(new AddDefaultConstraintRequest(defaultConstraints));
+   }
+ 
+   @Override
+   public void addCheckConstraint(List<SQLCheckConstraint> checkConstraints) throws MetaException,
+       NoSuchObjectException, TException {
+     client.add_check_constraint(new AddCheckConstraintRequest(checkConstraints));
+   }
+ 
+   /**
+    * @param type
+    * @return true or false
+    * @throws AlreadyExistsException
+    * @throws InvalidObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_type(org.apache.hadoop.hive.metastore.api.Type)
+    */
+   public boolean createType(Type type) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, TException {
+     return client.create_type(type);
+   }
+ 
+   /**
+    * @param name
+    * @throws NoSuchObjectException
+    * @throws InvalidOperationException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_database(java.lang.String, boolean, boolean)
+    */
+   @Override
+   public void dropDatabase(String name)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(name, true, false, false);
+   }
+ 
+   @Override
+   public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(name, deleteData, ignoreUnknownDb, false);
+   }
+ 
+   @Override
+   public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     try {
+       getDatabase(name);
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownDb) {
+         throw e;
+       }
+       return;
+     }
+ 
+     if (cascade) {
+        List<String> tableList = getAllTables(name);
+        for (String table : tableList) {
+          try {
+            // Subclasses can override this step (for example, for temporary tables)
+            dropTable(name, table, deleteData, true);
+          } catch (UnsupportedOperationException e) {
+            // Ignore Index tables, those will be dropped with parent tables
+          }
+         }
+     }
+     client.drop_database(name, deleteData, cascade);
+   }
+ 
+   /**
+    * @param tbl_name
+    * @param db_name
+    * @param part_vals
+    * @return true or false
+    * @throws NoSuchObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String,
+    *      java.lang.String, java.util.List, boolean)
+    */
+   public boolean dropPartition(String db_name, String tbl_name,
+       List<String> part_vals) throws NoSuchObjectException, MetaException,
+       TException {
+     return dropPartition(db_name, tbl_name, part_vals, true, null);
+   }
+ 
+   public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals,
+       EnvironmentContext env_context) throws NoSuchObjectException, MetaException, TException {
+     return dropPartition(db_name, tbl_name, part_vals, true, env_context);
+   }
+ 
+   @Override
+   public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData)
+       throws NoSuchObjectException, MetaException, TException {
+     return dropPartition(dbName, tableName, partName, deleteData, null);
+   }
+ 
+   private static EnvironmentContext getEnvironmentContextWithIfPurgeSet() {
+     Map<String, String> warehouseOptions = new HashMap<>();
+     warehouseOptions.put("ifPurge", "TRUE");
+     return new EnvironmentContext(warehouseOptions);
+   }
+ 
+   /*
+   public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData, boolean ifPurge)
+       throws NoSuchObjectException, MetaException, TException {
+ 
+     return dropPartition(dbName, tableName, partName, deleteData,
+                          ifPurge? getEnvironmentContextWithIfPurgeSet() : null);
+   }
+   */
+ 
+   public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData,
+       EnvironmentContext envContext) throws NoSuchObjectException, MetaException, TException {
+     return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName,
+         deleteData, envContext);
+   }
+ 
+   /**
+    * @param db_name
+    * @param tbl_name
+    * @param part_vals
+    * @param deleteData
+    *          delete the underlying data or just delete the table in metadata
+    * @return true or false
+    * @throws NoSuchObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String,
+    *      java.lang.String, java.util.List, boolean)
+    */
+   @Override
+   public boolean dropPartition(String db_name, String tbl_name,
+       List<String> part_vals, boolean deleteData) throws NoSuchObjectException,
+       MetaException, TException {
+     return dropPartition(db_name, tbl_name, part_vals, deleteData, null);
+   }
+ 
+   @Override
+   public boolean dropPartition(String db_name, String tbl_name,
+       List<String> part_vals, PartitionDropOptions options) throws TException {
+     return dropPartition(db_name, tbl_name, part_vals, options.deleteData,
+                          options.purgeData? getEnvironmentContextWithIfPurgeSet() : null);
+   }
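  /*
   * Editor's illustrative sketch (hypothetical): dropping a single partition through the
   * PartitionDropOptions overload above. Names and values are placeholders; the fluent
   * deleteData()/ifExists() setters are the same ones used by the dropPartitions()
   * overloads later in this class.
   */
  private boolean exampleDropPartition() throws TException {
    return dropPartition("warehouse_db", "store_sales",
        Arrays.asList("2018-07-19"),
        PartitionDropOptions.instance().deleteData(true).ifExists(true));
  }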
+ 
+   public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals,
+       boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException,
+       MetaException, TException {
+     return client.drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData,
+         envContext);
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+                                         List<ObjectPair<Integer, byte[]>> partExprs, PartitionDropOptions options)
+       throws TException {
+     RequestPartsSpec rps = new RequestPartsSpec();
+     List<DropPartitionsExpr> exprs = new ArrayList<>(partExprs.size());
+     for (ObjectPair<Integer, byte[]> partExpr : partExprs) {
+       DropPartitionsExpr dpe = new DropPartitionsExpr();
+       dpe.setExpr(partExpr.getSecond());
+       dpe.setPartArchiveLevel(partExpr.getFirst());
+       exprs.add(dpe);
+     }
+     rps.setExprs(exprs);
+     DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps);
+     req.setDeleteData(options.deleteData);
+     req.setNeedResult(options.returnResults);
+     req.setIfExists(options.ifExists);
+     if (options.purgeData) {
+       LOG.info("Dropped partitions will be purged!");
+       req.setEnvironmentContext(getEnvironmentContextWithIfPurgeSet());
+     }
+     return client.drop_partitions_req(req).getPartitions();
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+       List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
+       boolean ifExists, boolean needResult) throws NoSuchObjectException, MetaException, TException {
+ 
+     return dropPartitions(dbName, tblName, partExprs,
+                           PartitionDropOptions.instance()
+                                               .deleteData(deleteData)
+                                               .ifExists(ifExists)
+                                               .returnResults(needResult));
+ 
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+       List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
+       boolean ifExists) throws NoSuchObjectException, MetaException, TException {
+     // By default, we need the results from dropPartitions();
+     return dropPartitions(dbName, tblName, partExprs,
+                           PartitionDropOptions.instance()
+                                               .deleteData(deleteData)
+                                               .ifExists(ifExists));
+   }
+ 
+   /**
+    * {@inheritDoc}
+    * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+    */
+   @Override
+   public void dropTable(String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab) throws MetaException, TException,
+       NoSuchObjectException, UnsupportedOperationException {
+     dropTable(dbname, name, deleteData, ignoreUnknownTab, null);
+   }
+ 
+   /**
+    * Drop the table and choose whether to save the data in the trash.
+    * @param ifPurge completely purge the table (skipping trash) while removing
+    *                data from warehouse
+    * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+    */
+   @Override
+   public void dropTable(String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab, boolean ifPurge)
+       throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException {
+     // Build a new EnvironmentContext with ifPurge set.
+     EnvironmentContext envContext = null;
+     if (ifPurge) {
+       Map<String, String> warehouseOptions = new HashMap<>();
+       warehouseOptions.put("ifPurge", "TRUE");
+       envContext = new EnvironmentContext(warehouseOptions);
+     }
+     dropTable(dbname, name, deleteData, ignoreUnknownTab, envContext);
+   }
+ 
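For illustration only, a call of the ifPurge variant above, assuming client is an instance of this class; the table name is invented.

    // Drop a staging table, removing its data and bypassing the trash.
    client.dropTable("default", "tmp_staging",
        /* deleteData */ true, /* ignoreUnknownTab */ true, /* ifPurge */ true);
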
+   /**
+    * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+    */
+   @Override
+   public void dropTable(String dbname, String name)
+       throws NoSuchObjectException, MetaException, TException {
+     dropTable(dbname, name, true, true, null);
+   }
+ 
+   /**
+    * Drop the table and choose whether to: delete the underlying table data;
+    * throw if the table doesn't exist; save the data in the trash.
+    *
+    * @param dbname
+    * @param name
+    * @param deleteData
+    *          delete the underlying data or just delete the table in metadata
+    * @param ignoreUnknownTab
+    *          don't throw if the requested table doesn't exist
+    * @param envContext
+    *          for communicating with thrift
+    * @throws MetaException
+    *           could not drop table properly
+    * @throws NoSuchObjectException
+    *           the table wasn't found
+    * @throws TException
+    *           a thrift communication error occurred
+    * @throws UnsupportedOperationException
+    *           dropping an index table is not allowed
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
+    *      java.lang.String, boolean)
+    */
+   public void dropTable(String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab, EnvironmentContext envContext) throws MetaException, TException,
+       NoSuchObjectException, UnsupportedOperationException {
+     Table tbl;
+     try {
+       tbl = getTable(dbname, name);
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownTab) {
+         throw e;
+       }
+       return;
+     }
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preDropTable(tbl);
+     }
+     boolean success = false;
+     try {
+       drop_table_with_environment_context(dbname, name, deleteData, envContext);
+       if (hook != null) {
+         hook.commitDropTable(tbl, deleteData || (envContext != null && "TRUE".equals(envContext.getProperties().get("ifPurge"))));
+       }
+       success = true;
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownTab) {
+         throw e;
+       }
+     } finally {
+       if (!success && (hook != null)) {
+         hook.rollbackDropTable(tbl);
+       }
+     }
+   }
+ 
+   /**
+    * Truncate a table, or a subset of its partitions.
+    * @param dbName
+    *          The database to which the table to be truncated belongs
+    * @param tableName
+    *          The table to truncate
+    * @param partNames
+    *          List of partitions to truncate. NULL truncates the whole table / all partitions
+    * @throws MetaException
+    * @throws TException
+    *           Could not truncate table properly.
+    */
+   @Override
+   public void truncateTable(String dbName, String tableName, List<String> partNames) throws MetaException, TException {
+     client.truncate_table(dbName, tableName, partNames);
+   }
+ 
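A short, hedged sketch of how truncateTable might be called; the table and partition names are hypothetical and client is assumed to be an instance of this class.

    // Truncate two partitions; passing null for partNames would truncate the whole table.
    client.truncateTable("default", "web_logs",
        Arrays.asList("ds=2018-07-01", "ds=2018-07-02"));
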
+   /**
+    * Recycles the files recursively from the input path to the cmroot directory either by copying or moving it.
+    *
+    * @param request Carries the path of the data files to be recycled to cmroot and
+    *                an isPurge flag; when the flag is true, the recycled files are not moved to Trash
+    * @return Response which is currently void
+    */
+   @Override
+   public CmRecycleResponse recycleDirToCmPath(CmRecycleRequest request) throws MetaException, TException {
+     return client.cm_recycle(request);
+   }
+ 
+   /**
+    * @param type
+    * @return true if the type is dropped
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_type(java.lang.String)
+    */
+   public boolean dropType(String type) throws NoSuchObjectException, MetaException, TException {
+     return client.drop_type(type);
+   }
+ 
+   /**
+    * @param name
+    * @return map of types
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type_all(java.lang.String)
+    */
+   public Map<String, Type> getTypeAll(String name) throws MetaException,
+       TException {
+     Map<String, Type> result = null;
+     Map<String, Type> fromClient = client.get_type_all(name);
+     if (fromClient != null) {
+       result = new LinkedHashMap<>();
+       for (String key : fromClient.keySet()) {
+         result.put(key, deepCopy(fromClient.get(key)));
+       }
+     }
+     return result;
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getDatabases(String databasePattern)
+     throws MetaException {
+     try {
+       return filterHook.filterDatabases(client.get_databases(databasePattern));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getAllDatabases() throws MetaException {
+     try {
+       return filterHook.filterDatabases(client.get_all_databases());
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   /**
+    * @param db_name
+    * @param tbl_name
+    * @param max_parts
+    * @return list of partitions
+    * @throws NoSuchObjectException
+    * @throws MetaException
+    * @throws TException
+    */
+   @Override
+   public List<Partition> listPartitions(String db_name, String tbl_name,
+       short max_parts) throws NoSuchObjectException, MetaException, TException {
+     List<Partition> parts = client.get_partitions(db_name, tbl_name, max_parts);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException {
+     return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
+         client.get_partitions_pspec(dbName, tableName, maxParts)));
+   }
+ 
+   @Override
+   public List<Partition> listPartitions(String db_name, String tbl_name,
+       List<String> part_vals, short max_parts)
+       throws NoSuchObjectException, MetaException, TException {
+     List<Partition> parts = client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String db_name,
+       String tbl_name, short max_parts, String user_name, List<String> group_names)
+        throws NoSuchObjectException, MetaException, TException {
+     List<Partition> parts = client.get_partitions_with_auth(db_name, tbl_name, max_parts,
+         user_name, group_names);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String db_name,
+       String tbl_name, List<String> part_vals, short max_parts,
+       String user_name, List<String> group_names) throws NoSuchObjectException,
+       MetaException, TException {
+     List<Partition> parts = client.get_partitions_ps_with_auth(db_name,
+         tbl_name, part_vals, max_parts, user_name, group_names);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   /**
+    * Get list of partitions matching specified filter
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= \"p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @param max_parts the maximum number of partitions to return,
+    *    all partitions are returned if -1 is passed
+    * @return list of partitions
+    * @throws MetaException
+    * @throws NoSuchObjectException
+    * @throws TException
+    */
+   @Override
+   public List<Partition> listPartitionsByFilter(String db_name, String tbl_name,
+       String filter, short max_parts) throws MetaException,
+          NoSuchObjectException, TException {
+     List<Partition> parts = client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
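A usage sketch reusing the filter syntax documented above; the database, table and partition key are hypothetical, and client is assumed to be an instance of this class.

    // Fetch all partitions whose string partition key "ds" equals "2018-07-01";
    // -1 requests every matching partition.
    List<Partition> parts = client.listPartitionsByFilter(
        "default", "web_logs", "ds = \"2018-07-01\"", (short) -1);
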
+   @Override
+   public PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name,
+                                                        String filter, int max_parts) throws MetaException,
+          NoSuchObjectException, TException {
+     return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
+         client.get_part_specs_by_filter(db_name, tbl_name, filter, max_parts)));
+   }
+ 
+   @Override
+   public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr,
+       String default_partition_name, short max_parts, List<Partition> result)
+           throws TException {
+     assert result != null;
+     PartitionsByExprRequest req = new PartitionsByExprRequest(
+         db_name, tbl_name, ByteBuffer.wrap(expr));
+     if (default_partition_name != null) {
+       req.setDefaultPartitionName(default_partition_name);
+     }
+     if (max_parts >= 0) {
+       req.setMaxParts(max_parts);
+     }
+     PartitionsByExprResult r;
+     try {
+       r = client.get_partitions_by_expr(req);
+     } catch (TApplicationException te) {
+       // TODO: backward compat for Hive <= 0.12. Can be removed later.
+       if (te.getType() != TApplicationException.UNKNOWN_METHOD
+           && te.getType() != TApplicationException.WRONG_METHOD_NAME) {
+         throw te;
+       }
+       throw new IncompatibleMetastoreException(
+           "Metastore doesn't support listPartitionsByExpr: " + te.getMessage());
+     }
+     if (fastpath) {
+       result.addAll(r.getPartitions());
+     } else {
+       r.setPartitions(filterHook.filterPartitions(r.getPartitions()));
+       // TODO: in these methods, do we really need to deepcopy?
+       deepCopyPartitions(r.getPartitions(), result);
+     }
+     return !r.isSetHasUnknownPartitions() || r.isHasUnknownPartitions(); // Assume the worst.
+   }
+ 
+   /**
+    * @param name
+    * @return the database
+    * @throws NoSuchObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_database(java.lang.String)
+    */
+   @Override
+   public Database getDatabase(String name) throws NoSuchObjectException,
+       MetaException, TException {
+     Database d = client.get_database(name);
+     return fastpath ? d : deepCopy(filterHook.filterDatabase(d));
+   }
+ 
+   /**
+    * @param db_name
+    * @param tbl_name
+    * @param part_vals
+    * @return the partition
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String,
+    *      java.lang.String, java.util.List)
+    */
+   @Override
+   public Partition getPartition(String db_name, String tbl_name,
+       List<String> part_vals) throws NoSuchObjectException, MetaException, TException {
+     Partition p = client.get_partition(db_name, tbl_name, part_vals);
+     return fastpath ? p : deepCopy(filterHook.filterPartition(p));
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String db_name, String tbl_name,
+       List<String> part_names) throws NoSuchObjectException, MetaException, TException {
+     List<Partition> parts = client.get_partitions_by_names(db_name, tbl_name, part_names);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request)
+       throws MetaException, TException, NoSuchObjectException {
+     return client.get_partition_values(request);
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuthInfo(String db_name, String tbl_name,
+       List<String> part_vals, String user_name, List<String> group_names)
+       throws MetaException, UnknownTableException, NoSuchObjectException,
+       TException {
+     Partition p = client.get_partition_with_auth(db_name, tbl_name, part_vals, user_name,
+         group_names);
+     return fastpath ? p : deepCopy(filterHook.filterPartition(p));
+   }
+ 
+   /**
+    * @param dbname
+    * @param name
+    * @return the table
+    * @throws NoSuchObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_table(java.lang.String,
+    *      java.lang.String)
+    */
+   @Override
+   public Table getTable(String dbname, String name) throws MetaException,
+       TException, NoSuchObjectException {
+     GetTableRequest req = new GetTableRequest(dbname, name);
+     req.setCapabilities(version);
+     Table t = client.get_table_req(req).getTable();
+     return fastpath ? t : deepCopy(filterHook.filterTable(t));
+   }
+ 
++  @Override
++  public Table getTable(String dbName, String tableName, long txnId, String validWriteIdList)
++      throws MetaException, TException, NoSuchObjectException {
++    GetTableRequest req = new GetTableRequest(dbName, tableName);
++    req.setCapabilities(version);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(validWriteIdList);
++    Table t = client.get_table_req(req).getTable();
++    return fastpath ? t : deepCopy(filterHook.filterTable(t));
++  }
++
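An illustrative call of the snapshot-aware overload added above, not part of the patch; it assumes client is an instance of this class, txnId is the caller's open transaction id, and writeIdListStr is the serialized valid write-id list for the (hypothetical) table.

    // Read the table definition consistent with the caller's transactional snapshot.
    Table snapshotTable = client.getTable("default", "web_logs", txnId, writeIdListStr);
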
+   /** {@inheritDoc} */
+   @Override
+   public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException {
+     GetTablesRequest req = new GetTablesRequest(dbName);
+     req.setTblNames(tableNames);
+     req.setCapabilities(version);
+     List<Table> tabs = client.get_table_objects_by_name_req(req).getTables();
+     return fastpath ? tabs : deepCopyTables(filterHook.filterTables(tabs));
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public Materialization getMaterializationInvalidationInfo(CreationMetadata cm, String validTxnList)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException {
+     return client.get_materialization_invalidation_info(cm, validTxnList);
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException {
+     client.update_creation_metadata(null, dbName, tableName, cm);
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> listTableNamesByFilter(String dbName, String filter, short maxTables)
+       throws MetaException, TException, InvalidOperationException, UnknownDBException {
+     return filterHook.filterTableNames(null, dbName,
+         client.get_table_names_by_filter(dbName, filter, maxTables));
+   }
+ 
+   /**
+    * @param name
+    * @return the type
+    * @throws MetaException
+    * @throws TException
+    * @throws NoSuchObjectException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type(java.lang.String)
+    */
+   public Type getType(String name) throws NoSuchObjectException, MetaException, TException {
+     return deepCopy(client.get_type(name));
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getTables(String dbname, String tablePattern) throws MetaException {
+     try {
+       return filterHook.filterTableNames(null, dbname, client.get_tables(dbname, tablePattern));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getTables(String dbname, String tablePattern, TableType tableType) throws MetaException {
+     try {
+       return filterHook.filterTableNames(null, dbname,
+           client.get_tables_by_type(dbname, tablePattern, tableType.toString()));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String dbname) throws MetaException {
+     try {
+       return filterHook.filterTableNames(null, dbname, client.get_materialized_views_for_rewriting(dbname));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes)
+       throws MetaException {
+     try {
+       return filterNames(client.get_table_meta(dbPatterns, tablePatterns, tableTypes));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   private List<TableMeta> filterNames(List<TableMeta> metas) throws MetaException {
+     Map<String, TableMeta> sources = new LinkedHashMap<>();
+     Map<String, List<String>> dbTables = new LinkedHashMap<>();
+     for (TableMeta meta : metas) {
+       sources.put(meta.getDbName() + "." + meta.getTableName(), meta);
+       List<String> tables = dbTables.get(meta.getDbName());
+       if (tables == null) {
+         dbTables.put(meta.getDbName(), tables = new ArrayList<>());
+       }
+       tables.add(meta.getTableName());
+     }
+     List<TableMeta> filtered = new ArrayList<>();
+     for (Map.Entry<String, List<String>> entry : dbTables.entrySet()) {
+       for (String table : filterHook.filterTableNames(null, entry.getKey(), entry.getValue())) {
+         filtered.add(sources.get(entry.getKey() + "." + table));
+       }
+     }
+     return filtered;
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getAllTables(String dbname) throws MetaException {
+     try {
+       return filterHook.filterTableNames(null, dbname, client.get_all_tables(dbname));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public boolean tableExists(String databaseName, String tableName) throws MetaException,
+       TException, UnknownDBException {
+     try {
+       GetTableRequest req = new GetTableRequest(databaseName, tableName);
+       req.setCapabilities(version);
+       return filterHook.filterTable(client.get_table_req(req).getTable()) != null;
+     } catch (NoSuchObjectException e) {
+       return false;
+     }
+   }
+ 
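A small sketch combining tableExists with the simple dropTable overload defined earlier; the table name is made up and client is assumed to be an instance of this class.

    // Drop a table only if it is visible to this client.
    if (client.tableExists("default", "tmp_staging")) {
      client.dropTable("default", "tmp_staging");
    }
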
+   @Override
+   public List<String> listPartitionNames(String dbName, String tblName,
+       short max) throws NoSuchObjectException, MetaException, TException {
+     return filterHook.filterPartitionNames(null, dbName, tblName,
+         client.get_partition_names(dbName, tblName, max));
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String db_name, String tbl_name,
+       List<String> part_vals, short max_parts)
+       throws MetaException, TException, NoSuchObjectException {
+     return filterHook.filterPartitionNames(null, db_name, tbl_name,
+         client.get_partition_names_ps(db_name, tbl_name, part_vals, max_parts));
+   }
+ 
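For reference, a call of the partial-specification overload above, under the assumption that the value list acts as a prefix of the table's partition values; the names are hypothetical.

    // List at most 100 partition names of a table partitioned by (ds, hr),
    // fixing ds and leaving hr unconstrained.
    List<String> names = client.listPartitionNames(
        "default", "web_logs", Arrays.asList("2018-07-01"), (short) 100);
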
+   /**
+    * Get number of partitions matching specified filter
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= \"p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @return number of partitions
+    * @throws MetaException
+    * @throws NoSuchObjectException
+    * @throws TException
+    */
+   @Override
+   public int getNumPartitionsByFilter(String db_name, String tbl_name,
+                                       String filter) throws MetaException,
+           NoSuchObjectException, TException {
+     return client.get_num_partitions_by_filter(db_name, tbl_name, filter);
+   }
+ 
+   @Override
+   public void alter_partition(String dbName, String tblName, Partition newPart)
+       throws InvalidOperationException, MetaException, TException {
+     client.alter_partition_with_environment_context(dbName, tblName, newPart, null);
+   }
+ 
+   @Override
+   public void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext)
+       throws InvalidOperationException, MetaException, TException {
+     client.alter_partition_with_environment_context(dbName, tblName, newPart, environmentContext);
+   }
+ 
+   @Override
+   public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
+       throws InvalidOperationException, MetaException, TException {
 -    client.alter_partitions_with_environment_context(dbName, tblName, newParts, null);
++    client.alter_partitions(dbName, tblName, newParts);
+   }
+ 
+   @Override
+   public void alter_partitions(String dbName, String tblName, List<Partition> newParts, EnvironmentContext environmentContext)
+   throws InvalidOperationException, MetaException, TException {
 -    client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext);
++    AlterPartitionsRequest req = new AlterPartitionsRequest();
++    req.setDbName(dbName);
++    req.setTableName(tblName);
++    req.setPartitions(newParts);
++    req.setEnvironmentContext(environmentContext);
++    client.alter_partitions_req(req);
++  }
++
++  @Override
++  public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
++                               EnvironmentContext environmentContext,
++                               long txnId, String writeIdList, long writeId)
++      throws InvalidOperationException, MetaException, TException {
++    AlterPartitionsRequest req = new AlterPartitionsRequest();
++    req.setDbName(dbName);
++    req.setTableName(tblName);
++    req.setPartitions(newParts);
++    req.setEnvironmentContext(environmentContext);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(writeIdList);
++    client.alter_partitions_req(req);
+   }
+ 
+   @Override
+   public void alterDatabase(String dbName, Database db)
+       throws MetaException, NoSuchObjectException, TException {
+     client.alter_database(dbName, db);
+   }
+ 
+   /**
+    * @param db
+    * @param tableName
+    * @throws UnknownTableException
+    * @throws UnknownDBException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_fields(java.lang.String,
+    *      java.lang.String)
+    */
+   @Override
+   public List<FieldSchema> getFields(String db, String tableName)
+       throws MetaException, TException, UnknownTableException,
+       UnknownDBException {
+     List<FieldSchema> fields = client.get_fields(db, tableName);
+     return fastpath ? fields : deepCopyFieldSchemas(fields);
+   }
+ 
+   @Override
+   public List<SQLPrimaryKey> getPrimaryKeys(PrimaryKeysRequest req)
+     throws MetaException, NoSuchObjectException, TException {
+     return client.get_primary_keys(req).getPrimaryKeys();
+   }
+ 
+   @Override
+   public List<SQLForeignKey> getForeignKeys(ForeignKeysRequest req) throws MetaException,
+     NoSuchObjectException, TException {
+     return client.get_foreign_keys(req).getForeignKeys();
+   }
+ 
+   @Override
+   public List<SQLUniqueConstraint> getUniqueConstraints(UniqueConstraintsRequest req)
+     throws MetaException, NoSuchObjectException, TException {
+     return client.get_unique_constraints(req).getUniqueConstraints();
+   }
+ 
+   @Override
+   public List<SQLNotNullConstraint> getNotNullConstraints(NotNullConstraintsRequest req)
+     throws MetaException, NoSuchObjectException, TException {
+     return client.get_not_null_constraints(req).getNotNullConstraints();
+   }
+ 
+   @Override
+   public List<SQLDefaultConstraint> getDefaultConstraints(DefaultConstraintsRequest req)
+       throws MetaException, NoSuchObjectException, TException {
+     return client.get_default_constraints(req).getDefaultConstraints();
+   }
+ 
+   @Override
+   public List<SQLCheckConstraint> getCheckConstraints(CheckConstraintsRequest request) throws
+       MetaException, NoSuchObjectException, TException {
+     return client.get_check_constraints(request).getCheckConstraints();
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   @Deprecated
+   //use setPartitionColumnStatistics instead
+   public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
+     throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+     InvalidInputException{
+     return client.update_table_column_statistics(statsObj);
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   @Deprecated
+   //use setPartitionColumnStatistics instead
+   public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj)
+     throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+     InvalidInputException{
+     return client.update_partition_column_statistics(statsObj);
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request)
+     throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+     InvalidInputException{
+     return client.set_aggr_stats_for(request);
+   }
+ 
+   @Override
+   public void flushCache() {
+     try {
+       client.flushCache();
+     } catch (TException e) {
+       // Not much we can do about it honestly
+       LOG.warn("Got error flushing the cache", e);
+     }
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
+       List<String> colNames) throws NoSuchObjectException, MetaException, TException,
+       InvalidInputException, InvalidObjectException {
+     return client.get_table_statistics_req(
+         new TableStatsRequest(dbName, tableName, colNames)).getTableStats();
+   }
+ 
++  @Override
++  public List<ColumnStatisticsObj> getTableColumnStatistics(
++      String dbName, String tableName, List<String> colNames, long txnId, String validWriteIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    TableStatsRequest tsr = new TableStatsRequest(dbName, tableName, colNames);
++    tsr.setTxnId(txnId);
++    tsr.setValidWriteIdList(validWriteIdList);
++
++    return client.get_table_statistics_req(tsr).getTableStats();
++  }
++
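A hedged sketch of the write-id-aware statistics call added above; it assumes client is an instance of this class, txnId is the caller's open transaction, that ValidWriteIdList exposes writeToString() for the serialized form, and that the table and column names exist (they are invented here).

    // Fetch column statistics valid for the caller's snapshot of default.web_logs.
    ValidWriteIdList writeIds = client.getValidWriteIds("default.web_logs");
    List<ColumnStatisticsObj> stats = client.getTableColumnStatistics(
        "default", "web_logs", Arrays.asList("ds", "bytes_sent"),
        txnId, writeIds.writeToString());
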
+   /** {@inheritDoc} */
+   @Override
+   public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+       String dbName, String tableName, List<String> partNames, List<String> colNames)
+           throws NoSuchObjectException, MetaException, TException {
+     return client.get_partitions_statistics_req(
+         new PartitionsStatsRequest(dbName, tableName, colNames, partNames)).getPartStats();
+   }
+ 
++  @Override
++  public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
++      String dbName, String tableName, List<String> partNames,
++      List<String> colNames, long txnId, String validWriteIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    PartitionsStatsRequest psr = new PartitionsStatsRequest(dbName, tableName, colNames, partNames);
++    psr.setTxnId(txnId);
++    psr.setValidWriteIdList(validWriteIdList);
++    return client.get_partitions_statistics_req(psr).getPartStats();
++  }
++
+   /** {@inheritDoc} */
+   @Override
+   public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName,
+     String colName) throws NoSuchObjectException, InvalidObjectException, MetaException,
+     TException, InvalidInputException
+   {
+     return client.delete_partition_column_statistics(dbName, tableName, partName, colName);
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName)
+     throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+     InvalidInputException
+   {
+     return client.delete_table_column_statistics(dbName, tableName, colName);
+   }
+ 
+   /**
+    * @param db
+    * @param tableName
+    * @throws UnknownTableException
+    * @throws UnknownDBException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_schema(java.lang.String,
+    *      java.lang.String)
+    */
+   @Override
+   public List<FieldSchema> getSchema(String db, String tableName)
+       throws MetaException, TException, UnknownTableException,
+       UnknownDBException {
+     EnvironmentContext envCxt = null;
+     String addedJars = MetastoreConf.getVar(conf, ConfVars.ADDED_JARS);
+     if (org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) {
+       Map<String, String> props = new HashMap<>();
+       props.put("hive.added.jars.path", addedJars);
+       envCxt = new EnvironmentContext(props);
+     }
+ 
+     List<FieldSchema> fields = client.get_schema_with_environment_context(db, tableName, envCxt);
+     return fastpath ? fields : deepCopyFieldSchemas(fields);
+   }
+ 
+   @Override
+   public String getConfigValue(String name, String defaultValue)
+       throws TException, ConfigValSecurityException {
+     return client.get_config_value(name, defaultValue);
+   }
+ 
+   @Override
+   public Partition getPartition(String db, String tableName, String partName)
+       throws MetaException, TException, UnknownTableException, NoSuchObjectException {
+     Partition p = client.get_partition_by_name(db, tableName, partName);
+     return fastpath ? p : deepCopy(filterHook.filterPartition(p));
+   }
+ 
+   public Partition appendPartitionByName(String dbName, String tableName, String partName)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
+     return appendPartitionByName(dbName, tableName, partName, null);
+   }
+ 
+   public Partition appendPartitionByName(String dbName, String tableName, String partName,
+       EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException,
+       MetaException, TException {
+     Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName,
+         partName, envContext);
+     return fastpath ? p : deepCopy(p);
+   }
+ 
+   public boolean dropPartitionByName(String dbName, String tableName, String partName,
+       boolean deleteData) throws NoSuchObjectException, MetaException, TException {
+     return dropPartitionByName(dbName, tableName, partName, deleteData, null);
+   }
+ 
+   public boolean dropPartitionByName(String dbName, String tableName, String partName,
+       boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException,
+       MetaException, TException {
+     return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName,
+         deleteData, envContext);
+   }
+ 
+   private HiveMetaHook getHook(Table tbl) throws MetaException {
+     if (hookLoader == null) {
+       return null;
+     }
+     return hookLoader.getHook(tbl);
+   }
+ 
+   @Override
+   public List<String> partitionNameToVals(String name) throws MetaException, TException {
+     return client.partition_name_to_vals(name);
+   }
+ 
+   @Override
+   public Map<String, String> partitionNameToSpec(String name) throws MetaException, TException{
+     return client.partition_name_to_spec(name);
+   }
+ 
+   /**
+    * @param partition the partition to copy
+    * @return a deep copy of the given partition, or null if the input is null
+    */
+   private Partition deepCopy(Partition partition) {
+     Partition copy = null;
+     if (partition != null) {
+       copy = new Partition(partition);
+     }
+     return copy;
+   }
+ 
+   private Database deepCopy(Database database) {
+     Database copy = null;
+     if (database != null) {
+       copy = new Database(database);
+     }
+     return copy;
+   }
+ 
+   protected Table deepCopy(Table table) {
+     Table copy = null;
+     if (table != null) {
+       copy = new Table(table);
+     }
+     return copy;
+   }
+ 
+   private Type deepCopy(Type type) {
+     Type copy = null;
+     if (type != null) {
+       copy = new Type(type);
+     }
+     return copy;
+   }
+ 
+   private FieldSchema deepCopy(FieldSchema schema) {
+     FieldSchema copy = null;
+     if (schema != null) {
+       copy = new FieldSchema(schema);
+     }
+     return copy;
+   }
+ 
+   private Function deepCopy(Function func) {
+     Function copy = null;
+     if (func != null) {
+       copy = new Function(func);
+     }
+     return copy;
+   }
+ 
+   protected PrincipalPrivilegeSet deepCopy(PrincipalPrivilegeSet pps) {
+     PrincipalPrivilegeSet copy = null;
+     if (pps != null) {
+       copy = new PrincipalPrivilegeSet(pps);
+     }
+     return copy;
+   }
+ 
+   private List<Partition> deepCopyPartitions(List<Partition> partitions) {
+     return deepCopyPartitions(partitions, null);
+   }
+ 
+   private List<Partition> deepCopyPartitions(
+       Collection<Partition> src, List<Partition> dest) {
+     if (src == null) {
+       return dest;
+     }
+     if (dest == null) {
+       dest = new ArrayList<Partition>(src.size());
+     }
+     for (Partition part : src) {
+       dest.add(deepCopy(part));
+     }
+     return dest;
+   }
+ 
+   private List<Table> deepCopyTables(List<Table> tables) {
+     List<Table> copy = null;
+     if (tables != null) {
+       copy = new ArrayList<Table>();
+       for (Table tab : tables) {
+         copy.add(deepCopy(tab));
+       }
+     }
+     return copy;
+   }
+ 
+   protected List<FieldSchema> deepCopyFieldSchemas(List<FieldSchema> schemas) {
+     List<FieldSchema> copy = null;
+     if (schemas != null) {
+       copy = new ArrayList<FieldSchema>();
+       for (FieldSchema schema : schemas) {
+         copy.add(deepCopy(schema));
+       }
+     }
+     return copy;
+   }
+ 
+   @Override
+   public boolean grant_role(String roleName, String userName,
+       PrincipalType principalType, String grantor, PrincipalType grantorType,
+       boolean grantOption) throws MetaException, TException {
+     GrantRevokeRoleRequest req = new GrantRevokeRoleRequest();
+     req.setRequestType(GrantRevokeType.GRANT);
+     req.setRoleName(roleName);
+     req.setPrincipalName(userName);
+     req.setPrincipalType(principalType);
+     req.setGrantor(grantor);
+     req.setGrantorType(grantorType);
+     req.setGrantOption(grantOption);
+     GrantRevokeRoleResponse res = client.grant_revoke_role(req);
+     if (!res.isSetSuccess()) {
+       throw new MetaException("GrantRevokeResponse missing success field");
+     }
+     return res.isSuccess();
+   }
+ 
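An illustrative grant_role call; the role, user and grantor names are hypothetical and client is assumed to be an instance of this class.

    // Grant a role to a user, with an admin principal as grantor and no grant option.
    boolean granted = client.grant_role("analysts", "alice",
        PrincipalType.USER, "admin", PrincipalType.USER, false);
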
+   @Override
+   public boolean create_role(Role role)
+       throws MetaException, TException {
+     return client.create_role(role);
+   }
+ 
+   @Override
+   public boolean drop_role(String roleName) throws MetaException, TException {
+     return client.drop_role(roleName);
+   }
+ 
+   @Override
+   public List<Role> list_roles(String principalName,
+       PrincipalType principalType) throws MetaException, TException {
+     return client.list_roles(principalName, principalType);
+   }
+ 
+   @Override
+   public List<String> listRoleNames() throws MetaException, TException {
+     return client.get_role_names();
+   }
+ 
+   @Override
+   public GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest req)
+       throws MetaException, TException {
+     return client.get_principals_in_role(req);
+   }
+ 
+   @Override
+   public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(
+       GetRoleGrantsForPrincipalRequest getRolePrincReq) throws MetaException, TException {
+     return client.get_role_grants_for_principal(getRolePrincReq);
+   }
+ 
+   @Override
+   public boolean grant_privileges(PrivilegeBag privileges)
+       throws MetaException, TException {
+     GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest();
+     req.setRequestType(GrantRevokeType.GRANT);
+     req.setPrivileges(privileges);
+     GrantRevokePrivilegeResponse res = client.grant_revoke_privileges(req);
+     if (!res.isSetSuccess()) {
+       throw new MetaException("GrantRevokePrivilegeResponse missing success field");
+     }
+     return res.isSuccess();
+   }
+ 
+   @Override
+   public boolean revoke_role(String roleName, String userName,
+       PrincipalType principalType, boolean grantOption) throws MetaException, TException {
+     GrantRevokeRoleRequest req = new GrantRevokeRoleRequest();
+     req.setRequestType(GrantRevokeType.REVOKE);
+     req.setRoleName(roleName);
+     req.setPrincipalName(userName);
+     req.setPrincipalType(principalType);
+     req.setGrantOption(grantOption);
+     GrantRevokeRoleResponse res = client.grant_revoke_role(req);
+     if (!res.isSetSuccess()) {
+       throw new MetaException("GrantRevokeResponse missing success field");
+     }
+     return res.isSuccess();
+   }
+ 
+   @Override
+   public boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) throws MetaException,
+       TException {
+     GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest();
+     req.setRequestType(GrantRevokeType.REVOKE);
+     req.setPrivileges(privileges);
+     req.setRevokeGrantOption(grantOption);
+     GrantRevokePrivilegeResponse res = client.grant_revoke_privileges(req);
+     if (!res.isSetSuccess()) {
+       throw new MetaException("GrantRevokePrivilegeResponse missing success field");
+     }
+     return res.isSuccess();
+   }
+ 
+   @Override
+   public boolean refresh_privileges(HiveObjectRef objToRefresh, String authorizer,
+       PrivilegeBag grantPrivileges) throws MetaException,
+       TException {
+     String defaultCat = getDefaultCatalog(conf);
+     objToRefresh.setCatName(defaultCat);
+ 
+     if (grantPrivileges.getPrivileges() != null) {
+       for (HiveObjectPrivilege priv : grantPrivileges.getPrivileges()) {
+         if (!priv.getHiveObject().isSetCatName()) {
+           priv.getHiveObject().setCatName(defaultCat);
+         }
+       }
+     }
+     GrantRevokePrivilegeRequest grantReq = new GrantRevokePrivilegeRequest();
+     grantReq.setRequestType(GrantRevokeType.GRANT);
+     grantReq.setPrivileges(grantPrivileges);
+ 
+     GrantRevokePrivilegeResponse res = client.refresh_privileges(objToRefresh, authorizer, grantReq);
+     if (!res.isSetSuccess()) {
+       throw new MetaException("GrantRevokePrivilegeResponse missing success field");
+     }
+     return res.isSuccess();
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject,
+       String userName, List<String> groupNames) throws MetaException,
+       TException {
+     return client.get_privilege_set(hiveObject, userName, groupNames);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> list_privileges(String principalName,
+       PrincipalType principalType, HiveObjectRef hiveObject)
+       throws MetaException, TException {
+     return client.list_privileges(principalName, principalType, hiveObject);
+   }
+ 
+   public String getDelegationToken(String renewerKerberosPrincipalName) throws
+   MetaException, TException, IOException {
+     //a convenience method that makes the intended owner for the delegation
+     //token request the current user
+     String owner = SecurityUtils.getUser();
+     return getDelegationToken(owner, renewerKerberosPrincipalName);
+   }
+ 
+   @Override
+   public String getDelegationToken(String owner, String renewerKerberosPrincipalName) throws
+   MetaException, TException {
+     // Delegation tokens are not needed with a local (embedded) metastore, so return null.
+     if (localMetaStore) {
+       return null;
+     }
+     return client.get_delegation_token(owner, renewerKerberosPrincipalName);
+   }
+ 
+   @Override
+   public long renewDelegationToken(String tokenStrForm) throws MetaException, TException {
+     if (localMetaStore) {
+       return 0;
+     }
+     return client.renew_delegation_token(tokenStrForm);
+   }
+ 
+   @Override
+   public void cancelDelegationToken(String tokenStrForm) throws MetaException, TException {
+     if (localMetaStore) {
+       return;
+     }
+     client.cancel_delegation_token(tokenStrForm);
+   }
+ 
+   @Override
+   public boolean addToken(String tokenIdentifier, String delegationToken) throws TException {
+      return client.add_token(tokenIdentifier, delegationToken);
+   }
+ 
+   @Override
+   public boolean removeToken(String tokenIdentifier) throws TException {
+     return client.remove_token(tokenIdentifier);
+   }
+ 
+   @Override
+   public String getToken(String tokenIdentifier) throws TException {
+     return client.get_token(tokenIdentifier);
+   }
+ 
+   @Override
+   public List<String> getAllTokenIdentifiers() throws TException {
+     return client.get_all_token_identifiers();
+   }
+ 
+   @Override
+   public int addMasterKey(String key) throws MetaException, TException {
+     return client.add_master_key(key);
+   }
+ 
+   @Override
+   public void updateMasterKey(Integer seqNo, String key)
+       throws NoSuchObjectException, MetaException, TException {
+     client.update_master_key(seqNo, key);
+   }
+ 
+   @Override
+   public boolean removeMasterKey(Integer keySeq) throws TException {
+     return client.remove_master_key(keySeq);
+   }
+ 
+   @Override
+   public String[] getMasterKeys() throws TException {
+     List<String> keyList = client.get_master_keys();
+     return keyList.toArray(new String[keyList.size()]);
+   }
+ 
+   @Override
+   public ValidTxnList getValidTxns() throws TException {
+     return TxnUtils.createValidReadTxnList(client.get_open_txns(), 0);
+   }
+ 
+   @Override
+   public ValidTxnList getValidTxns(long currentTxn) throws TException {
+     return TxnUtils.createValidReadTxnList(client.get_open_txns(), currentTxn);
+   }
+ 
+   @Override
+   public ValidWriteIdList getValidWriteIds(String fullTableName) throws TException {
+     GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(Collections.singletonList(fullTableName), null);
+     GetValidWriteIdsResponse validWriteIds = client.get_valid_write_ids(rqst);
+     return TxnUtils.createValidReaderWriteIdList(validWriteIds.getTblValidWriteIds().get(0));
+   }
+ 
+   @Override
+   public List<TableValidWriteIds> getValidWriteIds(List<String> tablesList, String validTxnList)
+           throws TException {
+     GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(tablesList, validTxnList);
+     return client.get_valid_write_ids(rqst).getTblValidWriteIds();
+   }
+ 
+   @Override
+   public long openTxn(String user) throws TException {
+     OpenTxnsResponse txns = openTxns(user, 1);
+     return txns.getTxn_ids().get(0);
+   }
+ 
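A brief sketch tying openTxn to getValidTxns, assuming client is an instance of this class; the user name is invented.

    // Open one transaction and build the snapshot of transactions visible to it.
    long txnId = client.openTxn("etl_user");
    ValidTxnList txns = client.getValidTxns(txnId);
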
+   @Override
+   public OpenTxnsResponse openTxns(String user, int numTxns) throws TException {
+     return openTxnsIntr(user, numTxns, null, null);
+   }
+ 
+   @Override
+   public List<Long> replOpenTxn(

<TRUNCATED>

[15/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/vector_topnkey.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_topnkey.q.out b/ql/src/test/results/clientpositive/llap/vector_topnkey.q.out
new file mode 100644
index 0000000..16803c9
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/vector_topnkey.q.out
@@ -0,0 +1,592 @@
+PREHOOK: query: explain vectorization detail
+SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization detail
+SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                  Select Operator
+                    expressions: key (type: string), UDFToInteger(substr(value, 5)) (type: int)
+                    outputColumnNames: _col0, _col1
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0, 4]
+                        selectExpressions: CastStringToLong(col 3:string)(children: StringSubstrColStart(col 1:string, start 4) -> 3:string) -> 4:int
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Top N Key Operator
+                      sort order: +
+                      keys: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 5
+                      Top N Key Vectorization:
+                          className: VectorTopNKeyOperator
+                          keyExpressions: col 0:string
+                          native: true
+                      Group By Operator
+                        aggregations: sum(_col1)
+                        Group By Vectorization:
+                            aggregators: VectorUDAFSumLong(col 4:int) -> bigint
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 0:string
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: [0]
+                        keys: _col0 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkStringOperator
+                              keyColumnNums: [0]
+                              native: true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              valueColumnNums: [1]
+                          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
+                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0, 1]
+                    dataColumns: key:string, value:string
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [string, bigint]
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: a
+                reduceColumnSortOrder: +
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    dataColumns: KEY._col0:string, VALUE._col0:bigint
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: []
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                Group By Vectorization:
+                    aggregators: VectorUDAFSumLong(col 1:bigint) -> bigint
+                    className: VectorGroupByOperator
+                    groupByMode: MERGEPARTIAL
+                    keyExpressions: col 0:string
+                    native: false
+                    vectorProcessingMode: MERGE_PARTIAL
+                    projectedOutputColumnNums: [0]
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Reduce Sink Vectorization:
+                      className: VectorReduceSinkObjectHashOperator
+                      keyColumnNums: [0]
+                      native: true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      valueColumnNums: [1]
+                  Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
+                  value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: a
+                reduceColumnSortOrder: +
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    dataColumns: KEY.reducesinkkey0:string, VALUE._col0:bigint
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: []
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint)
+                outputColumnNames: _col0, _col1
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [0, 1]
+                Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 5
+                  Limit Vectorization:
+                      className: VectorLimitOperator
+                      native: true
+                  Statistics: Num rows: 5 Data size: 475 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    File Sink Vectorization:
+                        className: VectorFileSinkOperator
+                        native: false
+                    Statistics: Num rows: 5 Data size: 475 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	0
+10	10
+100	200
+103	206
+104	208
+PREHOOK: query: explain vectorization detail
+SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization detail
+SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0]
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Top N Key Operator
+                      sort order: +
+                      keys: key (type: string)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 5
+                      Top N Key Vectorization:
+                          className: VectorTopNKeyOperator
+                          keyExpressions: col 0:string
+                          native: true
+                      Group By Operator
+                        Group By Vectorization:
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 0:string
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: []
+                        keys: key (type: string)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkStringOperator
+                              keyColumnNums: [0]
+                              native: true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              valueColumnNums: []
+                          Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
+                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0]
+                    dataColumns: key:string, value:string
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: []
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: a
+                reduceColumnSortOrder: +
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 1
+                    dataColumns: KEY._col0:string
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: []
+            Reduce Operator Tree:
+              Group By Operator
+                Group By Vectorization:
+                    className: VectorGroupByOperator
+                    groupByMode: MERGEPARTIAL
+                    keyExpressions: col 0:string
+                    native: false
+                    vectorProcessingMode: MERGE_PARTIAL
+                    projectedOutputColumnNums: []
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Reduce Sink Vectorization:
+                      className: VectorReduceSinkObjectHashOperator
+                      keyColumnNums: [0]
+                      native: true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      valueColumnNums: []
+                  Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: a
+                reduceColumnSortOrder: +
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 1
+                    dataColumns: KEY.reducesinkkey0:string
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: []
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [0]
+                Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 5
+                  Limit Vectorization:
+                      className: VectorLimitOperator
+                      native: true
+                  Statistics: Num rows: 5 Data size: 435 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    File Sink Vectorization:
+                        className: VectorFileSinkOperator
+                        native: false
+                    Statistics: Num rows: 5 Data size: 435 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key FROM src GROUP BY key ORDER BY key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0
+10
+100
+103
+104
+PREHOOK: query: explain vectorization detail
+SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization detail
+SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src1
+                  filterExpr: key is not null (type: boolean)
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                  Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: SelectColumnIsNotNull(col 0:string)
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumnNums: [0]
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Reduce Sink Vectorization:
+                            className: VectorReduceSinkStringOperator
+                            keyColumnNums: [0]
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            valueColumnNums: []
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
+                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+                allNative: true
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0]
+                    dataColumns: key:string, value:string
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: []
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src2
+                  filterExpr: key is not null (type: boolean)
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                  Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: SelectColumnIsNotNull(col 0:string)
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumnNums: [0, 1]
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Reduce Sink Vectorization:
+                            className: VectorReduceSinkStringOperator
+                            keyColumnNums: [0]
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            valueColumnNums: [1]
+                        Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
+                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+                allNative: true
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0, 1]
+                    dataColumns: key:string, value:string
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: []
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col2
+                Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: string), _col2 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col1 (type: string)
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: a
+                reduceColumnSortOrder: +
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    dataColumns: KEY.reducesinkkey0:string, VALUE._col0:string
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: []
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [0, 1]
+                Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 5
+                  Limit Vectorization:
+                      className: VectorLimitOperator
+                      native: true
+                  Statistics: Num rows: 5 Data size: 890 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    File Sink Vectorization:
+                        className: VectorFileSinkOperator
+                        native: false
+                    Statistics: Num rows: 5 Data size: 890 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src1.key, src2.value FROM src src1 JOIN src src2 ON (src1.key = src2.key) ORDER BY src1.key LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0

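The plans above show the new operator in both forms: a row-mode Top N Key Operator (sort order: +, top n: 5) and, inside a vectorized map task, its native VectorTopNKeyOperator counterpart. Conceptually it is a filter placed in front of the hash group-by: it tracks the n smallest keys seen so far and forwards only rows whose key can still appear in the final ORDER BY ... LIMIT n result, so the group-by and the shuffle behind it process fewer rows. A minimal row-mode sketch of that idea, in Java with illustrative class and method names that are not Hive's actual operator API:

    import java.util.TreeSet;

    /**
     * Illustrative sketch (not Hive's operator API): a top-n key filter keeps the
     * n smallest distinct keys seen so far and forwards a row only if its key can
     * still appear in the final ORDER BY key LIMIT n result.
     */
    public class TopNKeyFilterSketch {

        private final int topN;
        private final TreeSet<String> smallestKeys = new TreeSet<>(); // ascending, matches "sort order: +"

        public TopNKeyFilterSketch(int topN) {
            this.topN = topN;
        }

        /** Returns true if the row should be forwarded to the child operator. */
        public boolean canForward(String key) {
            if (smallestKeys.size() < topN || key.compareTo(smallestKeys.last()) <= 0) {
                smallestKeys.add(key);
                if (smallestKeys.size() > topN) {
                    smallestKeys.pollLast();   // evict the key that just fell out of the top n
                }
                return true;
            }
            return false;                      // key sorts after the current n-th key; it can never reach the limit
        }

        public static void main(String[] args) {
            TopNKeyFilterSketch filter = new TopNKeyFilterSketch(5);
            // String keys compare lexicographically, like the string key column in the plan,
            // so "99" sorts after "104" and is the only row dropped here.
            for (String key : new String[] {"104", "0", "10", "200", "100", "103", "99"}) {
                System.out.println(key + " forwarded: " + filter.canForward(key));
            }
        }
    }

Equal keys are still forwarded because they belong to a group that may survive the limit; only keys that sort strictly after the current n-th distinct key are dropped.
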
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out b/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
index e79cdf7..f7c00f8 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
@@ -491,31 +491,40 @@ STAGE PLANS:
                         native: true
                         projectedOutputColumnNums: [0]
                     Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 0:tinyint
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: []
+                    Top N Key Operator
+                      sort order: +
                       keys: ctinyint (type: tinyint)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 131 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: tinyint)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: tinyint)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkObjectHashOperator
-                            keyColumnNums: [0]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            partitionColumnNums: [0]
-                            valueColumnNums: []
-                        Statistics: Num rows: 131 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
+                      Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
+                      top n: 20
+                      Top N Key Vectorization:
+                          className: VectorTopNKeyOperator
+                          keyExpressions: col 0:tinyint
+                          native: true
+                      Group By Operator
+                        Group By Vectorization:
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 0:tinyint
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: []
+                        keys: ctinyint (type: tinyint)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 131 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: tinyint)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: tinyint)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkObjectHashOperator
+                              keyColumnNums: [0]
+                              native: true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              partitionColumnNums: [0]
+                              valueColumnNums: []
+                          Statistics: Num rows: 131 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.3
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -560,19 +569,19 @@ STAGE PLANS:
                 keys: KEY._col0 (type: tinyint)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 131 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 131 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 20
                   Limit Vectorization:
                       className: VectorLimitOperator
                       native: true
-                  Statistics: Num rows: 20 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 20 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
                     File Sink Vectorization:
                         className: VectorFileSinkOperator
                         native: false
-                    Statistics: Num rows: 20 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 20 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

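The vectorization_limit.q.out hunk above shows the optimizer inserting the same operator in front of the tinyint group-by (keys: ctinyint, top n: 20), described both in row-mode terms and as a native VectorTopNKeyOperator. In vectorized execution such a filter can narrow a batch by compacting its selected-row indices instead of emitting rows one at a time; a hedged, batch-level sketch of that step, using plain arrays rather than Hive's VectorizedRowBatch API:

    import java.util.TreeSet;

    /**
     * Batch-level variant of the same idea (names are illustrative, not Hive's
     * VectorizedRowBatch API): instead of forwarding rows one by one, the filter
     * compacts an array of selected row indices in place, which is roughly how a
     * native vectorized operator narrows a batch without copying column data.
     */
    public class TopNKeyBatchSketch {

        private final int topN;
        private final TreeSet<Long> smallestKeys = new TreeSet<>();

        public TopNKeyBatchSketch(int topN) {
            this.topN = topN;
        }

        /** Filters selected[0..size) against the key column; returns the new selected size. */
        public int filterBatch(long[] keyColumn, int[] selected, int size) {
            int newSize = 0;
            for (int i = 0; i < size; i++) {
                int row = selected[i];
                long key = keyColumn[row];
                if (smallestKeys.size() < topN || key <= smallestKeys.last()) {
                    smallestKeys.add(key);
                    if (smallestKeys.size() > topN) {
                        smallestKeys.pollLast();
                    }
                    selected[newSize++] = row;  // keep this row in the batch
                }
            }
            return newSize;                     // rows at or beyond newSize are dropped from the batch
        }

        public static void main(String[] args) {
            long[] keyColumn = {42, 7, 19, 3, 88, 5, 7};
            int[] selected = {0, 1, 2, 3, 4, 5, 6};
            int kept = new TopNKeyBatchSketch(3).filterBatch(keyColumn, selected, selected.length);
            StringBuilder out = new StringBuilder("kept rows:");
            for (int i = 0; i < kept; i++) {
                out.append(' ').append(selected[i]);
            }
            System.out.println(out);            // prints: kept rows: 0 1 2 3 5 6
        }
    }

The TPC-DS plan diffs that follow apply the same mechanism to grouped-limit queries, for example TNK_103 in query10.q.out with an eight-column key (sort order ++++++++, top n 100); only the key comparison changes, from a single column to a lexicographic comparison over all key columns in the declared sort order.
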
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query10.q.out b/ql/src/test/results/clientpositive/perf/tez/query10.q.out
index 1b6adee..a8f097f 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query10.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query10.q.out
@@ -140,188 +140,190 @@ Stage-0
     limit:100
     Stage-1
       Reducer 6 vectorized
-      File Output Operator [FS_224]
-        Limit [LIM_223] (rows=100 width=88)
+      File Output Operator [FS_225]
+        Limit [LIM_224] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_222] (rows=1045432122 width=88)
+          Select Operator [SEL_223] (rows=1045432122 width=88)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13"]
           <-Reducer 5 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_221]
-              Select Operator [SEL_220] (rows=1045432122 width=88)
+            SHUFFLE [RS_222]
+              Select Operator [SEL_221] (rows=1045432122 width=88)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col6","_col8","_col10","_col12"]
-                Group By Operator [GBY_219] (rows=1045432122 width=88)
+                Group By Operator [GBY_220] (rows=1045432122 width=88)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, KEY._col6, KEY._col7
                 <-Reducer 4 [SIMPLE_EDGE]
                   SHUFFLE [RS_63]
                     PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
                     Group By Operator [GBY_62] (rows=2090864244 width=88)
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"],aggregations:["count()"],keys:_col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
-                      Select Operator [SEL_61] (rows=2090864244 width=88)
-                        Output:["_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13"]
-                        Filter Operator [FIL_60] (rows=2090864244 width=88)
-                          predicate:(_col15 is not null or _col17 is not null)
-                          Merge Join Operator [MERGEJOIN_172] (rows=2090864244 width=88)
-                            Conds:RS_55._col0=RS_56._col0(Left Semi),RS_55._col0=RS_210._col0(Left Outer),RS_55._col0=RS_218._col0(Left Outer),Output:["_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col15","_col17"]
-                          <-Reducer 3 [SIMPLE_EDGE]
-                            PARTITION_ONLY_SHUFFLE [RS_55]
-                              PartitionCols:_col0
-                              Merge Join Operator [MERGEJOIN_168] (rows=96800003 width=860)
-                                Conds:RS_50._col1=RS_181._col0(Inner),Output:["_col0","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13"]
-                              <-Map 9 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_181]
-                                  PartitionCols:_col0
-                                  Select Operator [SEL_180] (rows=1861800 width=385)
-                                    Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
-                                    Filter Operator [FIL_179] (rows=1861800 width=385)
-                                      predicate:cd_demo_sk is not null
-                                      TableScan [TS_6] (rows=1861800 width=385)
-                                        default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_education_status","cd_purchase_estimate","cd_credit_rating","cd_dep_count","cd_dep_employed_count","cd_dep_college_count"]
-                              <-Reducer 2 [SIMPLE_EDGE]
-                                SHUFFLE [RS_50]
-                                  PartitionCols:_col1
-                                  Merge Join Operator [MERGEJOIN_167] (rows=88000001 width=860)
-                                    Conds:RS_175._col2=RS_178._col0(Inner),Output:["_col0","_col1"]
-                                  <-Map 1 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_175]
-                                      PartitionCols:_col2
-                                      Select Operator [SEL_174] (rows=80000000 width=860)
-                                        Output:["_col0","_col1","_col2"]
-                                        Filter Operator [FIL_173] (rows=80000000 width=860)
-                                          predicate:(c_current_addr_sk is not null and c_current_cdemo_sk is not null and c_customer_sk is not null)
-                                          TableScan [TS_0] (rows=80000000 width=860)
-                                            default@customer,c,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_cdemo_sk","c_current_addr_sk"]
-                                  <-Map 8 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_178]
-                                      PartitionCols:_col0
-                                      Select Operator [SEL_177] (rows=20000000 width=1014)
-                                        Output:["_col0"]
-                                        Filter Operator [FIL_176] (rows=20000000 width=1014)
-                                          predicate:((ca_county) IN ('Walker County', 'Richland County', 'Gaines County', 'Douglas County', 'Dona Ana County') and ca_address_sk is not null)
-                                          TableScan [TS_3] (rows=40000000 width=1014)
-                                            default@customer_address,ca,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county"]
-                          <-Reducer 11 [SIMPLE_EDGE]
-                            SHUFFLE [RS_56]
-                              PartitionCols:_col0
-                              Group By Operator [GBY_54] (rows=633595212 width=88)
-                                Output:["_col0"],keys:_col0
-                                Select Operator [SEL_18] (rows=633595212 width=88)
-                                  Output:["_col0"]
-                                  Merge Join Operator [MERGEJOIN_169] (rows=633595212 width=88)
-                                    Conds:RS_202._col0=RS_184._col0(Inner),Output:["_col1"]
-                                  <-Map 12 [SIMPLE_EDGE] vectorized
-                                    PARTITION_ONLY_SHUFFLE [RS_184]
+                      Top N Key Operator [TNK_103] (rows=2090864244 width=88)
+                        keys:_col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13,sort order:++++++++,top n:100
+                        Select Operator [SEL_61] (rows=2090864244 width=88)
+                          Output:["_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13"]
+                          Filter Operator [FIL_60] (rows=2090864244 width=88)
+                            predicate:(_col15 is not null or _col17 is not null)
+                            Merge Join Operator [MERGEJOIN_173] (rows=2090864244 width=88)
+                              Conds:RS_55._col0=RS_56._col0(Left Semi),RS_55._col0=RS_211._col0(Left Outer),RS_55._col0=RS_219._col0(Left Outer),Output:["_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col15","_col17"]
+                            <-Reducer 3 [SIMPLE_EDGE]
+                              PARTITION_ONLY_SHUFFLE [RS_55]
+                                PartitionCols:_col0
+                                Merge Join Operator [MERGEJOIN_169] (rows=96800003 width=860)
+                                  Conds:RS_50._col1=RS_182._col0(Inner),Output:["_col0","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13"]
+                                <-Map 9 [SIMPLE_EDGE] vectorized
+                                  SHUFFLE [RS_182]
+                                    PartitionCols:_col0
+                                    Select Operator [SEL_181] (rows=1861800 width=385)
+                                      Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
+                                      Filter Operator [FIL_180] (rows=1861800 width=385)
+                                        predicate:cd_demo_sk is not null
+                                        TableScan [TS_6] (rows=1861800 width=385)
+                                          default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_education_status","cd_purchase_estimate","cd_credit_rating","cd_dep_count","cd_dep_employed_count","cd_dep_college_count"]
+                                <-Reducer 2 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_50]
+                                    PartitionCols:_col1
+                                    Merge Join Operator [MERGEJOIN_168] (rows=88000001 width=860)
+                                      Conds:RS_176._col2=RS_179._col0(Inner),Output:["_col0","_col1"]
+                                    <-Map 1 [SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_176]
+                                        PartitionCols:_col2
+                                        Select Operator [SEL_175] (rows=80000000 width=860)
+                                          Output:["_col0","_col1","_col2"]
+                                          Filter Operator [FIL_174] (rows=80000000 width=860)
+                                            predicate:(c_current_addr_sk is not null and c_current_cdemo_sk is not null and c_customer_sk is not null)
+                                            TableScan [TS_0] (rows=80000000 width=860)
+                                              default@customer,c,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_cdemo_sk","c_current_addr_sk"]
+                                    <-Map 8 [SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_179]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_178] (rows=20000000 width=1014)
+                                          Output:["_col0"]
+                                          Filter Operator [FIL_177] (rows=20000000 width=1014)
+                                            predicate:((ca_county) IN ('Walker County', 'Richland County', 'Gaines County', 'Douglas County', 'Dona Ana County') and ca_address_sk is not null)
+                                            TableScan [TS_3] (rows=40000000 width=1014)
+                                              default@customer_address,ca,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county"]
+                            <-Reducer 11 [SIMPLE_EDGE]
+                              SHUFFLE [RS_56]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_54] (rows=633595212 width=88)
+                                  Output:["_col0"],keys:_col0
+                                  Select Operator [SEL_18] (rows=633595212 width=88)
+                                    Output:["_col0"]
+                                    Merge Join Operator [MERGEJOIN_170] (rows=633595212 width=88)
+                                      Conds:RS_203._col0=RS_185._col0(Inner),Output:["_col1"]
+                                    <-Map 12 [SIMPLE_EDGE] vectorized
+                                      PARTITION_ONLY_SHUFFLE [RS_185]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_184] (rows=4058 width=1119)
+                                          Output:["_col0"]
+                                          Filter Operator [FIL_183] (rows=4058 width=1119)
+                                            predicate:((d_year = 2002) and d_date_sk is not null and d_moy BETWEEN 4 AND 7)
+                                            TableScan [TS_12] (rows=73049 width=1119)
+                                              default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
+                                    <-Map 10 [SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_203]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_202] (rows=575995635 width=88)
+                                          Output:["_col0","_col1"]
+                                          Filter Operator [FIL_201] (rows=575995635 width=88)
+                                            predicate:((ss_customer_sk BETWEEN DynamicValue(RS_55_c_c_customer_sk_min) AND DynamicValue(RS_55_c_c_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_55_c_c_customer_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_16_date_dim_d_date_sk_min) AND DynamicValue(RS_16_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_16_date_dim_d_date_sk_bloom_filter))) and ss_customer_sk is not null and ss_sold_date_sk is not null)
+                                            TableScan [TS_9] (rows=575995635 width=88)
+                                              default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_customer_sk"]
+                                            <-Reducer 13 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_198]
+                                                Group By Operator [GBY_197] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                  PARTITION_ONLY_SHUFFLE [RS_194]
+                                                    Group By Operator [GBY_191] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                      Select Operator [SEL_186] (rows=4058 width=1119)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Select Operator [SEL_184]
+                                            <-Reducer 7 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_200]
+                                                Group By Operator [GBY_199] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=96800000)"]
+                                                <-Reducer 3 [CUSTOM_SIMPLE_EDGE]
+                                                  PARTITION_ONLY_SHUFFLE [RS_136]
+                                                    Group By Operator [GBY_135] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=96800000)"]
+                                                      Select Operator [SEL_134] (rows=96800003 width=860)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Merge Join Operator [MERGEJOIN_169]
+                            <-Reducer 15 [ONE_TO_ONE_EDGE] vectorized
+                              FORWARD [RS_211]
+                                PartitionCols:_col0
+                                Select Operator [SEL_210] (rows=79201469 width=135)
+                                  Output:["_col0","_col1"]
+                                  Group By Operator [GBY_209] (rows=79201469 width=135)
+                                    Output:["_col0"],keys:KEY._col0
+                                  <-Reducer 14 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_30]
                                       PartitionCols:_col0
-                                      Select Operator [SEL_183] (rows=4058 width=1119)
-                                        Output:["_col0"]
-                                        Filter Operator [FIL_182] (rows=4058 width=1119)
-                                          predicate:((d_year = 2002) and d_date_sk is not null and d_moy BETWEEN 4 AND 7)
-                                          TableScan [TS_12] (rows=73049 width=1119)
-                                            default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
-                                  <-Map 10 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_202]
+                                      Group By Operator [GBY_29] (rows=158402938 width=135)
+                                        Output:["_col0"],keys:_col1
+                                        Merge Join Operator [MERGEJOIN_171] (rows=158402938 width=135)
+                                          Conds:RS_208._col0=RS_187._col0(Inner),Output:["_col1"]
+                                        <-Map 12 [SIMPLE_EDGE] vectorized
+                                          PARTITION_ONLY_SHUFFLE [RS_187]
+                                            PartitionCols:_col0
+                                             Please refer to the previous Select Operator [SEL_184]
+                                        <-Map 20 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_208]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_207] (rows=144002668 width=135)
+                                              Output:["_col0","_col1"]
+                                              Filter Operator [FIL_206] (rows=144002668 width=135)
+                                                predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_26_date_dim_d_date_sk_min) AND DynamicValue(RS_26_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_26_date_dim_d_date_sk_bloom_filter))) and ws_bill_customer_sk is not null and ws_sold_date_sk is not null)
+                                                TableScan [TS_19] (rows=144002668 width=135)
+                                                  default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_bill_customer_sk"]
+                                                <-Reducer 16 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_205]
+                                                    Group By Operator [GBY_204] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      PARTITION_ONLY_SHUFFLE [RS_195]
+                                                        Group By Operator [GBY_192] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_188] (rows=4058 width=1119)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_184]
+                            <-Reducer 18 [ONE_TO_ONE_EDGE] vectorized
+                              FORWARD [RS_219]
+                                PartitionCols:_col0
+                                Select Operator [SEL_218] (rows=158394413 width=135)
+                                  Output:["_col0","_col1"]
+                                  Group By Operator [GBY_217] (rows=158394413 width=135)
+                                    Output:["_col0"],keys:KEY._col0
+                                  <-Reducer 17 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_44]
                                       PartitionCols:_col0
-                                      Select Operator [SEL_201] (rows=575995635 width=88)
-                                        Output:["_col0","_col1"]
-                                        Filter Operator [FIL_200] (rows=575995635 width=88)
-                                          predicate:((ss_customer_sk BETWEEN DynamicValue(RS_55_c_c_customer_sk_min) AND DynamicValue(RS_55_c_c_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_55_c_c_customer_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_16_date_dim_d_date_sk_min) AND DynamicValue(RS_16_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_16_date_dim_d_date_sk_bloom_filter))) and ss_customer_sk is not null and ss_sold_date_sk is not null)
-                                          TableScan [TS_9] (rows=575995635 width=88)
-                                            default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_customer_sk"]
-                                          <-Reducer 13 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_197]
-                                              Group By Operator [GBY_196] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                              <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                PARTITION_ONLY_SHUFFLE [RS_193]
-                                                  Group By Operator [GBY_190] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                    Select Operator [SEL_185] (rows=4058 width=1119)
-                                                      Output:["_col0"]
-                                                       Please refer to the previous Select Operator [SEL_183]
-                                          <-Reducer 7 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_199]
-                                              Group By Operator [GBY_198] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=96800000)"]
-                                              <-Reducer 3 [CUSTOM_SIMPLE_EDGE]
-                                                PARTITION_ONLY_SHUFFLE [RS_135]
-                                                  Group By Operator [GBY_134] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=96800000)"]
-                                                    Select Operator [SEL_133] (rows=96800003 width=860)
-                                                      Output:["_col0"]
-                                                       Please refer to the previous Merge Join Operator [MERGEJOIN_168]
-                          <-Reducer 15 [ONE_TO_ONE_EDGE] vectorized
-                            FORWARD [RS_210]
-                              PartitionCols:_col0
-                              Select Operator [SEL_209] (rows=79201469 width=135)
-                                Output:["_col0","_col1"]
-                                Group By Operator [GBY_208] (rows=79201469 width=135)
-                                  Output:["_col0"],keys:KEY._col0
-                                <-Reducer 14 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_30]
-                                    PartitionCols:_col0
-                                    Group By Operator [GBY_29] (rows=158402938 width=135)
-                                      Output:["_col0"],keys:_col1
-                                      Merge Join Operator [MERGEJOIN_170] (rows=158402938 width=135)
-                                        Conds:RS_207._col0=RS_186._col0(Inner),Output:["_col1"]
-                                      <-Map 12 [SIMPLE_EDGE] vectorized
-                                        PARTITION_ONLY_SHUFFLE [RS_186]
-                                          PartitionCols:_col0
-                                           Please refer to the previous Select Operator [SEL_183]
-                                      <-Map 20 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_207]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_206] (rows=144002668 width=135)
-                                            Output:["_col0","_col1"]
-                                            Filter Operator [FIL_205] (rows=144002668 width=135)
-                                              predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_26_date_dim_d_date_sk_min) AND DynamicValue(RS_26_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_26_date_dim_d_date_sk_bloom_filter))) and ws_bill_customer_sk is not null and ws_sold_date_sk is not null)
-                                              TableScan [TS_19] (rows=144002668 width=135)
-                                                default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_bill_customer_sk"]
-                                              <-Reducer 16 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_204]
-                                                  Group By Operator [GBY_203] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_194]
-                                                      Group By Operator [GBY_191] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_187] (rows=4058 width=1119)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_183]
-                          <-Reducer 18 [ONE_TO_ONE_EDGE] vectorized
-                            FORWARD [RS_218]
-                              PartitionCols:_col0
-                              Select Operator [SEL_217] (rows=158394413 width=135)
-                                Output:["_col0","_col1"]
-                                Group By Operator [GBY_216] (rows=158394413 width=135)
-                                  Output:["_col0"],keys:KEY._col0
-                                <-Reducer 17 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_44]
-                                    PartitionCols:_col0
-                                    Group By Operator [GBY_43] (rows=316788826 width=135)
-                                      Output:["_col0"],keys:_col1
-                                      Merge Join Operator [MERGEJOIN_171] (rows=316788826 width=135)
-                                        Conds:RS_215._col0=RS_188._col0(Inner),Output:["_col1"]
-                                      <-Map 12 [SIMPLE_EDGE] vectorized
-                                        PARTITION_ONLY_SHUFFLE [RS_188]
-                                          PartitionCols:_col0
-                                           Please refer to the previous Select Operator [SEL_183]
-                                      <-Map 21 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_215]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_214] (rows=287989836 width=135)
-                                            Output:["_col0","_col1"]
-                                            Filter Operator [FIL_213] (rows=287989836 width=135)
-                                              predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_40_date_dim_d_date_sk_min) AND DynamicValue(RS_40_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_40_date_dim_d_date_sk_bloom_filter))) and cs_ship_customer_sk is not null and cs_sold_date_sk is not null)
-                                              TableScan [TS_33] (rows=287989836 width=135)
-                                                default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_ship_customer_sk"]
-                                              <-Reducer 19 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_212]
-                                                  Group By Operator [GBY_211] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_195]
-                                                      Group By Operator [GBY_192] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_189] (rows=4058 width=1119)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_183]
+                                      Group By Operator [GBY_43] (rows=316788826 width=135)
+                                        Output:["_col0"],keys:_col1
+                                        Merge Join Operator [MERGEJOIN_172] (rows=316788826 width=135)
+                                          Conds:RS_216._col0=RS_189._col0(Inner),Output:["_col1"]
+                                        <-Map 12 [SIMPLE_EDGE] vectorized
+                                          PARTITION_ONLY_SHUFFLE [RS_189]
+                                            PartitionCols:_col0
+                                             Please refer to the previous Select Operator [SEL_184]
+                                        <-Map 21 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_216]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_215] (rows=287989836 width=135)
+                                              Output:["_col0","_col1"]
+                                              Filter Operator [FIL_214] (rows=287989836 width=135)
+                                                predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_40_date_dim_d_date_sk_min) AND DynamicValue(RS_40_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_40_date_dim_d_date_sk_bloom_filter))) and cs_ship_customer_sk is not null and cs_sold_date_sk is not null)
+                                                TableScan [TS_33] (rows=287989836 width=135)
+                                                  default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_ship_customer_sk"]
+                                                <-Reducer 19 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_213]
+                                                    Group By Operator [GBY_212] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      PARTITION_ONLY_SHUFFLE [RS_196]
+                                                        Group By Operator [GBY_193] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_190] (rows=4058 width=1119)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_184]
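For readers of plans like the one above: the DynamicValue(...) range checks and the in_bloom_filter(...) predicate on the catalog_sales scan come from Tez dynamic semijoin reduction. A min/max/bloom-filter aggregate is built from the date_dim side (Map 12), broadcast via Reducer 19, and applied to cs_sold_date_sk before the join runs. The sketch below shows how a plan of this shape is typically requested; the configuration keys and the query are illustrative assumptions and are not part of this diff.

  -- Sketch only: configuration keys assumed from HiveConf; the query is illustrative.
  -- Enable the min/max/bloom-filter build on the dimension side of the join.
  SET hive.tez.dynamic.semijoin.reduction=true;
  -- Print the operator-tree EXPLAIN format used in these q.out files.
  SET hive.explain.user=true;

  EXPLAIN
  SELECT COUNT(*)
  FROM catalog_sales cs
  JOIN date_dim d ON cs.cs_sold_date_sk = d.d_date_sk
  -- d_year is a standard TPC-DS column, used here only for illustration.
  WHERE d.d_year = 2000;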
 


[37/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
index 0000000,d53e7fc..e81ea2c
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
@@@ -1,0 -1,1218 +1,1220 @@@
+ -- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+ --
+ -- Host: localhost    Database: test
+ -- ------------------------------------------------------
+ -- Server version	5.5.25
+ 
+ /*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+ /*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+ /*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+ /*!40101 SET NAMES utf8 */;
+ /*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+ /*!40103 SET TIME_ZONE='+00:00' */;
+ /*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+ /*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+ /*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+ /*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+ 
+ --
+ -- Table structure for table `BUCKETING_COLS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+   `SD_ID` bigint(20) NOT NULL,
+   `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+   KEY `BUCKETING_COLS_N49` (`SD_ID`),
+   CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `CDS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `CDS` (
+   `CD_ID` bigint(20) NOT NULL,
+   PRIMARY KEY (`CD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `COLUMNS_V2`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+   `CD_ID` bigint(20) NOT NULL,
+   `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `TYPE_NAME` MEDIUMTEXT DEFAULT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+   KEY `COLUMNS_V2_N49` (`CD_ID`),
+   CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `DATABASE_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+   `DB_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+   KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+   CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ CREATE TABLE `CTLGS` (
+     `CTLG_ID` BIGINT PRIMARY KEY,
+     `NAME` VARCHAR(256),
+     `DESC` VARCHAR(4000),
+     `LOCATION_URI` VARCHAR(4000) NOT NULL,
+     UNIQUE KEY `UNIQUE_CATALOG` (`NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ 
+ --
+ -- Table structure for table `DBS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `DBS` (
+   `DB_ID` bigint(20) NOT NULL,
+   `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `CTLG_NAME` varchar(256) NOT NULL,
+   PRIMARY KEY (`DB_ID`),
+   UNIQUE KEY `UNIQUE_DATABASE` (`NAME`, `CTLG_NAME`),
+   CONSTRAINT `CTLG_FK1` FOREIGN KEY (`CTLG_NAME`) REFERENCES `CTLGS` (`NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `DB_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+   `DB_GRANT_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `DB_ID` bigint(20) DEFAULT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`DB_GRANT_ID`),
+   UNIQUE KEY `DBPRIVILEGEINDEX` (`AUTHORIZER`,`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+   KEY `DB_PRIVS_N49` (`DB_ID`),
+   CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `GLOBAL_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
+   `USER_GRANT_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`USER_GRANT_ID`),
+   UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`AUTHORIZER`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `IDXS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `IDXS` (
+   `INDEX_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `DEFERRED_REBUILD` bit(1) NOT NULL,
+   `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+   `LAST_ACCESS_TIME` int(11) NOT NULL,
+   `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+   `SD_ID` bigint(20) DEFAULT NULL,
+   PRIMARY KEY (`INDEX_ID`),
+   UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+   KEY `IDXS_N51` (`SD_ID`),
+   KEY `IDXS_N50` (`INDEX_TBL_ID`),
+   KEY `IDXS_N49` (`ORIG_TBL_ID`),
+   CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+   CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+   CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `INDEX_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
+   `INDEX_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+   KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+   CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `NUCLEUS_TABLES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
+   `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`CLASS_NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PARTITIONS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PARTITIONS` (
+   `PART_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `LAST_ACCESS_TIME` int(11) NOT NULL,
+   `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `SD_ID` bigint(20) DEFAULT NULL,
+   `TBL_ID` bigint(20) DEFAULT NULL,
++  `WRITE_ID` bigint(20) DEFAULT 0,
+   PRIMARY KEY (`PART_ID`),
+   UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+   KEY `PARTITIONS_N49` (`TBL_ID`),
+   KEY `PARTITIONS_N50` (`SD_ID`),
+   CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+   CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PARTITION_EVENTS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
+   `PART_NAME_ID` bigint(20) NOT NULL,
+   `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `EVENT_TIME` bigint(20) NOT NULL,
+   `EVENT_TYPE` int(11) NOT NULL,
+   `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`PART_NAME_ID`),
+   KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PARTITION_KEYS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
+   `TBL_ID` bigint(20) NOT NULL,
+   `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+   KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+   CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PARTITION_KEY_VALS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
+   `PART_ID` bigint(20) NOT NULL,
+   `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+   KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+   CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PARTITION_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
+   `PART_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+   KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+   CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PART_COL_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
+   `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+   `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PART_ID` bigint(20) DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+   KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+   KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`AUTHORIZER`,`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+   CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PART_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
+   `PART_GRANT_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PART_ID` bigint(20) DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`PART_GRANT_ID`),
+   KEY `PARTPRIVILEGEINDEX` (`AUTHORIZER`,`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+   KEY `PART_PRIVS_N49` (`PART_ID`),
+   CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `ROLES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `ROLES` (
+   `ROLE_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`ROLE_ID`),
+   UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `ROLE_MAP`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
+   `ROLE_GRANT_ID` bigint(20) NOT NULL,
+   `ADD_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `ROLE_ID` bigint(20) DEFAULT NULL,
+   PRIMARY KEY (`ROLE_GRANT_ID`),
+   UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+   KEY `ROLE_MAP_N49` (`ROLE_ID`),
+   CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SDS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SDS` (
+   `SD_ID` bigint(20) NOT NULL,
+   `CD_ID` bigint(20) DEFAULT NULL,
+   `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `IS_COMPRESSED` bit(1) NOT NULL,
+   `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
+   `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `NUM_BUCKETS` int(11) NOT NULL,
+   `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `SERDE_ID` bigint(20) DEFAULT NULL,
+   PRIMARY KEY (`SD_ID`),
+   KEY `SDS_N49` (`SERDE_ID`),
+   KEY `SDS_N50` (`CD_ID`),
+   CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+   CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SD_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
+   `SD_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+   KEY `SD_PARAMS_N49` (`SD_ID`),
+   CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SEQUENCE_TABLE`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
+   `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `NEXT_VAL` bigint(20) NOT NULL,
+   PRIMARY KEY (`SEQUENCE_NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ INSERT INTO `SEQUENCE_TABLE` (`SEQUENCE_NAME`, `NEXT_VAL`) VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1);
+ 
+ --
+ -- Table structure for table `SERDES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SERDES` (
+   `SERDE_ID` bigint(20) NOT NULL,
+   `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `DESCRIPTION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `SERIALIZER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `DESERIALIZER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `SERDE_TYPE` integer,
+   PRIMARY KEY (`SERDE_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SERDE_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
+   `SERDE_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+   KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+   CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SKEWED_COL_NAMES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
+   `SD_ID` bigint(20) NOT NULL,
+   `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+   KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
+   CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
+   `SD_ID` bigint(20) NOT NULL,
+   `STRING_LIST_ID_KID` bigint(20) NOT NULL,
+   `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
+   KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
+   KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
+   CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+   CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SKEWED_STRING_LIST`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
+   `STRING_LIST_ID` bigint(20) NOT NULL,
+   PRIMARY KEY (`STRING_LIST_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SKEWED_STRING_LIST_VALUES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
+   `STRING_LIST_ID` bigint(20) NOT NULL,
+   `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
+   KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
+   CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SKEWED_VALUES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
+   `SD_ID_OID` bigint(20) NOT NULL,
+   `STRING_LIST_ID_EID` bigint(20) NOT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
+   KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
+   KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
+   CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+   CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SORT_COLS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SORT_COLS` (
+   `SD_ID` bigint(20) NOT NULL,
+   `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `ORDER` int(11) NOT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+   KEY `SORT_COLS_N49` (`SD_ID`),
+   CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `TABLE_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
+   `TBL_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+   KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+   CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `MV_CREATION_METADATA`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `MV_CREATION_METADATA` (
+   `MV_CREATION_METADATA_ID` bigint(20) NOT NULL,
+   `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `TXN_LIST` TEXT DEFAULT NULL,
+   `MATERIALIZATION_TIME` bigint(20) NOT NULL,
+   PRIMARY KEY (`MV_CREATION_METADATA_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ CREATE INDEX MV_UNIQUE_TABLE ON MV_CREATION_METADATA (TBL_NAME, DB_NAME) USING BTREE;
+ 
+ --
+ -- Table structure for table `TBLS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TBLS` (
+   `TBL_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `DB_ID` bigint(20) DEFAULT NULL,
+   `LAST_ACCESS_TIME` int(11) NOT NULL,
+   `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `RETENTION` int(11) NOT NULL,
+   `SD_ID` bigint(20) DEFAULT NULL,
+   `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `VIEW_EXPANDED_TEXT` mediumtext,
+   `VIEW_ORIGINAL_TEXT` mediumtext,
+   `IS_REWRITE_ENABLED` bit(1) NOT NULL DEFAULT 0,
++  `WRITE_ID` bigint(20) DEFAULT 0,
+   PRIMARY KEY (`TBL_ID`),
+   UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+   KEY `TBLS_N50` (`SD_ID`),
+   KEY `TBLS_N49` (`DB_ID`),
+   CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+   CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `MV_TABLES_USED`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `MV_TABLES_USED` (
+   `MV_CREATION_METADATA_ID` bigint(20) NOT NULL,
+   `TBL_ID` bigint(20) NOT NULL,
+   CONSTRAINT `MV_TABLES_USED_FK1` FOREIGN KEY (`MV_CREATION_METADATA_ID`) REFERENCES `MV_CREATION_METADATA` (`MV_CREATION_METADATA_ID`),
+   CONSTRAINT `MV_TABLES_USED_FK2` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `TBL_COL_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
+   `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+   `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_ID` bigint(20) DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+   KEY `TABLECOLUMNPRIVILEGEINDEX` (`AUTHORIZER`,`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+   KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+   CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `TBL_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
+   `TBL_GRANT_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_ID` bigint(20) DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`TBL_GRANT_ID`),
+   KEY `TBL_PRIVS_N49` (`TBL_ID`),
+   KEY `TABLEPRIVILEGEINDEX` (`AUTHORIZER`,`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+   CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `TAB_COL_STATS`
+ --
+ CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
+  `CS_ID` bigint(20) NOT NULL,
+  `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TBL_ID` bigint(20) NOT NULL,
+  `LONG_LOW_VALUE` bigint(20),
+  `LONG_HIGH_VALUE` bigint(20),
+  `DOUBLE_HIGH_VALUE` double(53,4),
+  `DOUBLE_LOW_VALUE` double(53,4),
+  `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `NUM_NULLS` bigint(20) NOT NULL,
+  `NUM_DISTINCTS` bigint(20),
+  `BIT_VECTOR` blob,
+  `AVG_COL_LEN` double(53,4),
+  `MAX_COL_LEN` bigint(20),
+  `NUM_TRUES` bigint(20),
+  `NUM_FALSES` bigint(20),
+  `LAST_ANALYZED` bigint(20) NOT NULL,
+   PRIMARY KEY (`CS_ID`),
+   CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (CAT_NAME, DB_NAME, TABLE_NAME, COLUMN_NAME) USING BTREE;
+ --
+ -- Table structure for table `PART_COL_STATS`
+ --
+ CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+  `CS_ID` bigint(20) NOT NULL,
+  `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PART_ID` bigint(20) NOT NULL,
+  `LONG_LOW_VALUE` bigint(20),
+  `LONG_HIGH_VALUE` bigint(20),
+  `DOUBLE_HIGH_VALUE` double(53,4),
+  `DOUBLE_LOW_VALUE` double(53,4),
+  `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `NUM_NULLS` bigint(20) NOT NULL,
+  `NUM_DISTINCTS` bigint(20),
+  `BIT_VECTOR` blob,
+  `AVG_COL_LEN` double(53,4),
+  `MAX_COL_LEN` bigint(20),
+  `NUM_TRUES` bigint(20),
+  `NUM_FALSES` bigint(20),
+  `LAST_ANALYZED` bigint(20) NOT NULL,
+   PRIMARY KEY (`CS_ID`),
+   CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE;
+ 
+ --
+ -- Table structure for table `TYPES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TYPES` (
+   `TYPES_ID` bigint(20) NOT NULL,
+   `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`TYPES_ID`),
+   UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `TYPE_FIELDS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
+   `TYPE_NAME` bigint(20) NOT NULL,
+   `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+   KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+   CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ -- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+ CREATE TABLE IF NOT EXISTS `MASTER_KEYS`
+ (
+     `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
+     `MASTER_KEY` VARCHAR(767) BINARY NULL,
+     PRIMARY KEY (`KEY_ID`)
+ ) ENGINE=INNODB DEFAULT CHARSET=latin1;
+ 
+ -- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+ CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
+ (
+     `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
+     `TOKEN` VARCHAR(767) BINARY NULL,
+     PRIMARY KEY (`TOKEN_IDENT`)
+ ) ENGINE=INNODB DEFAULT CHARSET=latin1;
+ 
+ --
+ -- Table structure for VERSION
+ --
+ CREATE TABLE IF NOT EXISTS `VERSION` (
+   `VER_ID` BIGINT NOT NULL,
+   `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
+   `VERSION_COMMENT` VARCHAR(255),
+   PRIMARY KEY (`VER_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ --
+ -- Table structure for table FUNCS
+ --
+ CREATE TABLE IF NOT EXISTS `FUNCS` (
+   `FUNC_ID` BIGINT(20) NOT NULL,
+   `CLASS_NAME` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+   `CREATE_TIME` INT(11) NOT NULL,
+   `DB_ID` BIGINT(20),
+   `FUNC_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+   `FUNC_TYPE` INT(11) NOT NULL,
+   `OWNER_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+   `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,
+   PRIMARY KEY (`FUNC_ID`),
+   UNIQUE KEY `UNIQUEFUNCTION` (`FUNC_NAME`, `DB_ID`),
+   KEY `FUNCS_N49` (`DB_ID`),
+   CONSTRAINT `FUNCS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ --
+ -- Table structure for table FUNC_RU
+ --
+ CREATE TABLE IF NOT EXISTS `FUNC_RU` (
+   `FUNC_ID` BIGINT(20) NOT NULL,
+   `RESOURCE_TYPE` INT(11) NOT NULL,
+   `RESOURCE_URI` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+   `INTEGER_IDX` INT(11) NOT NULL,
+   PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`),
+   CONSTRAINT `FUNC_RU_FK1` FOREIGN KEY (`FUNC_ID`) REFERENCES `FUNCS` (`FUNC_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE IF NOT EXISTS `NOTIFICATION_LOG`
+ (
+     `NL_ID` BIGINT(20) NOT NULL,
+     `EVENT_ID` BIGINT(20) NOT NULL,
+     `EVENT_TIME` INT(11) NOT NULL,
+     `EVENT_TYPE` varchar(32) NOT NULL,
+     `CAT_NAME` varchar(256),
+     `DB_NAME` varchar(128),
+     `TBL_NAME` varchar(256),
+     `MESSAGE` longtext,
+     `MESSAGE_FORMAT` varchar(16),
+     PRIMARY KEY (`NL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE IF NOT EXISTS `NOTIFICATION_SEQUENCE`
+ (
+     `NNI_ID` BIGINT(20) NOT NULL,
+     `NEXT_EVENT_ID` BIGINT(20) NOT NULL,
+     PRIMARY KEY (`NNI_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ INSERT INTO `NOTIFICATION_SEQUENCE` (`NNI_ID`, `NEXT_EVENT_ID`) SELECT * from (select 1 as `NNI_ID`, 1 as `NOTIFICATION_SEQUENCE`) a WHERE (SELECT COUNT(*) FROM `NOTIFICATION_SEQUENCE`) = 0;
+ 
+ CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS`
+ (
+   `CHILD_CD_ID` BIGINT,
+   `CHILD_INTEGER_IDX` INT(11),
+   `CHILD_TBL_ID` BIGINT,
+   `PARENT_CD_ID` BIGINT,
+   `PARENT_INTEGER_IDX` INT(11) NOT NULL,
+   `PARENT_TBL_ID` BIGINT NOT NULL,
+   `POSITION` BIGINT NOT NULL,
+   `CONSTRAINT_NAME` VARCHAR(400) NOT NULL,
+   `CONSTRAINT_TYPE` SMALLINT(6)  NOT NULL,
+   `UPDATE_RULE` SMALLINT(6),
+   `DELETE_RULE` SMALLINT(6),
+   `ENABLE_VALIDATE_RELY` SMALLINT(6) NOT NULL,
+   `DEFAULT_VALUE` VARCHAR(400),
+   PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX `CONSTRAINTS_PARENT_TABLE_ID_INDEX` ON KEY_CONSTRAINTS (`PARENT_TBL_ID`) USING BTREE;
+ 
+ CREATE INDEX `CONSTRAINTS_CONSTRAINT_TYPE_INDEX` ON KEY_CONSTRAINTS (`CONSTRAINT_TYPE`) USING BTREE;
+ 
+ -- -----------------------------
+ -- Metastore DB Properties table
+ -- -----------------------------
+ CREATE TABLE IF NOT EXISTS `METASTORE_DB_PROPERTIES` (
+   `PROPERTY_KEY` varchar(255) NOT NULL,
+   `PROPERTY_VALUE` varchar(1000) NOT NULL,
+   `DESCRIPTION` varchar(1000),
+  PRIMARY KEY(`PROPERTY_KEY`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ 
+ -- ---------------------
+ -- Resource plan tables.
+ -- ---------------------
+ CREATE TABLE IF NOT EXISTS WM_RESOURCEPLAN (
+     `RP_ID` bigint(20) NOT NULL,
+     `NAME` varchar(128) NOT NULL,
+     `QUERY_PARALLELISM` int(11),
+     `STATUS` varchar(20) NOT NULL,
+     `DEFAULT_POOL_ID` bigint(20),
+     PRIMARY KEY (`RP_ID`),
+     UNIQUE KEY `UNIQUE_WM_RESOURCEPLAN` (`NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE IF NOT EXISTS WM_POOL
+ (
+     `POOL_ID` bigint(20) NOT NULL,
+     `RP_ID` bigint(20) NOT NULL,
+     `PATH` varchar(767) NOT NULL,
+     `ALLOC_FRACTION` DOUBLE,
+     `QUERY_PARALLELISM` int(11),
+     `SCHEDULING_POLICY` varchar(767),
+     PRIMARY KEY (`POOL_ID`),
+     UNIQUE KEY `UNIQUE_WM_POOL` (`RP_ID`, `PATH`),
+     CONSTRAINT `WM_POOL_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ ALTER TABLE `WM_RESOURCEPLAN` ADD CONSTRAINT `WM_RESOURCEPLAN_FK1` FOREIGN KEY (`DEFAULT_POOL_ID`) REFERENCES `WM_POOL`(`POOL_ID`);
+ 
+ CREATE TABLE IF NOT EXISTS WM_TRIGGER
+ (
+     `TRIGGER_ID` bigint(20) NOT NULL,
+     `RP_ID` bigint(20) NOT NULL,
+     `NAME` varchar(128) NOT NULL,
+     `TRIGGER_EXPRESSION` varchar(1024),
+     `ACTION_EXPRESSION` varchar(1024),
+     `IS_IN_UNMANAGED` bit(1) NOT NULL DEFAULT 0,
+     PRIMARY KEY (`TRIGGER_ID`),
+     UNIQUE KEY `UNIQUE_WM_TRIGGER` (`RP_ID`, `NAME`),
+     CONSTRAINT `WM_TRIGGER_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE IF NOT EXISTS WM_POOL_TO_TRIGGER
+ (
+     `POOL_ID` bigint(20) NOT NULL,
+     `TRIGGER_ID` bigint(20) NOT NULL,
+     PRIMARY KEY (`POOL_ID`, `TRIGGER_ID`),
+     CONSTRAINT `WM_POOL_TO_TRIGGER_FK1` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`),
+     CONSTRAINT `WM_POOL_TO_TRIGGER_FK2` FOREIGN KEY (`TRIGGER_ID`) REFERENCES `WM_TRIGGER` (`TRIGGER_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE IF NOT EXISTS WM_MAPPING
+ (
+     `MAPPING_ID` bigint(20) NOT NULL,
+     `RP_ID` bigint(20) NOT NULL,
+     `ENTITY_TYPE` varchar(128) NOT NULL,
+     `ENTITY_NAME` varchar(128) NOT NULL,
+     `POOL_ID` bigint(20),
+     `ORDERING` int,
+     PRIMARY KEY (`MAPPING_ID`),
+     UNIQUE KEY `UNIQUE_WM_MAPPING` (`RP_ID`, `ENTITY_TYPE`, `ENTITY_NAME`),
+     CONSTRAINT `WM_MAPPING_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`),
+     CONSTRAINT `WM_MAPPING_FK2` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ -- ----------------------------
+ -- Transaction and Lock Tables
+ -- ----------------------------
+ CREATE TABLE TXNS (
+   TXN_ID bigint PRIMARY KEY,
+   TXN_STATE char(1) NOT NULL,
+   TXN_STARTED bigint NOT NULL,
+   TXN_LAST_HEARTBEAT bigint NOT NULL,
+   TXN_USER varchar(128) NOT NULL,
+   TXN_HOST varchar(128) NOT NULL,
+   TXN_AGENT_INFO varchar(128),
+   TXN_META_INFO varchar(128),
+   TXN_HEARTBEAT_COUNT int,
+   TXN_TYPE int
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE TXN_COMPONENTS (
+   TC_TXNID bigint NOT NULL,
+   TC_DATABASE varchar(128) NOT NULL,
+   TC_TABLE varchar(128),
+   TC_PARTITION varchar(767),
+   TC_OPERATION_TYPE char(1) NOT NULL,
+   TC_WRITEID bigint,
+   FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+ 
+ CREATE TABLE COMPLETED_TXN_COMPONENTS (
+   CTC_TXNID bigint NOT NULL,
+   CTC_DATABASE varchar(128) NOT NULL,
+   CTC_TABLE varchar(256),
+   CTC_PARTITION varchar(767),
+   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
+   CTC_WRITEID bigint,
+   CTC_UPDATE_DELETE char(1) NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE;
+ 
+ CREATE TABLE NEXT_TXN_ID (
+   NTXN_NEXT bigint NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ INSERT INTO NEXT_TXN_ID VALUES(1);
+ 
+ CREATE TABLE HIVE_LOCKS (
+   HL_LOCK_EXT_ID bigint NOT NULL,
+   HL_LOCK_INT_ID bigint NOT NULL,
+   HL_TXNID bigint NOT NULL,
+   HL_DB varchar(128) NOT NULL,
+   HL_TABLE varchar(128),
+   HL_PARTITION varchar(767),
+   HL_LOCK_STATE char(1) not null,
+   HL_LOCK_TYPE char(1) not null,
+   HL_LAST_HEARTBEAT bigint NOT NULL,
+   HL_ACQUIRED_AT bigint,
+   HL_USER varchar(128) NOT NULL,
+   HL_HOST varchar(128) NOT NULL,
+   HL_HEARTBEAT_COUNT int,
+   HL_AGENT_INFO varchar(128),
+   HL_BLOCKEDBY_EXT_ID bigint,
+   HL_BLOCKEDBY_INT_ID bigint,
+   PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID),
+   KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID);
+ 
+ CREATE TABLE NEXT_LOCK_ID (
+   NL_NEXT bigint NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ INSERT INTO NEXT_LOCK_ID VALUES(1);
+ 
+ CREATE TABLE COMPACTION_QUEUE (
+   CQ_ID bigint PRIMARY KEY,
+   CQ_DATABASE varchar(128) NOT NULL,
+   CQ_TABLE varchar(128) NOT NULL,
+   CQ_PARTITION varchar(767),
+   CQ_STATE char(1) NOT NULL,
+   CQ_TYPE char(1) NOT NULL,
+   CQ_TBLPROPERTIES varchar(2048),
+   CQ_WORKER_ID varchar(128),
+   CQ_START bigint,
+   CQ_RUN_AS varchar(128),
+   CQ_HIGHEST_WRITE_ID bigint,
+   CQ_META_INFO varbinary(2048),
+   CQ_HADOOP_JOB_ID varchar(32)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE COMPLETED_COMPACTIONS (
+   CC_ID bigint PRIMARY KEY,
+   CC_DATABASE varchar(128) NOT NULL,
+   CC_TABLE varchar(128) NOT NULL,
+   CC_PARTITION varchar(767),
+   CC_STATE char(1) NOT NULL,
+   CC_TYPE char(1) NOT NULL,
+   CC_TBLPROPERTIES varchar(2048),
+   CC_WORKER_ID varchar(128),
+   CC_START bigint,
+   CC_END bigint,
+   CC_RUN_AS varchar(128),
+   CC_HIGHEST_WRITE_ID bigint,
+   CC_META_INFO varbinary(2048),
+   CC_HADOOP_JOB_ID varchar(32)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+   NCQ_NEXT bigint NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+ 
+ CREATE TABLE AUX_TABLE (
+   MT_KEY1 varchar(128) NOT NULL,
+   MT_KEY2 bigint NOT NULL,
+   MT_COMMENT varchar(255),
+   PRIMARY KEY(MT_KEY1, MT_KEY2)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE WRITE_SET (
+   WS_DATABASE varchar(128) NOT NULL,
+   WS_TABLE varchar(128) NOT NULL,
+   WS_PARTITION varchar(767),
+   WS_TXNID bigint NOT NULL,
+   WS_COMMIT_ID bigint NOT NULL,
+   WS_OPERATION_TYPE char(1) NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID bigint NOT NULL,
+   T2W_DATABASE varchar(128) NOT NULL,
+   T2W_TABLE varchar(256) NOT NULL,
+   T2W_WRITEID bigint NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
+ 
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE varchar(128) NOT NULL,
+   NWI_TABLE varchar(256) NOT NULL,
+   NWI_NEXT bigint NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
+ 
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID bigint NOT NULL,
+   MHL_MIN_OPEN_TXNID bigint NOT NULL,
+   PRIMARY KEY(MHL_TXNID)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+ 
+ CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+   MRL_TXN_ID bigint NOT NULL,
+   MRL_DB_NAME VARCHAR(128) NOT NULL,
+   MRL_TBL_NAME VARCHAR(256) NOT NULL,
+   MRL_LAST_HEARTBEAT bigint NOT NULL,
+   PRIMARY KEY(MRL_TXN_ID)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE `I_SCHEMA` (
+   `SCHEMA_ID` BIGINT PRIMARY KEY,
+   `SCHEMA_TYPE` INTEGER NOT NULL,
+   `NAME` VARCHAR(256),
+   `DB_ID` BIGINT,
+   `COMPATIBILITY` INTEGER NOT NULL,
+   `VALIDATION_LEVEL` INTEGER NOT NULL,
+   `CAN_EVOLVE` bit(1) NOT NULL,
+   `SCHEMA_GROUP` VARCHAR(256),
+   `DESCRIPTION` VARCHAR(4000),
+   FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+   KEY `UNIQUE_NAME` (`NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE `SCHEMA_VERSION` (
+   `SCHEMA_VERSION_ID` bigint primary key,
+   `SCHEMA_ID` BIGINT,
+   `VERSION` INTEGER NOT NULL,
+   `CREATED_AT` BIGINT NOT NULL,
+   `CD_ID` BIGINT, 
+   `STATE` INTEGER NOT NULL,
+   `DESCRIPTION` VARCHAR(4000),
+   `SCHEMA_TEXT` mediumtext,
+   `FINGERPRINT` VARCHAR(256),
+   `SCHEMA_VERSION_NAME` VARCHAR(256),
+   `SERDE_ID` bigint, 
+   FOREIGN KEY (`SCHEMA_ID`) REFERENCES `I_SCHEMA` (`SCHEMA_ID`),
+   FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`),
+   FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+   KEY `UNIQUE_VERSION` (`SCHEMA_ID`, `VERSION`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY varchar(256) NOT NULL,
+   RTM_SRC_TXN_ID bigint NOT NULL,
+   RTM_TARGET_TXN_ID bigint NOT NULL,
+   PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ 
+ CREATE TABLE RUNTIME_STATS (
+   RS_ID bigint primary key,
+   CREATE_TIME bigint NOT NULL,
+   WEIGHT bigint NOT NULL,
+   PAYLOAD blob
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (
+   WNL_ID bigint NOT NULL,
+   WNL_TXNID bigint NOT NULL,
+   WNL_WRITEID bigint NOT NULL,
+   WNL_DATABASE varchar(128) NOT NULL,
+   WNL_TABLE varchar(128) NOT NULL,
+   WNL_PARTITION varchar(1024) NOT NULL,
+   WNL_TABLE_OBJ longtext NOT NULL,
+   WNL_PARTITION_OBJ longtext,
+   WNL_FILES longtext,
+   WNL_EVENT_TIME INT(11) NOT NULL,
+   PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ INSERT INTO `SEQUENCE_TABLE` (`SEQUENCE_NAME`, `NEXT_VAL`) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
+ 
+ -- -----------------------------------------------------------------
+ -- Record schema version. Should be the last step in the init script
+ -- -----------------------------------------------------------------
+ INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0');
+ 
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ /*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+ 
+ /*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+ /*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+ /*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+ /*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+ /*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+ /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+ /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+ 
+ -- Dump completed on 2012-08-23  0:56:31
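The functional change this merge carries into the schema above is the new WRITE_ID column on PARTITIONS and TBLS (the lines marked with ++), declared as bigint(20) DEFAULT 0 for fresh installs. A quick way to confirm the column after loading the schema is to query INFORMATION_SCHEMA; the database name metastore below is an assumption, not something fixed by the script.

  -- Sketch only: the schema name 'metastore' is an assumption.
  SELECT TABLE_NAME, COLUMN_NAME, COLUMN_TYPE, COLUMN_DEFAULT
    FROM INFORMATION_SCHEMA.COLUMNS
   WHERE TABLE_SCHEMA = 'metastore'
     AND TABLE_NAME IN ('TBLS', 'PARTITIONS')
     AND COLUMN_NAME = 'WRITE_ID';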

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
index 0000000,b3789f9..89265ad
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
@@@ -1,0 -1,6 +1,10 @@@
+ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS ' ';
+ 
++-- HIVE-19416
++ALTER TABLE TBLS ADD WRITE_ID bigint;
++ALTER TABLE PARTITIONS ADD WRITE_ID bigint;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS ' ';
+ 
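A minimal verification sketch for the upgrade script above (illustrative, not part of the script; assumes it is run against the MySQL metastore database after the upgrade completes):

  SHOW COLUMNS FROM TBLS LIKE 'WRITE_ID';
  SHOW COLUMNS FROM PARTITIONS LIKE 'WRITE_ID';
  SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION WHERE VER_ID = 1;  -- expect '4.0.0'

Both SHOW COLUMNS statements should return one row each once the HIVE-19416 columns are in place.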


[14/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
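
For context on the plan changes below: the new Top N Key Operator shows up when a query aggregates and then orders with a LIMIT, so rows whose keys cannot reach the top N can be dropped before the expensive aggregation and shuffle. A hedged sketch of that query shape, using table and column names that appear in the plans below (illustrative only; the diffs that follow are the pre-generated TPC-DS .q.out files, not this query):

  EXPLAIN
  SELECT ss_item_sk, SUM(ss_quantity) AS total_qty
  FROM store_sales
  GROUP BY ss_item_sk
  ORDER BY ss_item_sk
  LIMIT 100;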
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query14.q.out b/ql/src/test/results/clientpositive/perf/tez/query14.q.out
index 6d996b5..b9efa45 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query14.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query14.q.out
@@ -1,6 +1,6 @@
-Warning: Shuffle Join MERGEJOIN[1454][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Reducer 5' is a cross product
-Warning: Shuffle Join MERGEJOIN[1466][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Reducer 12' is a cross product
-Warning: Shuffle Join MERGEJOIN[1478][tables = [$hdt$_2, $hdt$_3, $hdt$_1]] in Stage 'Reducer 16' is a cross product
+Warning: Shuffle Join MERGEJOIN[1455][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Reducer 5' is a cross product
+Warning: Shuffle Join MERGEJOIN[1468][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Reducer 12' is a cross product
+Warning: Shuffle Join MERGEJOIN[1481][tables = [$hdt$_2, $hdt$_3, $hdt$_1]] in Stage 'Reducer 16' is a cross product
 PREHOOK: query: explain
 with  cross_items as
  (select i_item_sk ss_item_sk
@@ -314,1135 +314,1141 @@ Stage-0
     limit:100
     Stage-1
       Reducer 8 vectorized
-      File Output Operator [FS_1739]
-        Limit [LIM_1738] (rows=100 width=237)
+      File Output Operator [FS_1743]
+        Limit [LIM_1742] (rows=100 width=237)
           Number of rows:100
-          Select Operator [SEL_1737] (rows=1016388080 width=237)
+          Select Operator [SEL_1741] (rows=1016388080 width=237)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
           <-Reducer 7 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_1736]
-              Select Operator [SEL_1735] (rows=1016388080 width=237)
+            SHUFFLE [RS_1740]
+              Select Operator [SEL_1739] (rows=1016388080 width=237)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                Group By Operator [GBY_1734] (rows=1016388080 width=237)
+                Group By Operator [GBY_1738] (rows=1016388080 width=237)
                   Output:["_col0","_col1","_col2","_col3","_col5","_col6"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4
                 <-Union 6 [SIMPLE_EDGE]
                   <-Reducer 12 [CONTAINS]
-                    Reduce Output Operator [RS_1471]
+                    Reduce Output Operator [RS_1474]
                       PartitionCols:_col0, _col1, _col2, _col3, _col4
-                      Group By Operator [GBY_1470] (rows=2032776160 width=237)
+                      Group By Operator [GBY_1473] (rows=2032776160 width=237)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col0, _col1, _col2, _col3, 0L
-                        Select Operator [SEL_1468] (rows=116155905 width=264)
-                          Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                          Filter Operator [FIL_1467] (rows=116155905 width=264)
-                            predicate:(_col5 > _col1)
-                            Merge Join Operator [MERGEJOIN_1466] (rows=348467716 width=264)
-                              Conds:(Inner),(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
-                            <-Reducer 11 [CUSTOM_SIMPLE_EDGE] vectorized
-                              PARTITION_ONLY_SHUFFLE [RS_1745]
-                                Select Operator [SEL_1744] (rows=1 width=8)
-                                  Filter Operator [FIL_1743] (rows=1 width=8)
-                                    predicate:(sq_count_check(_col0) <= 1)
-                                    Group By Operator [GBY_1742] (rows=1 width=8)
-                                      Output:["_col0"],aggregations:["count()"]
-                                      Select Operator [SEL_1741] (rows=1 width=8)
-                                        Group By Operator [GBY_1740] (rows=1 width=8)
-                                          Output:["_col0"],aggregations:["count(VALUE._col0)"]
-                                        <-Union 10 [CUSTOM_SIMPLE_EDGE]
-                                          <-Reducer 19 [CONTAINS]
-                                            Reduce Output Operator [RS_1495]
-                                              Group By Operator [GBY_1494] (rows=1 width=8)
-                                                Output:["_col0"],aggregations:["count(_col0)"]
-                                                Select Operator [SEL_1493] (rows=1108786976 width=108)
-                                                  Output:["_col0"]
-                                                  Select Operator [SEL_1491] (rows=316788826 width=135)
+                        Top N Key Operator [TNK_1472] (rows=406555232 width=237)
+                          keys:_col0, _col1, _col2, _col3, 0L,sort order:+++++,top n:100
+                          Select Operator [SEL_1470] (rows=116155905 width=264)
+                            Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                            Filter Operator [FIL_1469] (rows=116155905 width=264)
+                              predicate:(_col5 > _col1)
+                              Merge Join Operator [MERGEJOIN_1468] (rows=348467716 width=264)
+                                Conds:(Inner),(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
+                              <-Reducer 11 [CUSTOM_SIMPLE_EDGE] vectorized
+                                PARTITION_ONLY_SHUFFLE [RS_1749]
+                                  Select Operator [SEL_1748] (rows=1 width=8)
+                                    Filter Operator [FIL_1747] (rows=1 width=8)
+                                      predicate:(sq_count_check(_col0) <= 1)
+                                      Group By Operator [GBY_1746] (rows=1 width=8)
+                                        Output:["_col0"],aggregations:["count()"]
+                                        Select Operator [SEL_1745] (rows=1 width=8)
+                                          Group By Operator [GBY_1744] (rows=1 width=8)
+                                            Output:["_col0"],aggregations:["count(VALUE._col0)"]
+                                          <-Union 10 [CUSTOM_SIMPLE_EDGE]
+                                            <-Reducer 19 [CONTAINS]
+                                              Reduce Output Operator [RS_1499]
+                                                Group By Operator [GBY_1498] (rows=1 width=8)
+                                                  Output:["_col0"],aggregations:["count(_col0)"]
+                                                  Select Operator [SEL_1497] (rows=1108786976 width=108)
                                                     Output:["_col0"]
-                                                    Merge Join Operator [MERGEJOIN_1490] (rows=316788826 width=135)
-                                                      Conds:RS_1817._col0=RS_1804._col0(Inner),Output:["_col1"]
-                                                    <-Map 21 [SIMPLE_EDGE] vectorized
-                                                      SHUFFLE [RS_1804]
-                                                        PartitionCols:_col0
-                                                        Select Operator [SEL_1799] (rows=8116 width=1119)
-                                                          Output:["_col0"]
-                                                          Filter Operator [FIL_1798] (rows=8116 width=1119)
-                                                            predicate:(d_date_sk is not null and d_year BETWEEN 1998 AND 2000)
-                                                            TableScan [TS_13] (rows=73049 width=1119)
-                                                              default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
-                                                    <-Map 17 [SIMPLE_EDGE] vectorized
-                                                      SHUFFLE [RS_1817]
-                                                        PartitionCols:_col0
-                                                        Select Operator [SEL_1815] (rows=287989836 width=135)
-                                                          Output:["_col0","_col1"]
-                                                          Filter Operator [FIL_1814] (rows=287989836 width=135)
-                                                            predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_17_date_dim_d_date_sk_min) AND DynamicValue(RS_17_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_17_date_dim_d_date_sk_bloom_filter))) and cs_sold_date_sk is not null)
-                                                            TableScan [TS_10] (rows=287989836 width=135)
-                                                              default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_quantity"]
-                                                            <-Reducer 22 [BROADCAST_EDGE] vectorized
-                                                              BROADCAST [RS_1813]
-                                                                Group By Operator [GBY_1812] (rows=1 width=12)
-                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                                <-Map 21 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                  SHUFFLE [RS_1810]
-                                                                    Group By Operator [GBY_1808] (rows=1 width=12)
-                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                      Select Operator [SEL_1801] (rows=8116 width=1119)
-                                                                        Output:["_col0"]
-                                                                         Please refer to the previous Select Operator [SEL_1799]
-                                          <-Reducer 35 [CONTAINS]
-                                            Reduce Output Operator [RS_1531]
-                                              Group By Operator [GBY_1530] (rows=1 width=8)
-                                                Output:["_col0"],aggregations:["count(_col0)"]
-                                                Select Operator [SEL_1529] (rows=1108786976 width=108)
-                                                  Output:["_col0"]
-                                                  Select Operator [SEL_1527] (rows=158402938 width=135)
+                                                    Select Operator [SEL_1495] (rows=316788826 width=135)
+                                                      Output:["_col0"]
+                                                      Merge Join Operator [MERGEJOIN_1494] (rows=316788826 width=135)
+                                                        Conds:RS_1821._col0=RS_1808._col0(Inner),Output:["_col1"]
+                                                      <-Map 21 [SIMPLE_EDGE] vectorized
+                                                        SHUFFLE [RS_1808]
+                                                          PartitionCols:_col0
+                                                          Select Operator [SEL_1803] (rows=8116 width=1119)
+                                                            Output:["_col0"]
+                                                            Filter Operator [FIL_1802] (rows=8116 width=1119)
+                                                              predicate:(d_date_sk is not null and d_year BETWEEN 1998 AND 2000)
+                                                              TableScan [TS_13] (rows=73049 width=1119)
+                                                                default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+                                                      <-Map 17 [SIMPLE_EDGE] vectorized
+                                                        SHUFFLE [RS_1821]
+                                                          PartitionCols:_col0
+                                                          Select Operator [SEL_1819] (rows=287989836 width=135)
+                                                            Output:["_col0","_col1"]
+                                                            Filter Operator [FIL_1818] (rows=287989836 width=135)
+                                                              predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_17_date_dim_d_date_sk_min) AND DynamicValue(RS_17_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_17_date_dim_d_date_sk_bloom_filter))) and cs_sold_date_sk is not null)
+                                                              TableScan [TS_10] (rows=287989836 width=135)
+                                                                default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_quantity"]
+                                                              <-Reducer 22 [BROADCAST_EDGE] vectorized
+                                                                BROADCAST [RS_1817]
+                                                                  Group By Operator [GBY_1816] (rows=1 width=12)
+                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                  <-Map 21 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                    SHUFFLE [RS_1814]
+                                                                      Group By Operator [GBY_1812] (rows=1 width=12)
+                                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                        Select Operator [SEL_1805] (rows=8116 width=1119)
+                                                                          Output:["_col0"]
+                                                                           Please refer to the previous Select Operator [SEL_1803]
+                                            <-Reducer 35 [CONTAINS]
+                                              Reduce Output Operator [RS_1535]
+                                                Group By Operator [GBY_1534] (rows=1 width=8)
+                                                  Output:["_col0"],aggregations:["count(_col0)"]
+                                                  Select Operator [SEL_1533] (rows=1108786976 width=108)
                                                     Output:["_col0"]
-                                                    Merge Join Operator [MERGEJOIN_1526] (rows=158402938 width=135)
-                                                      Conds:RS_1845._col0=RS_1832._col0(Inner),Output:["_col1"]
-                                                    <-Map 37 [SIMPLE_EDGE] vectorized
-                                                      SHUFFLE [RS_1832]
-                                                        PartitionCols:_col0
-                                                        Select Operator [SEL_1827] (rows=8116 width=1119)
-                                                          Output:["_col0"]
-                                                          Filter Operator [FIL_1826] (rows=8116 width=1119)
-                                                            predicate:(d_date_sk is not null and d_year BETWEEN 1998 AND 2000)
-                                                            TableScan [TS_24] (rows=73049 width=1119)
-                                                              default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
-                                                    <-Map 33 [SIMPLE_EDGE] vectorized
-                                                      SHUFFLE [RS_1845]
-                                                        PartitionCols:_col0
-                                                        Select Operator [SEL_1843] (rows=144002668 width=135)
-                                                          Output:["_col0","_col1"]
-                                                          Filter Operator [FIL_1842] (rows=144002668 width=135)
-                                                            predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_28_date_dim_d_date_sk_min) AND DynamicValue(RS_28_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_28_date_dim_d_date_sk_bloom_filter))) and ws_sold_date_sk is not null)
-                                                            TableScan [TS_21] (rows=144002668 width=135)
-                                                              default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_quantity"]
-                                                            <-Reducer 38 [BROADCAST_EDGE] vectorized
-                                                              BROADCAST [RS_1841]
-                                                                Group By Operator [GBY_1840] (rows=1 width=12)
-                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                                <-Map 37 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                  SHUFFLE [RS_1838]
-                                                                    Group By Operator [GBY_1836] (rows=1 width=12)
-                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                      Select Operator [SEL_1829] (rows=8116 width=1119)
-                                                                        Output:["_col0"]
-                                                                         Please refer to the previous Select Operator [SEL_1827]
-                                          <-Reducer 9 [CONTAINS]
-                                            Reduce Output Operator [RS_1465]
-                                              Group By Operator [GBY_1464] (rows=1 width=8)
-                                                Output:["_col0"],aggregations:["count(_col0)"]
-                                                Select Operator [SEL_1463] (rows=1108786976 width=108)
-                                                  Output:["_col0"]
-                                                  Select Operator [SEL_1461] (rows=633595212 width=88)
+                                                    Select Operator [SEL_1531] (rows=158402938 width=135)
+                                                      Output:["_col0"]
+                                                      Merge Join Operator [MERGEJOIN_1530] (rows=158402938 width=135)
+                                                        Conds:RS_1849._col0=RS_1836._col0(Inner),Output:["_col1"]
+                                                      <-Map 37 [SIMPLE_EDGE] vectorized
+                                                        SHUFFLE [RS_1836]
+                                                          PartitionCols:_col0
+                                                          Select Operator [SEL_1831] (rows=8116 width=1119)
+                                                            Output:["_col0"]
+                                                            Filter Operator [FIL_1830] (rows=8116 width=1119)
+                                                              predicate:(d_date_sk is not null and d_year BETWEEN 1998 AND 2000)
+                                                              TableScan [TS_24] (rows=73049 width=1119)
+                                                                default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+                                                      <-Map 33 [SIMPLE_EDGE] vectorized
+                                                        SHUFFLE [RS_1849]
+                                                          PartitionCols:_col0
+                                                          Select Operator [SEL_1847] (rows=144002668 width=135)
+                                                            Output:["_col0","_col1"]
+                                                            Filter Operator [FIL_1846] (rows=144002668 width=135)
+                                                              predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_28_date_dim_d_date_sk_min) AND DynamicValue(RS_28_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_28_date_dim_d_date_sk_bloom_filter))) and ws_sold_date_sk is not null)
+                                                              TableScan [TS_21] (rows=144002668 width=135)
+                                                                default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_quantity"]
+                                                              <-Reducer 38 [BROADCAST_EDGE] vectorized
+                                                                BROADCAST [RS_1845]
+                                                                  Group By Operator [GBY_1844] (rows=1 width=12)
+                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                  <-Map 37 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                    SHUFFLE [RS_1842]
+                                                                      Group By Operator [GBY_1840] (rows=1 width=12)
+                                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                        Select Operator [SEL_1833] (rows=8116 width=1119)
+                                                                          Output:["_col0"]
+                                                                           Please refer to the previous Select Operator [SEL_1831]
+                                            <-Reducer 9 [CONTAINS]
+                                              Reduce Output Operator [RS_1467]
+                                                Group By Operator [GBY_1466] (rows=1 width=8)
+                                                  Output:["_col0"],aggregations:["count(_col0)"]
+                                                  Select Operator [SEL_1465] (rows=1108786976 width=108)
                                                     Output:["_col0"]
-                                                    Merge Join Operator [MERGEJOIN_1460] (rows=633595212 width=88)
-                                                      Conds:RS_1641._col0=RS_1622._col0(Inner),Output:["_col1"]
-                                                    <-Map 102 [SIMPLE_EDGE] vectorized
-                                                      SHUFFLE [RS_1622]
-                                                        PartitionCols:_col0
-                                                        Select Operator [SEL_1611] (rows=8116 width=1119)
-                                                          Output:["_col0"]
-                                                          Filter Operator [FIL_1610] (rows=8116 width=1119)
-                                                            predicate:(d_date_sk is not null and d_year BETWEEN 1999 AND 2001)
-                                                            TableScan [TS_97] (rows=73049 width=1119)
-                                                              default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
-                                                    <-Map 1 [SIMPLE_EDGE] vectorized
-                                                      SHUFFLE [RS_1641]
-                                                        PartitionCols:_col0
-                                                        Select Operator [SEL_1639] (rows=575995635 width=88)
-                                                          Output:["_col0","_col1"]
-                                                          Filter Operator [FIL_1638] (rows=575995635 width=88)
-                                                            predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_7_date_dim_d_date_sk_min) AND DynamicValue(RS_7_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_7_date_dim_d_date_sk_bloom_filter))) and ss_sold_date_sk is not null)
-                                                            TableScan [TS_0] (rows=575995635 width=88)
-                                                              default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_quantity"]
-                                                            <-Reducer 108 [BROADCAST_EDGE] vectorized
-                                                              BROADCAST [RS_1637]
-                                                                Group By Operator [GBY_1636] (rows=1 width=12)
-                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                                <-Map 102 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                  SHUFFLE [RS_1634]
-                                                                    Group By Operator [GBY_1629] (rows=1 width=12)
-                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                      Select Operator [SEL_1619] (rows=8116 width=1119)
-                                                                        Output:["_col0"]
-                                                                         Please refer to the previous Select Operator [SEL_1611]
-                            <-Reducer 29 [CUSTOM_SIMPLE_EDGE] vectorized
-                              PARTITION_ONLY_SHUFFLE [RS_1748]
-                                Select Operator [SEL_1747] (rows=1 width=120)
-                                  Output:["_col0"]
-                                  Group By Operator [GBY_1746] (rows=1 width=120)
-                                    Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)"]
-                                  <-Union 28 [CUSTOM_SIMPLE_EDGE]
-                                    <-Reducer 27 [CONTAINS]
-                                      Reduce Output Operator [RS_1513]
-                                        Group By Operator [GBY_1512] (rows=1 width=120)
-                                          Output:["_col0","_col1"],aggregations:["sum(_col0)","count(_col0)"]
-                                          Select Operator [SEL_1511] (rows=1108786976 width=108)
-                                            Output:["_col0"]
-                                            Select Operator [SEL_1509] (rows=316788826 width=135)
-                                              Output:["_col0","_col1"]
-                                              Merge Join Operator [MERGEJOIN_1508] (rows=316788826 width=135)
-                                                Conds:RS_1824._col0=RS_1805._col0(Inner),Output:["_col1","_col2"]
-                                              <-Map 21 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_1805]
+                                                    Select Operator [SEL_1463] (rows=633595212 width=88)
+                                                      Output:["_col0"]
+                                                      Merge Join Operator [MERGEJOIN_1462] (rows=633595212 width=88)
+                                                        Conds:RS_1645._col0=RS_1626._col0(Inner),Output:["_col1"]
+                                                      <-Map 102 [SIMPLE_EDGE] vectorized
+                                                        SHUFFLE [RS_1626]
+                                                          PartitionCols:_col0
+                                                          Select Operator [SEL_1615] (rows=8116 width=1119)
+                                                            Output:["_col0"]
+                                                            Filter Operator [FIL_1614] (rows=8116 width=1119)
+                                                              predicate:(d_date_sk is not null and d_year BETWEEN 1999 AND 2001)
+                                                              TableScan [TS_97] (rows=73049 width=1119)
+                                                                default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+                                                      <-Map 1 [SIMPLE_EDGE] vectorized
+                                                        SHUFFLE [RS_1645]
+                                                          PartitionCols:_col0
+                                                          Select Operator [SEL_1643] (rows=575995635 width=88)
+                                                            Output:["_col0","_col1"]
+                                                            Filter Operator [FIL_1642] (rows=575995635 width=88)
+                                                              predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_7_date_dim_d_date_sk_min) AND DynamicValue(RS_7_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_7_date_dim_d_date_sk_bloom_filter))) and ss_sold_date_sk is not null)
+                                                              TableScan [TS_0] (rows=575995635 width=88)
+                                                                default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_quantity"]
+                                                              <-Reducer 108 [BROADCAST_EDGE] vectorized
+                                                                BROADCAST [RS_1641]
+                                                                  Group By Operator [GBY_1640] (rows=1 width=12)
+                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                  <-Map 102 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                    SHUFFLE [RS_1638]
+                                                                      Group By Operator [GBY_1633] (rows=1 width=12)
+                                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                        Select Operator [SEL_1623] (rows=8116 width=1119)
+                                                                          Output:["_col0"]
+                                                                           Please refer to the previous Select Operator [SEL_1615]
+                              <-Reducer 29 [CUSTOM_SIMPLE_EDGE] vectorized
+                                PARTITION_ONLY_SHUFFLE [RS_1752]
+                                  Select Operator [SEL_1751] (rows=1 width=120)
+                                    Output:["_col0"]
+                                    Group By Operator [GBY_1750] (rows=1 width=120)
+                                      Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)"]
+                                    <-Union 28 [CUSTOM_SIMPLE_EDGE]
+                                      <-Reducer 27 [CONTAINS]
+                                        Reduce Output Operator [RS_1517]
+                                          Group By Operator [GBY_1516] (rows=1 width=120)
+                                            Output:["_col0","_col1"],aggregations:["sum(_col0)","count(_col0)"]
+                                            Select Operator [SEL_1515] (rows=1108786976 width=108)
+                                              Output:["_col0"]
+                                              Select Operator [SEL_1513] (rows=316788826 width=135)
+                                                Output:["_col0","_col1"]
+                                                Merge Join Operator [MERGEJOIN_1512] (rows=316788826 width=135)
+                                                  Conds:RS_1828._col0=RS_1809._col0(Inner),Output:["_col1","_col2"]
+                                                <-Map 21 [SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_1809]
+                                                    PartitionCols:_col0
+                                                     Please refer to the previous Select Operator [SEL_1803]
+                                                <-Map 47 [SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_1828]
+                                                    PartitionCols:_col0
+                                                    Select Operator [SEL_1826] (rows=287989836 width=135)
+                                                      Output:["_col0","_col1","_col2"]
+                                                      Filter Operator [FIL_1825] (rows=287989836 width=135)
+                                                        predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_62_date_dim_d_date_sk_min) AND DynamicValue(RS_62_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_62_date_dim_d_date_sk_bloom_filter))) and cs_sold_date_sk is not null)
+                                                        TableScan [TS_55] (rows=287989836 width=135)
+                                                          default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_quantity","cs_list_price"]
+                                                        <-Reducer 26 [BROADCAST_EDGE] vectorized
+                                                          BROADCAST [RS_1824]
+                                                            Group By Operator [GBY_1823] (rows=1 width=12)
+                                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                            <-Map 21 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                              SHUFFLE [RS_1815]
+                                                                Group By Operator [GBY_1813] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                  Select Operator [SEL_1807] (rows=8116 width=1119)
+                                                                    Output:["_col0"]
+                                                                     Please refer to the previous Select Operator [SEL_1803]
+                                      <-Reducer 41 [CONTAINS]
+                                        Reduce Output Operator [RS_1553]
+                                          Group By Operator [GBY_1552] (rows=1 width=120)
+                                            Output:["_col0","_col1"],aggregations:["sum(_col0)","count(_col0)"]
+                                            Select Operator [SEL_1551] (rows=1108786976 width=108)
+                                              Output:["_col0"]
+                                              Select Operator [SEL_1549] (rows=158402938 width=135)
+                                                Output:["_col0","_col1"]
+                                                Merge Join Operator [MERGEJOIN_1548] (rows=158402938 width=135)
+                                                  Conds:RS_1856._col0=RS_1837._col0(Inner),Output:["_col1","_col2"]
+                                                <-Map 37 [SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_1837]
+                                                    PartitionCols:_col0
+                                                     Please refer to the previous Select Operator [SEL_1831]
+                                                <-Map 48 [SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_1856]
+                                                    PartitionCols:_col0
+                                                    Select Operator [SEL_1854] (rows=144002668 width=135)
+                                                      Output:["_col0","_col1","_col2"]
+                                                      Filter Operator [FIL_1853] (rows=144002668 width=135)
+                                                        predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_73_date_dim_d_date_sk_min) AND DynamicValue(RS_73_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_73_date_dim_d_date_sk_bloom_filter))) and ws_sold_date_sk is not null)
+                                                        TableScan [TS_66] (rows=144002668 width=135)
+                                                          default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_quantity","ws_list_price"]
+                                                        <-Reducer 40 [BROADCAST_EDGE] vectorized
+                                                          BROADCAST [RS_1852]
+                                                            Group By Operator [GBY_1851] (rows=1 width=12)
+                                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                            <-Map 37 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                              SHUFFLE [RS_1843]
+                                                                Group By Operator [GBY_1841] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                  Select Operator [SEL_1835] (rows=8116 width=1119)
+                                                                    Output:["_col0"]
+                                                                     Please refer to the previous Select Operator [SEL_1831]
+                                      <-Reducer 45 [CONTAINS]
+                                        Reduce Output Operator [RS_1571]
+                                          Group By Operator [GBY_1570] (rows=1 width=120)
+                                            Output:["_col0","_col1"],aggregations:["sum(_col0)","count(_col0)"]
+                                            Select Operator [SEL_1569] (rows=1108786976 width=108)
+                                              Output:["_col0"]
+                                              Select Operator [SEL_1567] (rows=633595212 width=88)
+                                                Output:["_col0","_col1"]
+                                                Merge Join Operator [MERGEJOIN_1566] (rows=633595212 width=88)
+                                                  Conds:RS_1863._col0=RS_1627._col0(Inner),Output:["_col1","_col2"]
+                                                <-Map 102 [SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_1627]
+                                                    PartitionCols:_col0
+                                                     Please refer to the previous Select Operator [SEL_1615]
+                                                <-Map 43 [SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_1863]
+                                                    PartitionCols:_col0
+                                                    Select Operator [SEL_1861] (rows=575995635 width=88)
+                                                      Output:["_col0","_col1","_col2"]
+                                                      Filter Operator [FIL_1860] (rows=575995635 width=88)
+                                                        predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_52_date_dim_d_date_sk_min) AND DynamicValue(RS_52_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_52_date_dim_d_date_sk_bloom_filter))) and ss_sold_date_sk is not null)
+                                                        TableScan [TS_45] (rows=575995635 width=88)
+                                                          default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_quantity","ss_list_price"]
+                                                        <-Reducer 109 [BROADCAST_EDGE] vectorized
+                                                          BROADCAST [RS_1859]
+                                                            Group By Operator [GBY_1858] (rows=1 width=12)
+                                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                            <-Map 102 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                              SHUFFLE [RS_1639]
+                                                                Group By Operator [GBY_1634] (rows=1 width=12)
+                                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                  Select Operator [SEL_1625] (rows=8116 width=1119)
+                                                                    Output:["_col0"]
+                                                                     Please refer to the previous Select Operator [SEL_1615]
+                              <-Reducer 57 [CUSTOM_SIMPLE_EDGE] vectorized
+                                PARTITION_ONLY_SHUFFLE [RS_1772]
+                                  Group By Operator [GBY_1771] (rows=348467716 width=135)
+                                    Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2
+                                  <-Reducer 56 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_369]
+                                      PartitionCols:_col0, _col1, _col2
+                                      Group By Operator [GBY_368] (rows=696935432 width=135)
+                                        Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col3)","count()"],keys:_col0, _col1, _col2
+                                        Select Operator [SEL_366] (rows=696935432 width=135)
+                                          Output:["_col0","_col1","_col2","_col3"]
+                                          Merge Join Operator [MERGEJOIN_1430] (rows=696935432 width=135)
+                                            Conds:RS_362._col1=RS_1703._col0(Inner),RS_362._col1=RS_1762._col0(Inner),Output:["_col2","_col3","_col8","_col9","_col10"]
+                                          <-Map 63 [SIMPLE_EDGE] vectorized
+                                            PARTITION_ONLY_SHUFFLE [RS_1703]
+                                              PartitionCols:_col0
+                                              Select Operator [SEL_1687] (rows=462000 width=1436)
+                                                Output:["_col0","_col1","_col2","_col3"]
+                                                Filter Operator [FIL_1678] (rows=462000 width=1436)
+                                                  predicate:i_item_sk is not null
+                                                  TableScan [TS_91] (rows=462000 width=1436)
+                                                    default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id"]
+                                          <-Reducer 80 [ONE_TO_ONE_EDGE] vectorized
+                                            PARTITION_ONLY_SHUFFLE [RS_1762]
+                                              PartitionCols:_col0
+                                              Group By Operator [GBY_1761] (rows=254100 width=1436)
+                                                Output:["_col0"],keys:KEY._col0
+                                              <-Reducer 79 [SIMPLE_EDGE]
+                                                SHUFFLE [RS_356]
                                                   PartitionCols:_col0
-                                                   Please refer to the previous Select Operator [SEL_1799]
-                                              <-Map 47 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_1824]
+                                                  Group By Operator [GBY_355] (rows=508200 width=1436)
+                                                    Output:["_col0"],keys:_col0
+                                                    Merge Join Operator [MERGEJOIN_1429] (rows=508200 width=1436)
+                                                      Conds:RS_1699._col1, _col2, _col3=RS_1760._col0, _col1, _col2(Inner),Output:["_col0"]
+                                                    <-Map 63 [SIMPLE_EDGE] vectorized
+                                                      PARTITION_ONLY_SHUFFLE [RS_1699]
+                                                        PartitionCols:_col1, _col2, _col3
+                                                        Select Operator [SEL_1683] (rows=462000 width=1436)
+                                                          Output:["_col0","_col1","_col2","_col3"]
+                                                          Filter Operator [FIL_1674] (rows=462000 width=1436)
+                                                            predicate:(i_brand_id is not null and i_category_id is not null and i_class_id is not null and i_item_sk is not null)
+                                                             Please refer to the previous TableScan [TS_91]
+                                                    <-Reducer 85 [ONE_TO_ONE_EDGE] vectorized
+                                                      FORWARD [RS_1760]
+                                                        PartitionCols:_col0, _col1, _col2
+                                                        Select Operator [SEL_1759] (rows=1 width=108)
+                                                          Output:["_col0","_col1","_col2"]
+                                                          Filter Operator [FIL_1758] (rows=1 width=108)
+                                                            predicate:(_col3 = 3L)
+                                                            Group By Operator [GBY_1757] (rows=304916424 width=108)
+                                                              Output:["_col0","_col1","_col2","_col3"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
+                                                            <-Union 84 [SIMPLE_EDGE]
+                                                              <-Reducer 83 [CONTAINS] vectorized
+                                                                Reduce Output Operator [RS_1897]
+                                                                  PartitionCols:_col0, _col1, _col2
+                                                                  Group By Operator [GBY_1896] (rows=609832849 width=108)
+                                                                    Output:["_col0","_col1","_col2","_col3"],aggregations:["count(_col3)"],keys:_col0, _col1, _col2
+                                                                    Group By Operator [GBY_1895] (rows=348477374 width=88)
+                                                                      Output:["_col0","_col1","_col2","_col3"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
+                                                                    <-Reducer 82 [SIMPLE_EDGE]
+                                                                      SHUFFLE [RS_300]
+                                                                        PartitionCols:_col0, _col1, _col2
+                                                                        Group By Operator [GBY_299] (rows=696954748 width=88)
+                                                                          Output:["_col0","_col1","_col2","_col3"],aggregations:["count()"],keys:_col5, _col6, _col7
+                                                                          Merge Join Operator [MERGEJOIN_1424] (rows=696954748 width=88)
+                                                                            Conds:RS_295._col1=RS_1700._col0(Inner),Output:["_col5","_col6","_col7"]
+                                                                          <-Map 63 [SIMPLE_EDGE] vectorized
+                                                                            PARTITION_ONLY_SHUFFLE [RS_1700]
+                                                                              PartitionCols:_col0
+                                                                              Select Operator [SEL_1684] (rows=462000 width=1436)
+                                                                                Output:["_col0","_col1","_col2","_col3"]
+                                                                                Filter Operator [FIL_1675] (rows=462000 width=1436)
+                                                                                  predicate:(i_brand_id is not null and i_category_id is not null and i_class_id is not null and i_item_sk is not null)
+                                                                                   Please refer to the previous TableScan [TS_91]
+                                                                          <-Reducer 101 [SIMPLE_EDGE]
+                                                                            SHUFFLE [RS_295]
+                                                                              PartitionCols:_col1
+                                                                              Merge Join Operator [MERGEJOIN_1408] (rows=633595212 width=88)
+                                                                                Conds:RS_1871._col0=RS_1616._col0(Inner),Output:["_col1"]
+                                                                              <-Map 102 [SIMPLE_EDGE] vectorized
+                                                                                SHUFFLE [RS_1616]
+                                                                                  PartitionCols:_col0
+                                                                                   Please refer to the previous Select Operator [SEL_1615]
+                                                                              <-Map 100 [SIMPLE_EDGE] vectorized
+                                                                                SHUFFLE [RS_1871]
+                                                                                  PartitionCols:_col0
+                                                                                  Select Operator [SEL_1870] (rows=575995635 width=88)
+                                                                                    Output:["_col0","_col1"]
+                                                                                    Filter Operator [FIL_1869] (rows=575995635 width=88)
+                                                                                      predicate:((ss_item_sk BETWEEN DynamicValue(RS_107_iss_i_item_sk_min) AND DynamicValue(RS_107_iss_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_107_iss_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_104_d1_d_date_sk_min) AND DynamicValue(RS_104_d1_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_104_d1_d_date_sk_bloom_filter))) and ss_item_sk is not null and ss_sold_date_sk is not null)
+                                                                                      TableScan [TS_94] (rows=575995635 width=88)
+                                                                                        default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk"]
+                                                                                      <-Reducer 103 [BROADCAST_EDGE] vectorized
+                                                                                        BROADCAST [RS_1866]
+                                                                                          Group By Operator [GBY_1865] (rows=1 width=12)
+                                                                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                                          <-Map 102 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                                            SHUFFLE [RS_1635]
+                                                                                              Group By Operator [GBY_1630] (rows=1 width=12)
+                                                                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                                                Select Operator [SEL_1617] (rows=8116 width=1119)
+                                                                                                  Output:["_col0"]
+                                                                                                   Please refer to the previous Select Operator [SEL_1615]
+                                                                                      <-Reducer 71 [BROADCAST_EDGE] vectorized
+                                                                                        BROADCAST [RS_1868]
+                                                                                          Group By Operator [GBY_1867] (rows=1 width=12)
+                                                                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                                          <-Map 63 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                                            PARTITION_ONLY_SHUFFLE [RS_1714]
+                                                                                              Group By Operator [GBY_1708] (rows=1 width=12)
+                                                                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                                                Select Operator [SEL_1692] (rows=462000 width=1436)
+                                                                                                  Output:["_col0"]
+                                                                                                  Select Operator [SEL_1681] (rows=462000 width=1436)
+                                                                                                    Output:["_col0","_col1","_col2","_col3"]
+                                                                                                    Filter Operator [FIL_1672] (rows=462000 width=1436)
+                                                                                                      predicate:(i_brand_id is not null and i_category_id is not null and i_class_id is not null and i_item_sk is not null)
+                                                                                                       Please refer to the previous TableScan [TS_91]
+                                                              <-Reducer 93 [CONTAINS] vectorized
+                                                                Reduce Output Operator [RS_1903]
+                                                                  PartitionCols:_col0, _col1, _col2
+                                                                  Group By Operator [GBY_1902] (rows=609832849 width=108)
+                                                                    Output:["_col0","_col1","_col2","_col3"],aggregations:["count(_col3)"],keys:_col0, _col1, _col2
+                                                                    Group By Operator [GBY_1901] (rows=174233858 width=135)
+                                                                      Output:["_col0","_col1","_col2","_col3"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
+                                                                    <-Reducer 92 [SIMPLE_EDGE]
+                                                                      SHUFFLE [RS_320]
+                                                                        PartitionCols:_col0, _col1, _col2
+                                                                        Group By Operator [GBY_319] (rows=348467716 width=135)
+                                                                          Output:["_col0","_col1","_col2","_col3"],aggregations:["count()"],keys:_col5, _col6, _col7
+                                                                          Merge Join Operator [MERGEJOIN_1426] (rows=348467716 width=135)
+                                                                            Conds:RS_315._col1=RS_1701._col0(Inner),Output:["_col5","_col6","_col7"]
+                                                                          <-Map 63 [SIMPLE_EDGE] vectorized
+                                                                            PARTITION_ONLY_SHUFFLE [RS_1701]
+                                                                              PartitionCols:_col0
+                                                                              Select Operator [SEL_1685] (rows=462000 width=1436)
+                                                                                Output:["_col0","_col1","_col2","_col3"]
+                                                                                Filter Operator [FIL_1676] (rows=462000 width=1436)
+                                                                                  predicate:(i_brand_id is not null and i_category_id is not null and i_class_id is not null and i_item_sk is not null)
+                                                                                   Please refer to the previous TableScan [TS_91]
+                                                                          <-Reducer 104 [SIMPLE_EDGE]
+                                                                            SHUFFLE [RS_315]
+                                                                              PartitionCols:_col1
+                                                                              Merge Join Operator [MERGEJOIN_1410] (rows=316788826 width=135)
+                                                                                Conds:RS_1881._col0=RS_1618._col0(Inner),Output:["_col1"]
+                                                                              <-Map 102 [SIMPLE_EDGE] vectorized
+                                                                                SHUFFLE [RS_1618]
+                                                                                  PartitionCols:_col0
+                                                                                   Please refer to the previous Select Operator [SEL_1615]
+                                                                              <-Map 110 [SIMPLE_EDGE] vectorized
+                                                                                SHUFFLE [RS_1881]
+                                                                                  PartitionCols:_col0
+                                                                                  Select Operator [SEL_1880] (rows=287989836 width=135)
+                                                                                    Output:["_col0","_col1"]
+                                                                                    Filter Operator [FIL_1879] (rows=287989836 width=135)
+                                                                                      predicate:((cs_item_sk BETWEEN DynamicValue(RS_127_ics_i_item_sk_min) AND DynamicValue(RS_127_ics_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_127_ics_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_124_d2_d_date_sk_min) AND DynamicValue(RS_124_d2_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_124_d2_d_date_sk_bloom_filter))) and cs_item_sk is not null and cs_sold_date_sk is not null)
+                                                                                      TableScan [TS_114] (rows=287989836 width=135)
+                                                                                        default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_item_sk"]
+                                                                                      <-Reducer 105 [BROADCAST_EDGE] vectorized
+                                                                                        BROADCAST [RS_1876]
+                                                                                          Group By Operator [GBY_1875] (rows=1 width=12)
+                                                                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                                          <-Map 102 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                                            SHUFFLE [RS_1636]
+                                                                                              Group By Operator [GBY_1631] (rows=1 width=12)
+                                                                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                                                Select Operator [SEL_1619] (rows=8116 width=1119)
+                                                                                                  Output:["_col0"]
+                                                                                                   Please refer to the previous Select Operator [SEL_1615]
+                                                                                      <-Reducer 74 [BROADCAST_EDGE] vectorized
+                                                                                        BROADCAST [RS_1878]
+                                                                                          Group By Operator [GBY_1877] (rows=1 width=12)
+                                                                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                                          <-Map 63 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                                            PARTITION_ONLY_SHUFFLE [RS_1715]
+                                                                                              Group By Operator [GBY_1709] (rows=1 width=12)
+                                                                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                                                Select Operator [SEL_1694] (rows=462000 width=1436)
+                                                                                                  Output:["_col0"]
+                                                                                                   Please refer to the previous Select Operator [SEL_1681]
+                                                              <-Reducer 96 [CONTAINS] vectorized
+                                                                Reduce Output Operator [RS_1909]
+                                                                  PartitionCols:_col0, _col1, _col2
+                                                                  Group By Operator [GBY_1908] (rows=609832849 width=108)
+                                                                    Output:["_col0","_col1","_col2","_col3"],aggregations:["count(_col3)"],keys:_col0, _col1, _col2
+                                                                    Group By Operator [GBY_1907] (rows=87121617 width=135)
+                                                                      Output:["_col0","_col1","_col2","_col3"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
+                                                                    <-Reducer 95 [SIMPLE_EDGE]
+                                                                      SHUFFLE [RS_341]
+                                                                        PartitionCols:_col0, _col1, _col2
+                                                                        Group By Operator [GBY_340] (rows=174243235 width=135)
+                                                                          Output:["_col0","_col1","_col2","_col3"],aggregations:["count()"],keys:_col5, _col6, _col7
+                                                                          Merge Join Operator [MERGEJOIN_1428] (rows=174243235 width=135)
+                                                                            Conds:RS_336._col1=RS_1702._col0(Inner),Output:["_col5","_col6","_col7"]
+                                                                          <-Map 63 [SIMPLE_EDGE] vectorized
+                                                                            PARTITION_ONLY_SHUFFLE [RS_1702]
+                                                                              PartitionCols:_col0
+                                                                              Select Operator [SEL_1686] (rows=462000 width=1436)
+                                                                                Output:["_col0","_col1","_col2","_col3"]
+                                                                                Filter Operator [FIL_1677] (rows=462000 width=1436)
+                                                                                  predicate:(i_brand_id is not null and i_category_id is not null and i_class_id is not null and i_item_sk is not null)
+                                                                                   Please refer to the previous TableScan [TS_91]
+                                                                          <-Reducer 106 [SIMPLE_EDGE]
+                                                                            SHUFFLE [RS_336]
+                                                                              PartitionCols:_col1
+                                                                              Merge Join Operator [MERGEJOIN_1412] (rows=158402938 width=135)
+                                                                                Conds:RS_1891._col0=RS_1620._col0(Inner),Output:["_col1"]
+                                                                              <-Map 102 [SIMPLE_EDGE] vectorized
+                                                                                SHUFFLE [RS_1620]
+                                                                                  PartitionCols:_col0
+                                                                                   Please refer to the previous Select Operator [SEL_1615]
+                                                                              <-Map 111 [SIMPLE_EDGE] vectorized
+                                                                                SHUFFLE [RS_1891]
+                                                                                  PartitionCols:_col0
+                                                                                  Select Operator [SEL_1890] (rows=144002668 width=135)
+                                                                                    Output:["_col0","_col1"]
+                                                                                    Filter Operator [FIL_1889] (rows=144002668 width=135)
+                                                                                      predicate:((ws_item_sk BETWEEN DynamicValue(RS_148_iws_i_item_sk_min) AND DynamicValue(RS_148_iws_i_item_sk_max) and in_bloom_filter(ws_item_sk, DynamicValue(RS_148_iws_i_item_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_145_d3_d_date_sk_min) AND DynamicValue(RS_145_d3_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_145_d3_d_date_sk_bloom_filter))) and ws_item_sk is not null and ws_sold_date_sk is not null)
+                                                                                      TableScan [TS_135] (rows=144002668 width=135)
+                                                                                        default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk"]
+                                                                                      <-Reducer 107 [BROADCAST_EDGE] vectorized
+                                                                                        BROADCAST [RS_1886]
+                                                                                          Group By Operator [GBY_1885] (rows=1 width=12)
+                                                                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                                          <-Map 102 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                                            SHUFFLE [RS_1637]
+                                                                                              Group By Operator [GBY_1632] (rows=1 width=12)
+                                                                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                                                Select Operator [SEL_1621] (rows=8116 width=1119)
+                                                                                                  Output:["_col0"]
+                                                                                                   Please refer to the previous Select Operator [SEL_1615]
+                                                                                      <-Reducer 77 [BROADCAST_EDGE] vectorized
+                                                                                        BROADCAST [RS_1888]
+                                                                                          Group By Operator [GBY_1887] (rows=1 width=12)
+                                                                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                                          <-Map 63 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                                            PARTITION_ONLY_SHUFFLE [RS_1716]
+                                                                                              Group By Operator [GBY_1710] (rows=1 width=12)
+                                                                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                                                Select Operator [SEL_1696] (rows=462000 width=1436)
+                                                                                                  Output:["_col0"]
+                                                                                                   Please refer to the previous Select Operator [SEL_1681]
+                                          <-Reducer 55 [SIMPLE_EDGE]
+                                            SHUFFLE [RS_362]
+                                              PartitionCols:_col1
+                                              Merge Join Operator [MERGEJOIN_1422] (rows=316788826 width=135)
+                                                Conds:RS_1770._col0=RS_1660._col0(Inner),Output:["_col1","_col2","_col3"]
+                                              <-Map 53 [SIMPLE_EDGE] vectorized
+                                                PARTITION_ONLY_SHUFFLE [RS_1660]
                                                   PartitionCols:_col0
-                                                  Select Operator [SEL_1822] (rows=287989836 width=135)
-                                                    Output:["_col0","_col1","_col2"]
-                                                    Filter Operator [FIL_1821] (rows=287989836 width=135)
-                                                      predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_62_date_dim_d_date_sk_min) AND DynamicValue(RS_62_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_62_date_dim_d_date_sk_bloom_filter))) and cs_sold_date_sk is not null)
-                                                      TableScan [TS_55] (rows=287989836 width=135)
-                                                        default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_quantity","cs_list_price"]
-                                                      <-Reducer 26 [BROADCAST_EDGE] vectorized
-                                                        BROADCAST [RS_1820]
-                                                          Group By Operator [GBY_1819] (rows=1 width=12)
+                                                  Select Operator [SEL_1657] (rows=18262 width=1119)
+                                                    Output:["_col0"]
+                                                    Filter Operator [FIL_1656] (rows=18262 width=1119)
+                                                      predicate:((d_moy = 11) and (d_year = 2000) and d_date_sk is not null)
+                                                      TableScan [TS_85] (rows=73049 width=1119)
+                                                        default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
+                                              <-Map 112 [SIMPLE_EDGE] vectorized
+                                                SHUFFLE [RS_1770]
+                                                  PartitionCols:_col0
+                                                  Select Operator [SEL_1769] (rows=287989836 width=135)
+                                                    Output:["_col0","_col1","_col2","_col3"]
+                                                    Filter Operator [FIL_1768] (rows=287989836 width=135)
+                                                      predicate:((cs_item_sk BETWEEN DynamicValue(RS_363_item_i_item_sk_min) AND DynamicValue(RS_363_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_363_item_i_item_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_364_item_i_item_sk_min) AND DynamicValue(RS_364_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_364_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_360_date_dim_d_date_sk_min) AND DynamicValue(RS_360_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_360_date_dim_d_date_sk_bloom_filter))) and cs_item_sk is not null and cs_sold_date_sk is not null)
+                                                      TableScan [TS_271] (rows=287989836 width=135)
+                                                        default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_item_sk","cs_quantity","cs_list_price"]
+                                                      <-Reducer 58 [BROADCAST_EDGE] vectorized
+                                                        BROADCAST [RS_1754]
+                                                          Group By Operator [GBY_1753] (rows=1 width=12)
                                                             Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                          <-Map 21 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                            SHUFFLE [RS_1811]
-                                                              Group By Operator [GBY_1809] (rows=1 width=12)
+                                                          <-Map 53 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                            PARTITION_ONLY_SHUFFLE [RS_1668]
+                                                              Group By Operator [GBY_1665] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                Select Operator [SEL_1803] (rows=8116 width=1119)
+                                                                Select Operator [SEL_1661] (rows=18262 width=1119)
                                                                   Output:["_col0"]
-                                                                   Please refer to the previous Select Operator [SEL_1799]
-                                    <-Reducer 41 [CONTAINS]
-                                      Reduce Output Operator [RS_1549]
-                                        Group By Operator [GBY_1548] (rows=1 width=120)
-                                          Output:["_col0","_col1"],aggregations:["sum(_col0)","count(_col0)"]
-                                          Select Operator [SEL_1547] (rows=1108786976 width=108)
-                                            Output:["_col0"]
-                                            Select Operator [SEL_1545] (rows=158402938 width=135)
-                                              Output:["_col0","_col1"]
-                                              Merge Join Operator [MERGEJOIN_1544] (rows=158402938 width=135)
-                                                Conds:RS_1852._col0=RS_1833._col0(Inner),Output:["_col1","_col2"]
-                                              <-Map 37 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_1833]
-                                                  PartitionCols:_col0
-                                                   Please refer to the previous Select Operator [SEL_1827]
-                                              <-Map 48 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_1852]
-                                                  PartitionCols:_col0
-                                                  Select Operator [SEL_1850] (rows=144002668 width=135)
-                                                    Output:["_col0","_col1","_col2"]
-                                                    Filter Operator [FIL_1849] (rows=144002668 width=135)
-                                                      predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_73_date_dim_d_date_sk_min) AND DynamicValue(RS_73_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_73_date_dim_d_date_sk_bloom_filter))) and ws_sold_date_sk is not null)
-                                                      TableScan [TS_66] (rows=144002668 width=135)
-                                                        default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_quantity","ws_list_price"]
-                                                      <-Reducer 40 [BROADCAST_EDGE] vectorized
-                                                        BROADCAST [RS_1848]
-                                                          Group By Operator [GBY_1847] (rows=1 width=12)
+                                                                   Please refer to the previous Select Operator [SEL_1657]
+                                                      <-Reducer 81 [BROADCAST_EDGE] vectorized
+                                                        BROADCAST [RS_1767]
+                                                          Group By Operator [GBY_1766] (rows=1 width=12)
                                                             Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                          <-Map 37 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                            SHUFFLE [RS_1839]
-                                                              Group By Operator [GBY_1837] (rows=1 width=12)
+                                                          <-Reducer 80 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                            PARTITION_ONLY_SHUFFLE [RS_1765]
+                                                              Group By Operator [GBY_1764] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                Select Operator [SEL_1831] (rows=8116 width=1119)
+                                                                Select Operator [SEL_1763] (rows=254100 width=1436)
                                                                   Output:["_col0"]
-                                                                   Please refer to the previous Select Operator [SEL_1827]
-                                    <-Reducer 45 [CONTAINS]
-                                      Reduce Output Operator [RS_1567]
-                                        Group By Operator [GBY_1566] (rows=1 width=120)
-                                          Output:["_col0","_col1"],aggregations:["sum(_col0)","count(_col0)"]
-                                          Select Operator [SEL_1565] (rows=1108786976 width=108)
-                                            Output:["_col0"]
-                                            Select Operator [SEL_1563] (rows=633595212 width=88)
-                                              Output:["_col0","_col1"]
-                                              Merge Join Operator [MERGEJOIN_1562] (rows=633595212 width=88)
-                                                Conds:RS_1859._col0=RS_1623._col0(Inner),Output:["_col1","_col2"]
-                                              <-Map 102 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_1623]
-                                                  PartitionCols:_col0
-                                                   Please refer to the previous Select Operator [SEL_1611]
-                                              <-Map 43 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_1859]
-                                                  PartitionCols:_col0
-                                                  Select Operator [SEL_1857] (rows=575995635 width=88)
-                                                    Output:["_col0","_col1","_col2"]
-                                                    Filter Operator [FIL_1856] (rows=575995635 width=88)
-                                                      predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_52_date_dim_d_date_sk_min) AND DynamicValue(RS_52_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_52_date_dim_d_date_sk_bloom_filter))) and ss_sold_date_sk is not null)
-                                                      TableScan [TS_45] (rows=575995635 width=88)
-                                                        default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_quantity","ss_list_price"]
-                                                      <-Reducer 109 [BROADCAST_EDGE] vectorized
-                                                        BROADCAST [RS_1855]
-                                                          Group By Operator [GBY_1854] (rows=1 width=12)
+                                                                   Please refer 

<TRUNCATED>
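The operator trees above illustrate Hive's bloom-filter based dynamic semijoin reduction on Tez: for each large fact scan (store_sales, catalog_sales, web_sales), a small dimension-side branch computes min(_col0), max(_col0) and a bloom_filter aggregate, broadcasts them over a BROADCAST_EDGE, and the fact-table Filter Operator then applies the BETWEEN DynamicValue(...) and in_bloom_filter(...) predicates before the Merge Join. The following is only an illustrative sketch of how a plan of this shape can be produced; it assumes the TPC-DS tables used by these perf tests are present, and the operator IDs, row counts and widths will differ from the truncated output above.

    -- Illustrative only: session settings that drive the min/max/bloom_filter
    -- branches seen in the plan (the plan above shows expectedEntries=1000000).
    SET hive.execution.engine=tez;
    SET hive.explain.user=true;
    SET hive.tez.dynamic.semijoin.reduction=true;
    SET hive.tez.min.bloom.filter.entries=1000000;

    -- A simple fact/dimension join; the dimension filters feed the runtime
    -- min/max/bloom filters that prune the store_sales scan.
    EXPLAIN
    SELECT i.i_item_id, COUNT(*) AS cnt
    FROM store_sales ss
    JOIN date_dim d ON ss.ss_sold_date_sk = d.d_date_sk
    JOIN item i     ON ss.ss_item_sk      = i.i_item_sk
    WHERE d.d_year = 2000 AND d.d_moy = 11
    GROUP BY i.i_item_id;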

[52/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 0000000,47f819b..285f7fb
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@@ -1,0 -1,9353 +1,9602 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import static org.apache.commons.lang.StringUtils.join;
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_COMMENT;
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.parseDbName;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.CAT_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DB_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependNotNullCatToDbName;
+ 
+ import java.io.IOException;
+ import java.net.InetAddress;
+ import java.net.UnknownHostException;
+ import java.nio.ByteBuffer;
+ import java.security.PrivilegedExceptionAction;
+ import java.util.AbstractMap;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.Iterator;
+ import java.util.LinkedHashMap;
+ import java.util.LinkedList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ import java.util.Objects;
+ import java.util.Properties;
+ import java.util.Set;
+ import java.util.concurrent.ConcurrentHashMap;
+ import java.util.concurrent.ExecutionException;
+ import java.util.concurrent.ExecutorService;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.Future;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicBoolean;
+ import java.util.concurrent.atomic.AtomicInteger;
+ import java.util.concurrent.locks.Condition;
+ import java.util.concurrent.locks.Lock;
+ import java.util.concurrent.locks.ReentrantLock;
+ import java.util.regex.Pattern;
+ 
+ import javax.jdo.JDOException;
+ 
+ import com.codahale.metrics.Counter;
+ import com.google.common.collect.ImmutableList;
+ import com.google.common.collect.ImmutableListMultimap;
+ import com.google.common.collect.Lists;
+ import com.google.common.collect.Multimaps;
+ 
+ import org.apache.commons.cli.OptionBuilder;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileStatus;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.common.TableName;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent;
+ import org.apache.hadoop.hive.metastore.events.AcidWriteEvent;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.StatsUpdateMode;
+ import org.apache.hadoop.hive.metastore.events.AbortTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.AddNotNullConstraintEvent;
+ import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.AddPrimaryKeyEvent;
+ import org.apache.hadoop.hive.metastore.events.AddUniqueConstraintEvent;
+ import org.apache.hadoop.hive.metastore.events.AllocWriteIdEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+ import org.apache.hadoop.hive.metastore.events.CommitTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.AddSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+ import org.apache.hadoop.hive.metastore.events.DropCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.DropConstraintEvent;
+ import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.DropFunctionEvent;
+ import org.apache.hadoop.hive.metastore.events.DropISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.DropSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+ import org.apache.hadoop.hive.metastore.events.InsertEvent;
+ import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
+ import org.apache.hadoop.hive.metastore.events.OpenTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAuthorizationCallEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAddSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreEventContext;
+ import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
+ import org.apache.hadoop.hive.metastore.events.PreReadCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreReadISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreReadhSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
+ import org.apache.hadoop.hive.metastore.metrics.JvmPauseMonitor;
+ import org.apache.hadoop.hive.metastore.metrics.Metrics;
+ import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+ import org.apache.hadoop.hive.metastore.metrics.PerfLogger;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+ import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager;
+ import org.apache.hadoop.hive.metastore.security.TUGIContainingTransport;
+ import org.apache.hadoop.hive.metastore.txn.TxnStore;
+ import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+ import org.apache.hadoop.security.SecurityUtil;
+ import org.apache.hadoop.hive.metastore.utils.CommonCliOptions;
+ import org.apache.hadoop.hive.metastore.utils.FileUtils;
+ import org.apache.hadoop.hive.metastore.utils.HdfsUtils;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.LogUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetastoreVersionInfo;
+ import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+ import org.apache.hadoop.security.UserGroupInformation;
+ import org.apache.hadoop.util.ReflectionUtils;
+ import org.apache.hadoop.util.ShutdownHookManager;
+ import org.apache.hadoop.util.StringUtils;
+ import org.apache.logging.log4j.LogManager;
+ import org.apache.logging.log4j.core.LoggerContext;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.TProcessor;
+ import org.apache.thrift.protocol.TBinaryProtocol;
+ import org.apache.thrift.protocol.TCompactProtocol;
+ import org.apache.thrift.protocol.TProtocol;
+ import org.apache.thrift.protocol.TProtocolFactory;
+ import org.apache.thrift.server.ServerContext;
+ import org.apache.thrift.server.TServer;
+ import org.apache.thrift.server.TServerEventHandler;
+ import org.apache.thrift.server.TThreadPoolServer;
+ import org.apache.thrift.transport.TFramedTransport;
+ import org.apache.thrift.transport.TServerSocket;
+ import org.apache.thrift.transport.TTransport;
+ import org.apache.thrift.transport.TTransportFactory;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.facebook.fb303.FacebookBase;
+ import com.facebook.fb303.fb_status;
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.base.Preconditions;
+ import com.google.common.base.Splitter;
+ import com.google.common.util.concurrent.ThreadFactoryBuilder;
+ 
+ /**
+  * TODO:pc remove application logic to a separate interface.
+  */
+ public class HiveMetaStore extends ThriftHiveMetastore {
+   public static final Logger LOG = LoggerFactory.getLogger(HiveMetaStore.class);
+   public static final String PARTITION_NUMBER_EXCEED_LIMIT_MSG =
+       "Number of partitions scanned (=%d) on table '%s' exceeds limit (=%d). This is controlled on the metastore server by %s.";
+ 
+   // boolean that tells if the HiveMetaStore (remote) server is being used.
+   // Can be used to determine if the calls to metastore api (HMSHandler) are being made with
+   // embedded metastore or a remote one
+   private static boolean isMetaStoreRemote = false;
+ 
+   // Used for testing to simulate method timeout.
+   @VisibleForTesting
+   static boolean TEST_TIMEOUT_ENABLED = false;
+   @VisibleForTesting
+   static long TEST_TIMEOUT_VALUE = -1;
+ 
+   private static ShutdownHookManager shutdownHookMgr;
+ 
+   public static final String ADMIN = "admin";
+   public static final String PUBLIC = "public";
+   /** MM write states. */
+   public static final char MM_WRITE_OPEN = 'o', MM_WRITE_COMMITTED = 'c', MM_WRITE_ABORTED = 'a';
+ 
+   private static HadoopThriftAuthBridge.Server saslServer;
+   private static MetastoreDelegationTokenManager delegationTokenManager;
+   private static boolean useSasl;
+ 
+   static final String NO_FILTER_STRING = "";
+   static final int UNLIMITED_MAX_PARTITIONS = -1;
+ 
+   private static final class ChainedTTransportFactory extends TTransportFactory {
+     private final TTransportFactory parentTransFactory;
+     private final TTransportFactory childTransFactory;
+ 
+     private ChainedTTransportFactory(
+         TTransportFactory parentTransFactory,
+         TTransportFactory childTransFactory) {
+       this.parentTransFactory = parentTransFactory;
+       this.childTransFactory = childTransFactory;
+     }
+ 
+     @Override
+     public TTransport getTransport(TTransport trans) {
+       return childTransFactory.getTransport(parentTransFactory.getTransport(trans));
+     }
+   }
+ 
+   public static boolean isRenameAllowed(Database srcDB, Database destDB) {
+     if (!srcDB.getName().equalsIgnoreCase(destDB.getName())) {
+       if (ReplChangeManager.isSourceOfReplication(srcDB) || ReplChangeManager.isSourceOfReplication(destDB)) {
+         return false;
+       }
+     }
+     return true;
+   }
+ 
+   public static class HMSHandler extends FacebookBase implements IHMSHandler {
+     public static final Logger LOG = HiveMetaStore.LOG;
+     private final Configuration conf; // stores datastore (jpox) properties,
+                                      // right now they come from jpox.properties
+ 
+     // Flag to control that always threads are initialized only once
+     // instead of multiple times
+     private final static AtomicBoolean alwaysThreadsInitialized =
+         new AtomicBoolean(false);
+ 
+     private static String currentUrl;
+     private FileMetadataManager fileMetadataManager;
+     private PartitionExpressionProxy expressionProxy;
+     private StorageSchemaReader storageSchemaReader;
+ 
+     // Variables for metrics
+     // Package visible so that HMSMetricsListener can see them.
+     static AtomicInteger databaseCount, tableCount, partCount;
+ 
+     private Warehouse wh; // hdfs warehouse
+     private static final ThreadLocal<RawStore> threadLocalMS =
+         new ThreadLocal<RawStore>() {
+           @Override
+           protected RawStore initialValue() {
+             return null;
+           }
+         };
+ 
+     private static final ThreadLocal<TxnStore> threadLocalTxn = new ThreadLocal<TxnStore>() {
+       @Override
+       protected TxnStore initialValue() {
+         return null;
+       }
+     };
+ 
+     private static final ThreadLocal<Map<String, com.codahale.metrics.Timer.Context>> timerContexts =
+         new ThreadLocal<Map<String, com.codahale.metrics.Timer.Context>>() {
+       @Override
+       protected Map<String, com.codahale.metrics.Timer.Context> initialValue() {
+         return new HashMap<>();
+       }
+     };
+ 
+     public static RawStore getRawStore() {
+       return threadLocalMS.get();
+     }
+ 
+     static void removeRawStore() {
+       threadLocalMS.remove();
+     }
+ 
+     // Thread local configuration is needed as many threads could make changes
+     // to the conf using the connection hook
+     private static final ThreadLocal<Configuration> threadLocalConf =
+         new ThreadLocal<Configuration>() {
+           @Override
+           protected Configuration initialValue() {
+             return null;
+           }
+         };
+ 
+     /**
+      * Thread local HMSHandler used during shutdown to notify meta listeners
+      */
+     private static final ThreadLocal<HMSHandler> threadLocalHMSHandler = new ThreadLocal<>();
+ 
+     /**
+      * Thread local Map to keep track of modified meta conf keys
+      */
+     private static final ThreadLocal<Map<String, String>> threadLocalModifiedConfig =
+         new ThreadLocal<>();
+ 
+     private static ExecutorService threadPool;
+ 
+     static final Logger auditLog = LoggerFactory.getLogger(
+         HiveMetaStore.class.getName() + ".audit");
+ 
+     private static void logAuditEvent(String cmd) {
+       if (cmd == null) {
+         return;
+       }
+ 
+       UserGroupInformation ugi;
+       try {
+         ugi = SecurityUtils.getUGI();
+       } catch (Exception ex) {
+         throw new RuntimeException(ex);
+       }
+ 
+       String address = getIPAddress();
+       if (address == null) {
+         address = "unknown-ip-addr";
+       }
+ 
+       auditLog.info("ugi={}	ip={}	cmd={}	", ugi.getUserName(), address, cmd);
+     }
+ 
+     private static String getIPAddress() {
+       if (useSasl) {
+         if (saslServer != null && saslServer.getRemoteAddress() != null) {
+           return saslServer.getRemoteAddress().getHostAddress();
+         }
+       } else {
+         // if kerberos is not enabled
+         return getThreadLocalIpAddress();
+       }
+       return null;
+     }
+ 
+     private static AtomicInteger nextSerialNum = new AtomicInteger();
+     private static ThreadLocal<Integer> threadLocalId = new ThreadLocal<Integer>() {
+       @Override
+       protected Integer initialValue() {
+         return nextSerialNum.getAndIncrement();
+       }
+     };
+ 
+     // This will only be set if the metastore is being accessed from a metastore Thrift server,
+     // not if it is from the CLI. Also, only if the TTransport being used to connect is an
+     // instance of TSocket. This is also not set when kerberos is used.
+     private static ThreadLocal<String> threadLocalIpAddress = new ThreadLocal<String>() {
+       @Override
+       protected String initialValue() {
+         return null;
+       }
+     };
+ 
+     /**
+      * Internal function to notify listeners for meta config change events
+      */
+     private void notifyMetaListeners(String key, String oldValue, String newValue) throws MetaException {
+       for (MetaStoreEventListener listener : listeners) {
+         listener.onConfigChange(new ConfigChangeEvent(this, key, oldValue, newValue));
+       }
+ 
+       if (transactionalListeners.size() > 0) {
+         // All the fields of this event are final, so no reason to create a new one for each
+         // listener
+         ConfigChangeEvent cce = new ConfigChangeEvent(this, key, oldValue, newValue);
+         for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+           transactionalListener.onConfigChange(cce);
+         }
+       }
+     }
+ 
+     /**
+      * Internal function to notify listeners to revert back to old values of keys
+      * that were modified during setMetaConf. This would get called from HiveMetaStore#cleanupRawStore
+      */
+     private void notifyMetaListenersOnShutDown() {
+       Map<String, String> modifiedConf = threadLocalModifiedConfig.get();
+       if (modifiedConf == null) {
+         // Nothing got modified
+         return;
+       }
+       try {
+         Configuration conf = threadLocalConf.get();
+         if (conf == null) {
+           throw new MetaException("Unexpected: modifiedConf is non-null but conf is null");
+         }
+         // Notify listeners of the changed value
+         for (Entry<String, String> entry : modifiedConf.entrySet()) {
+           String key = entry.getKey();
+           // curr value becomes old and vice-versa
+           String currVal = entry.getValue();
+           String oldVal = conf.get(key);
+           if (!Objects.equals(oldVal, currVal)) {
+             notifyMetaListeners(key, oldVal, currVal);
+           }
+         }
+         logInfo("Meta listeners shutdown notification completed.");
+       } catch (MetaException e) {
+         LOG.error("Failed to notify meta listeners on shutdown: ", e);
+       }
+     }
+ 
+     static void setThreadLocalIpAddress(String ipAddress) {
+       threadLocalIpAddress.set(ipAddress);
+     }
+ 
+     // This will return null if the metastore is not being accessed from a metastore Thrift server,
+     // or if the TTransport being used to connect is not an instance of TSocket, or if kerberos
+     // is used
+     static String getThreadLocalIpAddress() {
+       return threadLocalIpAddress.get();
+     }
+ 
+     // Make it possible for tests to check that the right type of PartitionExpressionProxy was
+     // instantiated.
+     @VisibleForTesting
+     PartitionExpressionProxy getExpressionProxy() {
+       return expressionProxy;
+     }
+ 
+     /**
+      * Use {@link #getThreadId()} instead.
+      * @return thread id
+      */
+     @Deprecated
+     public static Integer get() {
+       return threadLocalId.get();
+     }
+ 
+     @Override
+     public int getThreadId() {
+       return threadLocalId.get();
+     }
+ 
+     public HMSHandler(String name) throws MetaException {
+       this(name, MetastoreConf.newMetastoreConf(), true);
+     }
+ 
+     public HMSHandler(String name, Configuration conf) throws MetaException {
+       this(name, conf, true);
+     }
+ 
+     public HMSHandler(String name, Configuration conf, boolean init) throws MetaException {
+       super(name);
+       this.conf = conf;
+       isInTest = MetastoreConf.getBoolVar(this.conf, ConfVars.HIVE_IN_TEST);
+       if (threadPool == null) {
+         synchronized (HMSHandler.class) {
+           int numThreads = MetastoreConf.getIntVar(conf, ConfVars.FS_HANDLER_THREADS_COUNT);
+           threadPool = Executors.newFixedThreadPool(numThreads,
+               new ThreadFactoryBuilder().setDaemon(true)
+                   .setNameFormat("HMSHandler #%d").build());
+         }
+       }
+       if (init) {
+         init();
+       }
+     }
+ 
+     /**
+      * Use {@link #getConf()} instead.
+      * @return Configuration object
+      */
+     @Deprecated
+     public Configuration getHiveConf() {
+       return conf;
+     }
+ 
+     private ClassLoader classLoader;
+     private AlterHandler alterHandler;
+     private List<MetaStorePreEventListener> preListeners;
+     private List<MetaStoreEventListener> listeners;
+     private List<TransactionalMetaStoreEventListener> transactionalListeners;
+     private List<MetaStoreEndFunctionListener> endFunctionListeners;
+     private List<MetaStoreInitListener> initListeners;
+     private Pattern partitionValidationPattern;
+     private final boolean isInTest;
+ 
+     {
+       classLoader = Thread.currentThread().getContextClassLoader();
+       if (classLoader == null) {
+         classLoader = Configuration.class.getClassLoader();
+       }
+     }
+ 
+     @Override
+     public List<TransactionalMetaStoreEventListener> getTransactionalListeners() {
+       return transactionalListeners;
+     }
+ 
+     @Override
+     public List<MetaStoreEventListener> getListeners() {
+       return listeners;
+     }
+ 
+     @Override
+     public void init() throws MetaException {
+       initListeners = MetaStoreUtils.getMetaStoreListeners(
+           MetaStoreInitListener.class, conf, MetastoreConf.getVar(conf, ConfVars.INIT_HOOKS));
+       for (MetaStoreInitListener singleInitListener: initListeners) {
+           MetaStoreInitContext context = new MetaStoreInitContext();
+           singleInitListener.onInit(context);
+       }
+ 
+       String alterHandlerName = MetastoreConf.getVar(conf, ConfVars.ALTER_HANDLER);
+       alterHandler = ReflectionUtils.newInstance(JavaUtils.getClass(
+           alterHandlerName, AlterHandler.class), conf);
+       wh = new Warehouse(conf);
+ 
+       synchronized (HMSHandler.class) {
+         if (currentUrl == null || !currentUrl.equals(MetaStoreInit.getConnectionURL(conf))) {
+           createDefaultDB();
+           createDefaultRoles();
+           addAdminUsers();
+           currentUrl = MetaStoreInit.getConnectionURL(conf);
+         }
+       }
+ 
+       //Start Metrics
+       if (MetastoreConf.getBoolVar(conf, ConfVars.METRICS_ENABLED)) {
+         LOG.info("Begin calculating metadata count metrics.");
+         Metrics.initialize(conf);
+         databaseCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_DATABASES);
+         tableCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_TABLES);
+         partCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_PARTITIONS);
+         updateMetrics();
+ 
+       }
+ 
+       preListeners = MetaStoreUtils.getMetaStoreListeners(MetaStorePreEventListener.class,
+           conf, MetastoreConf.getVar(conf, ConfVars.PRE_EVENT_LISTENERS));
+       preListeners.add(0, new TransactionalValidationListener(conf));
+       listeners = MetaStoreUtils.getMetaStoreListeners(MetaStoreEventListener.class, conf,
+           MetastoreConf.getVar(conf, ConfVars.EVENT_LISTENERS));
+       listeners.add(new SessionPropertiesListener(conf));
+       listeners.add(new AcidEventListener(conf));
+       transactionalListeners = MetaStoreUtils.getMetaStoreListeners(TransactionalMetaStoreEventListener.class,
+           conf, MetastoreConf.getVar(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS));
+       if (Metrics.getRegistry() != null) {
+         listeners.add(new HMSMetricsListener(conf));
+       }
+ 
+       endFunctionListeners = MetaStoreUtils.getMetaStoreListeners(
+           MetaStoreEndFunctionListener.class, conf, MetastoreConf.getVar(conf, ConfVars.END_FUNCTION_LISTENERS));
+ 
+       String partitionValidationRegex =
+           MetastoreConf.getVar(conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN);
+       if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) {
+         partitionValidationPattern = Pattern.compile(partitionValidationRegex);
+       } else {
+         partitionValidationPattern = null;
+       }
+ 
+       // We only initialize once the tasks that need to be run periodically
+       if (alwaysThreadsInitialized.compareAndSet(false, true)) {
+         ThreadPool.initialize(conf);
+         Collection<String> taskNames =
+             MetastoreConf.getStringCollection(conf, ConfVars.TASK_THREADS_ALWAYS);
+         for (String taskName : taskNames) {
+           MetastoreTaskThread task =
+               JavaUtils.newInstance(JavaUtils.getClass(taskName, MetastoreTaskThread.class));
+           task.setConf(conf);
+           long freq = task.runFrequency(TimeUnit.MILLISECONDS);
+           // For backwards compatibility: some of these threads used to be hard coded, but they
+           // only ran if their frequency was > 0.
+           if (freq > 0) {
+             ThreadPool.getPool().scheduleAtFixedRate(task, freq, freq, TimeUnit.MILLISECONDS);
+           }
+         }
+       }
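+       // Create the partition expression proxy and the file metadata manager used by this handler.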
+       expressionProxy = PartFilterExprUtil.createExpressionProxy(conf);
+       fileMetadataManager = new FileMetadataManager(this.getMS(), conf);
+     }
+ 
+     private static String addPrefix(String s) {
+       return threadLocalId.get() + ": " + s;
+     }
+ 
+     /**
+      * Set the invoking HMSHandler on the thread local, if one has not been set already.
+      */
+     private static void setHMSHandler(HMSHandler handler) {
+       if (threadLocalHMSHandler.get() == null) {
+         threadLocalHMSHandler.set(handler);
+       }
+     }
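+ 
+     // setConf/getConf operate on a per-thread Configuration, so metaconf overrides made on one
+     // handler thread do not leak into others.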
+     @Override
+     public void setConf(Configuration conf) {
+       threadLocalConf.set(conf);
+       RawStore ms = threadLocalMS.get();
+       if (ms != null) {
+         ms.setConf(conf); // reload if DS related configuration is changed
+       }
+     }
+ 
+     @Override
+     public Configuration getConf() {
+       Configuration conf = threadLocalConf.get();
+       if (conf == null) {
+         conf = new Configuration(this.conf);
+         threadLocalConf.set(conf);
+       }
+       return conf;
+     }
+ 
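+     // Per-thread map of metaconf keys changed via setMetaConf, each mapped to its original value.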
+     private Map<String, String> getModifiedConf() {
+       Map<String, String> modifiedConf = threadLocalModifiedConfig.get();
+       if (modifiedConf == null) {
+         modifiedConf = new HashMap<>();
+         threadLocalModifiedConfig.set(modifiedConf);
+       }
+       return modifiedConf;
+     }
+ 
+     @Override
+     public Warehouse getWh() {
+       return wh;
+     }
+ 
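+     // Overrides a recognized metaconf key for the current handler thread only; the previous value
+     // is recorded so listeners can be notified of the change during cleanup. Illustrative usage
+     // (not from this patch): setMetaConf(ConfVars.TRY_DIRECT_SQL.getVarname(), "false").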
+     @Override
+     public void setMetaConf(String key, String value) throws MetaException {
+       ConfVars confVar = MetastoreConf.getMetaConf(key);
+       if (confVar == null) {
+         throw new MetaException("Invalid configuration key " + key);
+       }
+       try {
+         confVar.validate(value);
+       } catch (IllegalArgumentException e) {
+         throw new MetaException("Invalid configuration value " + value + " for key " + key +
+             " by " + e.getMessage());
+       }
+       Configuration configuration = getConf();
+       String oldValue = MetastoreConf.get(configuration, key);
+       // Save prev val of the key on threadLocal
+       Map<String, String> modifiedConf = getModifiedConf();
+       if (!modifiedConf.containsKey(key)) {
+         modifiedConf.put(key, oldValue);
+       }
+       // Set the invoking HMSHandler on the thread local; it will be used later to notify
+       // metaListeners in HiveMetaStore#cleanupRawStore.
+       setHMSHandler(this);
+       configuration.set(key, value);
+       notifyMetaListeners(key, oldValue, value);
+ 
+       if (ConfVars.TRY_DIRECT_SQL == confVar) {
+         HMSHandler.LOG.info("Direct SQL optimization = {}",  value);
+       }
+     }
+ 
+     @Override
+     public String getMetaConf(String key) throws MetaException {
+       ConfVars confVar = MetastoreConf.getMetaConf(key);
+       if (confVar == null) {
+         throw new MetaException("Invalid configuration key " + key);
+       }
+       return getConf().get(key, confVar.getDefaultVal().toString());
+     }
+ 
+     /**
+      * Get a cached RawStore.
+      *
+      * @return the cached RawStore
+      * @throws MetaException
+      */
+     @Override
+     public RawStore getMS() throws MetaException {
+       Configuration conf = getConf();
+       return getMSForConf(conf);
+     }
+ 
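+     // Returns the thread-local RawStore, creating and schema-verifying one on first use by the
+     // current thread.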
+     public static RawStore getMSForConf(Configuration conf) throws MetaException {
+       RawStore ms = threadLocalMS.get();
+       if (ms == null) {
+         ms = newRawStoreForConf(conf);
+         ms.verifySchema();
+         threadLocalMS.set(ms);
+         ms = threadLocalMS.get();
+       }
+       return ms;
+     }
+ 
+     @Override
+     public TxnStore getTxnHandler() {
++      return getMsThreadTxnHandler(conf);
++    }
++
++    public static TxnStore getMsThreadTxnHandler(Configuration conf) {
+       TxnStore txn = threadLocalTxn.get();
+       if (txn == null) {
+         txn = TxnUtils.getTxnStore(conf);
+         threadLocalTxn.set(txn);
+       }
+       return txn;
+     }
+ 
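+     // Builds a fresh RawStore proxy for the given configuration; each handler thread caches its
+     // own instance via getMSForConf.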
+     static RawStore newRawStoreForConf(Configuration conf) throws MetaException {
+       Configuration newConf = new Configuration(conf);
+       String rawStoreClassName = MetastoreConf.getVar(newConf, ConfVars.RAW_STORE_IMPL);
+       LOG.info(addPrefix("Opening raw store with implementation class:" + rawStoreClassName));
+       return RawStoreProxy.getProxy(newConf, conf, rawStoreClassName, threadLocalId.get());
+     }
+ 
+     @VisibleForTesting
+     public static void createDefaultCatalog(RawStore ms, Warehouse wh) throws MetaException,
+         InvalidOperationException {
+       try {
+         Catalog defaultCat = ms.getCatalog(DEFAULT_CATALOG_NAME);
+         // Null check because in some test cases we get a null from ms.getCatalog.
+         if (defaultCat != null && defaultCat.getLocationUri().equals("TBD")) {
+           // One time update issue.  When the new 'hive' catalog is created in an upgrade the
+           // script does not know the location of the warehouse.  So we need to update it.
+           LOG.info("Setting location of default catalog, as it hasn't been done after upgrade");
+           defaultCat.setLocationUri(wh.getWhRoot().toString());
+           ms.alterCatalog(defaultCat.getName(), defaultCat);
+         }
+ 
+       } catch (NoSuchObjectException e) {
+         Catalog cat = new Catalog(DEFAULT_CATALOG_NAME, wh.getWhRoot().toString());
+         cat.setDescription(Warehouse.DEFAULT_CATALOG_COMMENT);
+         ms.createCatalog(cat);
+       }
+     }
+ 
+     private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObjectException {
+       try {
+         ms.getDatabase(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME);
+       } catch (NoSuchObjectException e) {
+         Database db = new Database(DEFAULT_DATABASE_NAME, DEFAULT_DATABASE_COMMENT,
+           wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(), null);
+         db.setOwnerName(PUBLIC);
+         db.setOwnerType(PrincipalType.ROLE);
+         db.setCatalogName(DEFAULT_CATALOG_NAME);
+         ms.createDatabase(db);
+       }
+     }
+ 
+     /**
+      * Create the default database if it doesn't exist.
+      *
+      * There is potential contention when HiveServer2 (with an embedded metastore) and the
+      * Metastore Server invoke createDefaultDB concurrently. If one invocation fails, the
+      * JDOException is caught and the operation is retried once; if it fails again, the failure is
+      * only logged as a warning, on the assumption that the concurrent invocation succeeded.
+      *
+      * @throws MetaException
+      */
+     private void createDefaultDB() throws MetaException {
+       try {
+         RawStore ms = getMS();
+         createDefaultCatalog(ms, wh);
+         createDefaultDB_core(ms);
+       } catch (JDOException e) {
+         LOG.warn("Retrying creating default database after error: " + e.getMessage(), e);
+         try {
+           createDefaultDB_core(getMS());
+         } catch (InvalidObjectException e1) {
+           throw new MetaException(e1.getMessage());
+         }
+       } catch (InvalidObjectException|InvalidOperationException e) {
+         throw new MetaException(e.getMessage());
+       }
+     }
+ 
+     /**
+      * Create the default roles if they don't exist.
+      *
+      * There is potential contention when HiveServer2 (with an embedded metastore) and the
+      * Metastore Server invoke createDefaultRoles concurrently. If one invocation fails, the
+      * JDOException is caught and the operation is retried once; if it fails again, the failure is
+      * only logged as a warning, on the assumption that the concurrent invocation succeeded.
+      *
+      * @throws MetaException
+      */
+     private void createDefaultRoles() throws MetaException {
+       try {
+         createDefaultRoles_core();
+       } catch (JDOException e) {
+         LOG.warn("Retrying creating default roles after error: " + e.getMessage(), e);
+         createDefaultRoles_core();
+       }
+     }
+ 
+     private void createDefaultRoles_core() throws MetaException {
+ 
+       RawStore ms = getMS();
+       try {
+         ms.addRole(ADMIN, ADMIN);
+       } catch (InvalidObjectException e) {
+         LOG.debug(ADMIN + " role already exists", e);
+       } catch (NoSuchObjectException e) {
+         // This should never be thrown.
+         LOG.warn("Unexpected exception while adding the " + ADMIN + " role", e);
+       }
+       LOG.info("Added " + ADMIN + " role in metastore");
+       try {
+         ms.addRole(PUBLIC, PUBLIC);
+       } catch (InvalidObjectException e) {
+         LOG.debug(PUBLIC + " role already exists", e);
+       } catch (NoSuchObjectException e) {
+         // This should never be thrown.
+         LOG.warn("Unexpected exception while adding the " + PUBLIC + " role", e);
+       }
+       LOG.info("Added " + PUBLIC + " role in metastore");
+       // now grant all privs to admin
+       PrivilegeBag privs = new PrivilegeBag();
+       privs.addToPrivileges(new HiveObjectPrivilege( new HiveObjectRef(HiveObjectType.GLOBAL, null,
+         null, null, null), ADMIN, PrincipalType.ROLE, new PrivilegeGrantInfo("All", 0, ADMIN,
+           PrincipalType.ROLE, true), "SQL"));
+       try {
+         ms.grantPrivileges(privs);
+       } catch (InvalidObjectException e) {
+         // Surprisingly these privs are already granted.
+         LOG.debug("Failed while granting global privs to admin", e);
+       } catch (NoSuchObjectException e) {
+         // Unlikely to be thrown.
+         LOG.warn("Failed while granting global privs to admin", e);
+       }
+     }
+ 
+     /**
+      * Add the configured admin users if they don't exist.
+      *
+      * There is potential contention when HiveServer2 (with an embedded metastore) and the
+      * Metastore Server invoke addAdminUsers concurrently. If one invocation fails, the
+      * JDOException is caught and the operation is retried once; if it fails again, the failure is
+      * only logged as a warning, on the assumption that the concurrent invocation succeeded.
+      *
+      * @throws MetaException
+      */
+     private void addAdminUsers() throws MetaException {
+       try {
+         addAdminUsers_core();
+       } catch (JDOException e) {
+         LOG.warn("Retrying adding admin users after error: " + e.getMessage(), e);
+         addAdminUsers_core();
+       }
+     }
+ 
+     private void addAdminUsers_core() throws MetaException {
+ 
+       // now add pre-configured users to admin role
+       String userStr = MetastoreConf.getVar(conf, ConfVars.USERS_IN_ADMIN_ROLE, "").trim();
+       if (userStr.isEmpty()) {
+         LOG.info("No user is added in admin role, since config is empty");
+         return;
+       }
+       // Since user names need to be valid unix user names, per IEEE Std 1003.1-2001 they cannot
+       // contain a comma, so we can safely split the string above on commas.
+ 
+       Iterator<String> users = Splitter.on(",").trimResults().omitEmptyStrings().split(userStr).iterator();
+       if (!users.hasNext()) {
+         LOG.info("No user is added in admin role, since config value "+ userStr +
+           " is in incorrect format. We accept comma separated list of users.");
+         return;
+       }
+       Role adminRole;
+       RawStore ms = getMS();
+       try {
+         adminRole = ms.getRole(ADMIN);
+       } catch (NoSuchObjectException e) {
+         LOG.error("Failed to retrieve just added admin role",e);
+         return;
+       }
+       while (users.hasNext()) {
+         String userName = users.next();
+         try {
+           ms.grantRole(adminRole, userName, PrincipalType.USER, ADMIN, PrincipalType.ROLE, true);
+           LOG.info("Added " + userName + " to admin role");
+         } catch (NoSuchObjectException e) {
+           LOG.error("Failed to add "+ userName + " in admin role",e);
+         } catch (InvalidObjectException e) {
+           LOG.debug(userName + " already in admin role", e);
+         }
+       }
+     }
+ 
+     private static void logInfo(String m) {
+       LOG.info(threadLocalId.get().toString() + ": " + m);
+       logAuditEvent(m);
+     }
+ 
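+     // startFunction/endFunction bracket each metastore API call: they log the call, update the
+     // per-API counters, and start/stop the corresponding timer and active-call metrics.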
+     private String startFunction(String function, String extraLogInfo) {
+       incrementCounter(function);
+       logInfo((getThreadLocalIpAddress() == null ? "" : "source:" + getThreadLocalIpAddress() + " ") +
+           function + extraLogInfo);
+       com.codahale.metrics.Timer timer =
+           Metrics.getOrCreateTimer(MetricsConstants.API_PREFIX + function);
+       if (timer != null) {
+         // Timer will be null if we aren't using metrics.
+         timerContexts.get().put(function, timer.time());
+       }
+       Counter counter = Metrics.getOrCreateCounter(MetricsConstants.ACTIVE_CALLS + function);
+       if (counter != null) {
+         counter.inc();
+       }
+       return function;
+     }
+ 
+     private String startFunction(String function) {
+       return startFunction(function, "");
+     }
+ 
+     private void startTableFunction(String function, String catName, String db, String tbl) {
+       startFunction(function, " : tbl=" +
+           TableName.getQualified(catName, db, tbl));
+     }
+ 
+     private void startMultiTableFunction(String function, String db, List<String> tbls) {
+       String tableNames = join(tbls, ",");
+       startFunction(function, " : db=" + db + " tbls=" + tableNames);
+     }
+ 
+     private void startPartitionFunction(String function, String cat, String db, String tbl,
+                                         List<String> partVals) {
+       startFunction(function, " : tbl=" +
+           TableName.getQualified(cat, db, tbl) + "[" + join(partVals, ",") + "]");
+     }
+ 
+     private void startPartitionFunction(String function, String catName, String db, String tbl,
+                                         Map<String, String> partName) {
+       startFunction(function, " : tbl=" +
+           TableName.getQualified(catName, db, tbl) + "partition=" + partName);
+     }
+ 
+     private void endFunction(String function, boolean successful, Exception e) {
+       endFunction(function, successful, e, null);
+     }
+     private void endFunction(String function, boolean successful, Exception e,
+                             String inputTableName) {
+       endFunction(function, new MetaStoreEndFunctionContext(successful, e, inputTableName));
+     }
+ 
+     private void endFunction(String function, MetaStoreEndFunctionContext context) {
+       com.codahale.metrics.Timer.Context timerContext = timerContexts.get().remove(function);
+       if (timerContext != null) {
+         timerContext.close();
+       }
+       Counter counter = Metrics.getOrCreateCounter(MetricsConstants.ACTIVE_CALLS + function);
+       if (counter != null) {
+         counter.dec();
+       }
+ 
+       for (MetaStoreEndFunctionListener listener : endFunctionListeners) {
+         listener.onEndFunction(function, context);
+       }
+     }
+ 
+     @Override
+     public fb_status getStatus() {
+       return fb_status.ALIVE;
+     }
+ 
+     @Override
+     public void shutdown() {
+       cleanupRawStore();
+       PerfLogger.getPerfLogger(false).cleanupPerfLogMetrics();
+     }
+ 
+     @Override
+     public AbstractMap<String, Long> getCounters() {
+       AbstractMap<String, Long> counters = super.getCounters();
+ 
+       // Allow endFunctionListeners to add any counters they have collected
+       if (endFunctionListeners != null) {
+         for (MetaStoreEndFunctionListener listener : endFunctionListeners) {
+           listener.exportCounters(counters);
+         }
+       }
+ 
+       return counters;
+     }
+ 
+     @Override
+     public void create_catalog(CreateCatalogRequest rqst)
+         throws AlreadyExistsException, InvalidObjectException, MetaException {
+       Catalog catalog = rqst.getCatalog();
+       startFunction("create_catalog", ": " + catalog.toString());
+       boolean success = false;
+       Exception ex = null;
+       try {
+         try {
+           getMS().getCatalog(catalog.getName());
+           throw new AlreadyExistsException("Catalog " + catalog.getName() + " already exists");
+         } catch (NoSuchObjectException e) {
+           // expected
+         }
+ 
+         if (!MetaStoreUtils.validateName(catalog.getName(), null)) {
+           throw new InvalidObjectException(catalog.getName() + " is not a valid catalog name");
+         }
+ 
+         if (catalog.getLocationUri() == null) {
+           throw new InvalidObjectException("You must specify a path for the catalog");
+         }
+ 
+         RawStore ms = getMS();
+         Path catPath = new Path(catalog.getLocationUri());
+         boolean madeDir = false;
+         Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+         try {
+           firePreEvent(new PreCreateCatalogEvent(this, catalog));
+           if (!wh.isDir(catPath)) {
+             if (!wh.mkdirs(catPath)) {
+               throw new MetaException("Unable to create catalog path " + catPath +
+                   ", failed to create catalog " + catalog.getName());
+             }
+             madeDir = true;
+           }
+ 
+           ms.openTransaction();
+           ms.createCatalog(catalog);
+ 
+           // Create a default database inside the catalog
+           Database db = new Database(DEFAULT_DATABASE_NAME, "Default database for catalog " +
+                            catalog.getName(), catalog.getLocationUri(), Collections.emptyMap());
+           db.setCatalogName(catalog.getName());
+           create_database_core(ms, db);
+ 
+           if (!transactionalListeners.isEmpty()) {
+             transactionalListenersResponses =
+                 MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                     EventType.CREATE_CATALOG,
+                     new CreateCatalogEvent(true, this, catalog));
+           }
+ 
+           success = ms.commitTransaction();
+         } finally {
+           if (!success) {
+             ms.rollbackTransaction();
+             if (madeDir) {
+               wh.deleteDir(catPath, true, false, false);
+             }
+           }
+ 
+           if (!listeners.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(listeners,
+                 EventType.CREATE_CATALOG,
+                 new CreateCatalogEvent(success, this, catalog),
+                 null,
+                 transactionalListenersResponses, ms);
+           }
+         }
+         success = true;
+       } catch (AlreadyExistsException|InvalidObjectException|MetaException e) {
+         ex = e;
+         throw e;
+       } finally {
+         endFunction("create_catalog", success, ex);
+       }
+     }
+ 
+     @Override
+     public void alter_catalog(AlterCatalogRequest rqst) throws TException {
+       startFunction("alter_catalog " + rqst.getName());
+       boolean success = false;
+       Exception ex = null;
+       RawStore ms = getMS();
+       Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+       GetCatalogResponse oldCat = null;
+ 
+       try {
+         oldCat = get_catalog(new GetCatalogRequest(rqst.getName()));
+         // Above should have thrown NoSuchObjectException if there is no such catalog
+         assert oldCat != null && oldCat.getCatalog() != null;
+         firePreEvent(new PreAlterCatalogEvent(oldCat.getCatalog(), rqst.getNewCat(), this));
+ 
+         ms.openTransaction();
+         ms.alterCatalog(rqst.getName(), rqst.getNewCat());
+ 
+         if (!transactionalListeners.isEmpty()) {
+           transactionalListenersResponses =
+               MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                   EventType.ALTER_CATALOG,
+                   new AlterCatalogEvent(oldCat.getCatalog(), rqst.getNewCat(), true, this));
+         }
+ 
+         success = ms.commitTransaction();
+       } catch (MetaException|NoSuchObjectException e) {
+         ex = e;
+         throw e;
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+         }
+ 
+         if ((null != oldCat) && (!listeners.isEmpty())) {
+           MetaStoreListenerNotifier.notifyEvent(listeners,
+               EventType.ALTER_CATALOG,
+               new AlterCatalogEvent(oldCat.getCatalog(), rqst.getNewCat(), success, this),
+               null, transactionalListenersResponses, ms);
+         }
+         endFunction("alter_catalog", success, ex);
+       }
+ 
+     }
+ 
+     @Override
+     public GetCatalogResponse get_catalog(GetCatalogRequest rqst)
+         throws NoSuchObjectException, TException {
+       String catName = rqst.getName();
+       startFunction("get_catalog", ": " + catName);
+       Catalog cat = null;
+       Exception ex = null;
+       try {
+         cat = getMS().getCatalog(catName);
+         firePreEvent(new PreReadCatalogEvent(this, cat));
+         return new GetCatalogResponse(cat);
+       } catch (MetaException|NoSuchObjectException e) {
+         ex = e;
+         throw e;
+       } finally {
+         endFunction("get_database", cat != null, ex);
+       }
+     }
+ 
+     @Override
+     public GetCatalogsResponse get_catalogs() throws MetaException {
+       startFunction("get_catalogs");
+ 
+       List<String> ret = null;
+       Exception ex = null;
+       try {
+         ret = getMS().getCatalogs();
+       } catch (MetaException e) {
+         ex = e;
+         throw e;
+       } finally {
+         endFunction("get_catalog", ret != null, ex);
+       }
+       return new GetCatalogsResponse(ret == null ? Collections.emptyList() : ret);
+ 
+     }
+ 
+     @Override
+     public void drop_catalog(DropCatalogRequest rqst)
+         throws NoSuchObjectException, InvalidOperationException, MetaException {
+       String catName = rqst.getName();
+       startFunction("drop_catalog", ": " + catName);
+       if (DEFAULT_CATALOG_NAME.equalsIgnoreCase(catName)) {
+         endFunction("drop_catalog", false, null);
+         throw new MetaException("Can not drop " + DEFAULT_CATALOG_NAME + " catalog");
+       }
+ 
+       boolean success = false;
+       Exception ex = null;
+       try {
+         dropCatalogCore(catName);
+         success = true;
+       } catch (NoSuchObjectException|InvalidOperationException|MetaException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("drop_catalog", success, ex);
+       }
+ 
+     }
+ 
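+     // Drops the catalog after verifying that only an empty default database remains in it; the
+     // catalog directory is deleted only once the metastore transaction has committed.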
+     private void dropCatalogCore(String catName)
+         throws MetaException, NoSuchObjectException, InvalidOperationException {
+       boolean success = false;
+       Catalog cat = null;
+       Map<String, String> transactionalListenerResponses = Collections.emptyMap();
+       RawStore ms = getMS();
+       try {
+         ms.openTransaction();
+         cat = ms.getCatalog(catName);
+ 
+         firePreEvent(new PreDropCatalogEvent(this, cat));
+ 
+         List<String> allDbs = get_databases(prependNotNullCatToDbName(catName, null));
+         if (allDbs != null && !allDbs.isEmpty()) {
+           // It might just be the default, in which case we can drop that one if it's empty
+           if (allDbs.size() == 1 && allDbs.get(0).equals(DEFAULT_DATABASE_NAME)) {
+             try {
+               drop_database_core(ms, catName, DEFAULT_DATABASE_NAME, true, false);
+             } catch (InvalidOperationException e) {
+               // This means there are still tables or other objects in the database.
+               throw new InvalidOperationException("There are still objects in the default " +
+                   "database for catalog " + catName);
+             } catch (InvalidObjectException|IOException|InvalidInputException e) {
+               MetaException me = new MetaException("Error attempt to drop default database for " +
+                   "catalog " + catName);
+               me.initCause(e);
+               throw me;
+             }
+           } else {
+             throw new InvalidOperationException("There are non-default databases in the catalog " +
+                 catName + " so it cannot be dropped.");
+           }
+         }
+ 
+         ms.dropCatalog(catName);
+         if (!transactionalListeners.isEmpty()) {
+           transactionalListenerResponses =
+               MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                   EventType.DROP_CATALOG,
+                   new DropCatalogEvent(true, this, cat));
+         }
+ 
+         success = ms.commitTransaction();
+       } finally {
+         if (success) {
+           wh.deleteDir(wh.getDnsPath(new Path(cat.getLocationUri())), false, false, false);
+         } else {
+           ms.rollbackTransaction();
+         }
+ 
+         if (!listeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(listeners,
+               EventType.DROP_CATALOG,
+               new DropCatalogEvent(success, this, cat),
+               null,
+               transactionalListenerResponses, ms);
+         }
+       }
+     }
+ 
+ 
+     // Assumes that the catalog has already been set.
+     private void create_database_core(RawStore ms, final Database db)
+         throws AlreadyExistsException, InvalidObjectException, MetaException {
+       if (!MetaStoreUtils.validateName(db.getName(), null)) {
+         throw new InvalidObjectException(db.getName() + " is not a valid database name");
+       }
+ 
+       Catalog cat = null;
+       try {
+         cat = getMS().getCatalog(db.getCatalogName());
+       } catch (NoSuchObjectException e) {
+         LOG.error("No such catalog " + db.getCatalogName());
+         throw new InvalidObjectException("No such catalog " + db.getCatalogName());
+       }
+       Path dbPath = wh.determineDatabasePath(cat, db);
+       db.setLocationUri(dbPath.toString());
+ 
+       boolean success = false;
+       boolean madeDir = false;
+       Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+       try {
+         firePreEvent(new PreCreateDatabaseEvent(db, this));
+         if (!wh.isDir(dbPath)) {
+           LOG.debug("Creating database path " + dbPath);
+           if (!wh.mkdirs(dbPath)) {
+             throw new MetaException("Unable to create database path " + dbPath +
+                 ", failed to create database " + db.getName());
+           }
+           madeDir = true;
+         }
+ 
+         ms.openTransaction();
+         ms.createDatabase(db);
+ 
+         if (!transactionalListeners.isEmpty()) {
+           transactionalListenersResponses =
+               MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                                     EventType.CREATE_DATABASE,
+                                                     new CreateDatabaseEvent(db, true, this));
+         }
+ 
+         success = ms.commitTransaction();
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+           if (madeDir) {
+             wh.deleteDir(dbPath, true, db);
+           }
+         }
+ 
+         if (!listeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(listeners,
+                                                 EventType.CREATE_DATABASE,
+                                                 new CreateDatabaseEvent(db, success, this),
+                                                 null,
+                                                 transactionalListenersResponses, ms);
+         }
+       }
+     }
+ 
+     @Override
+     public void create_database(final Database db)
+         throws AlreadyExistsException, InvalidObjectException, MetaException {
+       startFunction("create_database", ": " + db.toString());
+       boolean success = false;
+       Exception ex = null;
+       if (!db.isSetCatalogName()) {
+         db.setCatalogName(getDefaultCatalog(conf));
+       }
+       try {
+         try {
+           if (null != get_database_core(db.getCatalogName(), db.getName())) {
+             throw new AlreadyExistsException("Database " + db.getName() + " already exists");
+           }
+         } catch (NoSuchObjectException e) {
+           // expected
+         }
+ 
+         if (TEST_TIMEOUT_ENABLED) {
+           try {
+             Thread.sleep(TEST_TIMEOUT_VALUE);
+           } catch (InterruptedException e) {
+             // do nothing
+           }
+           Deadline.checkTimeout();
+         }
+         create_database_core(getMS(), db);
+         success = true;
+       } catch (MetaException | InvalidObjectException | AlreadyExistsException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("create_database", success, ex);
+       }
+     }
+ 
+     @Override
+     public Database get_database(final String name) throws NoSuchObjectException, MetaException {
+       startFunction("get_database", ": " + name);
+       Database db = null;
+       Exception ex = null;
+       try {
+         String[] parsedDbName = parseDbName(name, conf);
+         db = get_database_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]);
+         firePreEvent(new PreReadDatabaseEvent(db, this));
+       } catch (MetaException|NoSuchObjectException e) {
+         ex = e;
+         throw e;
+       } finally {
+         endFunction("get_database", db != null, ex);
+       }
+       return db;
+     }
+ 
+     @Override
+     public Database get_database_core(String catName, final String name) throws NoSuchObjectException, MetaException {
+       Database db = null;
+       if (name == null) {
+         throw new MetaException("Database name cannot be null.");
+       }
+       try {
+         db = getMS().getDatabase(catName, name);
+       } catch (MetaException | NoSuchObjectException e) {
+         throw e;
+       } catch (Exception e) {
+         assert (e instanceof RuntimeException);
+         throw (RuntimeException) e;
+       }
+       return db;
+     }
+ 
+     @Override
+     public void alter_database(final String dbName, final Database newDB) throws TException {
+       startFunction("alter_database " + dbName);
+       boolean success = false;
+       Exception ex = null;
+       RawStore ms = getMS();
+       Database oldDB = null;
+       Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+ 
+       // Perform the same URI normalization as create_database_core.
+       if (newDB.getLocationUri() != null) {
+         newDB.setLocationUri(wh.getDnsPath(new Path(newDB.getLocationUri())).toString());
+       }
+ 
+       String[] parsedDbName = parseDbName(dbName, conf);
+ 
+       try {
+         oldDB = get_database_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]);
+         if (oldDB == null) {
+           throw new MetaException("Could not alter database \"" + parsedDbName[DB_NAME] +
+               "\". Could not retrieve old definition.");
+         }
+         firePreEvent(new PreAlterDatabaseEvent(oldDB, newDB, this));
+ 
+         ms.openTransaction();
+         ms.alterDatabase(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], newDB);
+ 
+         if (!transactionalListeners.isEmpty()) {
+           transactionalListenersResponses =
+               MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                   EventType.ALTER_DATABASE,
+                   new AlterDatabaseEvent(oldDB, newDB, true, this));
+         }
+ 
+         success = ms.commitTransaction();
+       } catch (MetaException|NoSuchObjectException e) {
+         ex = e;
+         throw e;
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+         }
+ 
+         if ((null != oldDB) && (!listeners.isEmpty())) {
+           MetaStoreListenerNotifier.notifyEvent(listeners,
+               EventType.ALTER_DATABASE,
+               new AlterDatabaseEvent(oldDB, newDB, success, this),
+               null,
+               transactionalListenersResponses, ms);
+         }
+         endFunction("alter_database", success, ex);
+       }
+     }
+ 
+     private void drop_database_core(RawStore ms, String catName,
+         final String name, final boolean deleteData, final boolean cascade)
+         throws NoSuchObjectException, InvalidOperationException, MetaException,
+         IOException, InvalidObjectException, InvalidInputException {
+       boolean success = false;
+       Database db = null;
+       List<Path> tablePaths = new ArrayList<>();
+       List<Path> partitionPaths = new ArrayList<>();
+       Map<String, String> transactionalListenerResponses = Collections.emptyMap();
+       if (name == null) {
+         throw new MetaException("Database name cannot be null.");
+       }
+       try {
+         ms.openTransaction();
+         db = ms.getDatabase(catName, name);
+ 
+         if (!isInTest && ReplChangeManager.isSourceOfReplication(db)) {
+           throw new InvalidOperationException("can not drop a database which is a source of replication");
+         }
+ 
+         firePreEvent(new PreDropDatabaseEvent(db, this));
+         String catPrependedName = MetaStoreUtils.prependCatalogToDbName(catName, name, conf);
+ 
+         Set<String> uniqueTableNames = new HashSet<>(get_all_tables(catPrependedName));
+         List<String> allFunctions = get_functions(catPrependedName, "*");
+ 
+         if (!cascade) {
+           if (!uniqueTableNames.isEmpty()) {
+             throw new InvalidOperationException(
+                 "Database " + db.getName() + " is not empty. One or more tables exist.");
+           }
+           if (!allFunctions.isEmpty()) {
+             throw new InvalidOperationException(
+                 "Database " + db.getName() + " is not empty. One or more functions exist.");
+           }
+         }
+         Path path = new Path(db.getLocationUri()).getParent();
+         if (!wh.isWritable(path)) {
+           throw new MetaException("Database not dropped since " +
+               path + " is not writable by " +
+               SecurityUtils.getUser());
+         }
+ 
+         Path databasePath = wh.getDnsPath(wh.getDatabasePath(db));
+ 
+         // drop any functions before dropping db
+         for (String funcName : allFunctions) {
+           drop_function(catPrependedName, funcName);
+         }
+ 
+         final int tableBatchSize = MetastoreConf.getIntVar(conf,
+             ConfVars.BATCH_RETRIEVE_MAX);
+ 
+         // First pass will drop the materialized views
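+         // (Dropping the views first avoids leaving views that reference tables dropped later in
+         // this method.)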
+         List<String> materializedViewNames = get_tables_by_type(name, ".*", TableType.MATERIALIZED_VIEW.toString());
+         int startIndex = 0;
+         // retrieve the tables from the metastore in batches to alleviate memory constraints
+         while (startIndex < materializedViewNames.size()) {
+           int endIndex = Math.min(startIndex + tableBatchSize, materializedViewNames.size());
+ 
+           List<Table> materializedViews;
+           try {
+             materializedViews = ms.getTableObjectsByName(catName, name, materializedViewNames.subList(startIndex, endIndex));
+           } catch (UnknownDBException e) {
+             throw new MetaException(e.getMessage());
+           }
+ 
+           if (materializedViews != null && !materializedViews.isEmpty()) {
+             for (Table materializedView : materializedViews) {
+               if (materializedView.getSd().getLocation() != null) {
+                 Path materializedViewPath = wh.getDnsPath(new Path(materializedView.getSd().getLocation()));
+                 if (!wh.isWritable(materializedViewPath.getParent())) {
+                   throw new MetaException("Database metadata not deleted since table: " +
+                       materializedView.getTableName() + " has a parent location " + materializedViewPath.getParent() +
+                       " which is not writable by " + SecurityUtils.getUser());
+                 }
+ 
+                 if (!FileUtils.isSubdirectory(databasePath.toString(),
+                     materializedViewPath.toString())) {
+                   tablePaths.add(materializedViewPath);
+                 }
+               }
+               // Drop the materialized view but not its data
+               drop_table(name, materializedView.getTableName(), false);
+               // Remove from all tables
+               uniqueTableNames.remove(materializedView.getTableName());
+             }
+           }
+           startIndex = endIndex;
+         }
+ 
+         // drop tables before dropping db
+         List<String> allTables = new ArrayList<>(uniqueTableNames);
+         startIndex = 0;
+         // retrieve the tables from the metastore in batches to alleviate memory constraints
+         while (startIndex < allTables.size()) {
+           int endIndex = Math.min(startIndex + tableBatchSize, allTables.size());
+ 
+           List<Table> tables;
+           try {
+             tables = ms.getTableObjectsByName(catName, name, allTables.subList(startIndex, endIndex));
+           } catch (UnknownDBException e) {
+             throw new MetaException(e.getMessage());
+           }
+ 
+           if (tables != null && !tables.isEmpty()) {
+             for (Table table : tables) {
+ 
+               // If the table is not external and might not be in a subdirectory of the database,
+               // add its location to the list of paths to delete.
+               Path tablePath = null;
+               boolean tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(table, deleteData);
+               if (table.getSd().getLocation() != null && tableDataShouldBeDeleted) {
+                 tablePath = wh.getDnsPath(new Path(table.getSd().getLocation()));
+                 if (!wh.isWritable(tablePath.getParent())) {
+                   throw new MetaException("Database metadata not deleted since table: " +
+                       table.getTableName() + " has a parent location " + tablePath.getParent() +
+                       " which is not writable by " + SecurityUtils.getUser());
+                 }
+ 
+                 if (!FileUtils.isSubdirectory(databasePath.toString(), tablePath.toString())) {
+                   tablePaths.add(tablePath);
+                 }
+               }
+ 
+               // For each partition in each table, drop the partitions and get a list of
+               // partitions' locations which might need to be deleted
+               partitionPaths = dropPartitionsAndGetLocations(ms, catName, name, table.getTableName(),
+                   tablePath, tableDataShouldBeDeleted);
+ 
+               // Drop the table but not its data
+               drop_table(MetaStoreUtils.prependCatalogToDbName(table.getCatName(), table.getDbName(), conf),
+                   table.getTableName(), false);
+             }
+           }
+           startIndex = endIndex;
+         }
+ 
+         if (ms.dropDatabase(catName, name)) {
+           if (!transactionalListeners.isEmpty()) {
+             transactionalListenerResponses =
+                 MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                                       EventType.DROP_DATABASE,
+                                                       new DropDatabaseEvent(db, true, this));
+           }
+ 
+           success = ms.commitTransaction();
+         }
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+         } else if (deleteData) {
+           // Delete the data in the partitions which have other locations
+           deletePartitionData(partitionPaths, false, db);
+           // Delete the data in the tables which have other locations
+           for (Path tablePath : tablePaths) {
+             deleteTableData(tablePath, false, db);
+           }
+           // Delete the data in the database
+           try {
+             wh.deleteDir(new Path(db.getLocationUri()), true, db);
+           } catch (Exception e) {
+             LOG.error("Failed to delete database directory: " + db.getLocationUri() +
+                 " " + e.getMessage());
+           }
+           // it is not a terrible thing even if the data is not deleted
+         }
+ 
+         if (!listeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(listeners,
+                                                 EventType.DROP_DATABASE,
+                                                 new DropDatabaseEvent(db, success, this),
+                                                 null,
+                                                 transactionalListenerResponses, ms);
+         }
+       }
+     }
+ 
+     @Override
+     public void drop_database(final String dbName, final boolean deleteData, final boolean cascade)
+         throws NoSuchObjectException, InvalidOperationException, MetaException {
+       startFunction("drop_database", ": " + dbName);
+       String[] parsedDbName = parseDbName(dbName, conf);
+       if (DEFAULT_CATALOG_NAME.equalsIgnoreCase(parsedDbName[CAT_NAME]) &&
+           DEFAULT_DATABASE_NAME.equalsIgnoreCase(parsedDbName[DB_NAME])) {
+         endFunction("drop_database", false, null);
+         throw new MetaException("Can not drop " + DEFAULT_DATABASE_NAME + " database in catalog "
+             + DEFAULT_CATALOG_NAME);
+       }
+ 
+       boolean success = false;
+       Exception ex = null;
+       try {
+         drop_database_core(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], deleteData,
+             cascade);
+         success = true;
+       } catch (NoSuchObjectException|InvalidOperationException|MetaException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("drop_database", success, ex);
+       }
+     }
+ 
+ 
+     @Override
+     public List<String> get_databases(final String pattern) throws MetaException {
+       startFunction("get_databases", ": " + pattern);
+ 
+       String[] parsedDbNamed = parseDbName(pattern, conf);
+       List<String> ret = null;
+       Exception ex = null;
+       try {
+         if (parsedDbNamed[DB_NAME] == null) {
+           ret = getMS().getAllDatabases(parsedDbNamed[CAT_NAME]);
+         } else {
+           ret = getMS().getDatabases(parsedDbNamed[CAT_NAME], parsedDbNamed[DB_NAME]);
+         }
+       } catch (MetaException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("get_databases", ret != null, ex);
+       }
+       return ret;
+     }
+ 
+     @Override
+     public List<String> get_all_databases() throws MetaException {
+       return get_databases(MetaStoreUtils.prependCatalogToDbName(null, null, conf));
+     }
+ 
+     private void create_type_core(final RawStore ms, final Type type)
+         throws AlreadyExistsException, MetaException, InvalidObjectException {
+       if (!MetaStoreUtils.validateName(type.getName(), null)) {
+         throw new InvalidObjectException("Invalid type name");
+       }
+ 
+       boolean success = false;
+       try {
+         ms.openTransaction();
+         if (is_type_exists(ms, type.getName())) {
+           throw new AlreadyExistsException("Type " + type.getName() + " already exists");
+         }
+         ms.createType(type);
+         success = ms.commitTransaction();
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+         }
+       }
+     }
+ 
+     @Override
+     public boolean create_type(final Type type) throws AlreadyExistsException,
+         MetaException, InvalidObjectException {
+       startFunction("create_type", ": " + type.toString());
+       boolean success = false;
+       Exception ex = null;
+       try {
+         create_type_core(getMS(), type);
+         success = true;
+       } catch (MetaException | InvalidObjectException | AlreadyExistsException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("create_type", success, ex);
+       }
+ 
+       return success;
+     }
+ 
+     @Override
+     public Type get_type(final String name) throws MetaException, NoSuchObjectException {
+       startFunction("get_type", ": " + name);
+ 
+       Type ret = null;
+       Exception ex = null;
+       try {
+         ret = getMS().getType(name);
+         if (null == ret) {
+           throw new NoSuchObjectException("Type \"" + name + "\" not found.");
+         }
+       } catch (Exception e) {
+         ex = e;
+         throwMetaException(e);
+       } finally {
+         endFunction("get_type", ret != null, ex);
+       }
+       return ret;
+     }
+ 
+     private boolean is_type_exists(RawStore ms, String typeName)
+         throws MetaException {
+       return (ms.getType(typeName) != null);
+     }
+ 
+     @Override
+     public boolean drop_type(final String name) throws MetaException, NoSuchObjectException {
+       startFunction("drop_type", ": " + name);
+ 
+       boolean success = false;
+       Exception ex = null;
+       try {
+         // TODO:pc validate that there are no types that refer to this
+         success = getMS().dropType(name);
+       } catch (Exception e) {
+         ex = e;
+         throwMetaException(e);
+       } finally {
+         endFunction("drop_type", success, ex);
+       }
+       return success;
+     }
+ 
+     @Override
+     public Map<String, Type> get_type_all(String name) throws MetaException {
+       // TODO Auto-generated method stub
+       startFunction("get_type_all", ": " + name);
+       endFunction("get_type_all", false, null);
+       throw new MetaException("Not yet implemented");
+     }
+ 
+     private void create_table_core(final RawStore ms, final Table tbl,
+         final EnvironmentContext envContext)
+             throws AlreadyExistsException, MetaException,
+             InvalidObjectException, NoSuchObjectException {
+       create_table_core(ms, tbl, envContext, null, null, null, null, null, null);
+     }
+ 
+     private void create_table_core(final RawStore ms, final Table tbl,
+         final EnvironmentContext envContext, List<SQLPrimaryKey> primaryKeys,
+         List<SQLForeignKey> foreignKeys, List<SQLUniqueConstraint> uniqueConstraints,
+         List<SQLNotNullConstraint> notNullConstraints, List<SQLDefaultConstraint> defaultConstraints,
+                                    List<SQLCheckConstraint> checkConstraints)
+         throws AlreadyExistsException, MetaException,
+         InvalidObjectException, NoSuchObjectException {
+       // To preserve backward compatibility, throw MetaException in case of a null database name.
+       if (tbl.getDbName() == null) {
+         throw new MetaException("Null database name is not allowed");
+       }
+ 
+       if (!MetaStoreUtils.validateName(tbl.getTableName(), conf)) {
+         throw new InvalidObjectException(tbl.getTableName()
+             + " is not a valid object name");
+       }
+       String validate = MetaStoreUtils.validateTblColumns(tbl.getSd().getCols());
+       if (validate != null) {
+         throw new InvalidObjectException("Invalid column " + validate);
+       }
+       if (tbl.getPartitionKeys() != null) {
+         validate = MetaStoreUtils.validateTblColumns(tbl.getPartitionKeys());
+         if (validate != null) {
+           throw new InvalidObjectException("Invalid partition column " + validate);
+         }
+       }
+       SkewedInfo skew = tbl.getSd().getSkewedInfo();
+       if (skew != null) {
+         validate = MetaStoreUtils.validateSkewedColNames(skew.getSkewedColNames());
+         if (validate != null) {
+           throw new InvalidObjectException("Invalid skew column " + validate);
+         }
+         validate = MetaStoreUtils.validateSkewedColNamesSubsetCol(
+             skew.getSkewedColNames(), tbl.getSd().getCols());
+         if (validate != null) {
+           throw new InvalidObjectException("Invalid skew column " + validate);
+         }
+       }
+ 
+       Map<String, String> transactionalListenerResponses = Collections.emptyMap();
+       Path tblPath = null;
+       boolean success = false, madeDir = false;
+       Database db = null;
+       try {
+         if (!tbl.isSetCatName()) {
+           tbl.setCatName(getDefaultCatalog(conf));
+         }
+         firePreEvent(new PreCreateTableEvent(tbl, this));
+ 
+         ms.openTransaction();
+ 
+         db = ms.getDatabase(tbl.getCatName(), tbl.getDbName());
+ 
+         // get_table checks whether database exists, it should be moved here
+         if (is_table_exists(ms, tbl.getCatName(), tbl.getDbName(), tbl.getTableName())) {
+           throw new AlreadyExistsException("Table " + getCatalogQualifiedTableName(tbl)
+               + " already exists");
+         }
+ 
+         if (!TableType.VIRTUAL_VIEW.toString().equals(tbl.getTableType())) {
+           if (tbl.getSd().getLocation() == null
+               || tbl.getSd().getLocation().isEmpty()) {
+             tblPath = wh.getDefaultTablePath(db, tbl);
+           } else {
+             if (!isExternal(tbl) && !MetaStoreUtils.isNonNativeTable(tbl)) {
+               LOG.warn("Location: " + tbl.getSd().getLocation()
+                   + " specified for non-external table:" + tbl.getTableName());
+             }
+             tblPath = wh.getDnsPath(new Path(tbl.getSd().getLocation()));
+           }
+           tbl.getSd().setLocation(tblPath.toString());
+         }
+ 
+         if (tblPath != null) {
+           if (!wh.isDir(tblPath)) {
+             if (!wh.mkdirs(tblPath)) {
+               throw new MetaException(tblPath
+                   + " is not a directory or unable to create one");
+             }
+             madeDir = true;
+           }
+         }
+         if (MetastoreConf.getBoolVar(conf, ConfVars.STATS_AUTO_GATHER) &&
+             !MetaStoreUtils.isView(tbl)) {
+           MetaStoreUtils.updateTableStatsSlow(db, tbl, wh, madeDir, false, envContext);
+         }
+ 
+         // set create time
+         long time = System.currentTimeMillis() / 1000;
+         tbl.setCreateTime((int) time);
+         if (tbl.getParameters() == null ||
+             tbl.getParameters().get(hive_metastoreConstants.DDL_TIME) == null) {
+           tbl.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));
+         }
+ 
+         if (primaryKeys == null && foreignKeys == null
+                 && uniqueConstraints == null && notNullConstraints == null && defaultConstraints == null
+             && checkConstraints == null) {
+           ms.createTable(tbl);
+         } else {
+           // Check that constraints have catalog name properly set first
+           if (primaryKeys != null && !primaryKeys.isEmpty() && !primaryKeys.get(0).isSetCatName()) {
+             for (SQLPrimaryKey pkcol : primaryKeys) pkcol.setCatName(tbl.getCatName());
+           }
+           if (foreignKeys != null && !foreignKeys.isEmpty() && !foreignKeys.get(0).isSetCatName()) {
+             for (SQLForeignKey fkcol : foreignKeys) fkcol.setCatName(tbl.getCatName());
+           }
+           if (uniqueConstraints != null && !uniqueConstraints.isEmpty() && !uniqueConstraints.get(0).isSetCatName()) {
+             for (SQLUniqueConstraint uccol : uniqueConstraints) uccol.setCatName(tbl.getCatName());
+           }
+           if (notNullConstraints != null && !notNullConstraints.isEmpty() && !notNullConstraints.get(0).isSetCatName()) {
+             for (SQLNotNullConstraint nncol : notNullConstraints) nncol.setCatName(tbl.getCatName());
+           }
+           if (defaultConstraints != null && !defaultConstraints.isEmpty() && !defaultConstraints.get(0).isSetCatName()) {
+             for (SQLDefaultConstraint dccol : defaultConstraints) dccol.setCatName(tbl.getCatName());
+           }
+           if (checkConstraints != null && !checkConstraints.isEmpty() && !checkConstraints.get(0).isSetCatName()) {
+             for (SQLCheckConstraint cccol : checkConstraints) cccol.setCatName(tbl.getCatName());
+           }
+           // Set constraint name if null before sending to listener
+           List<String> constraintNames = ms.createTableWithConstraints(tbl, primaryKeys, foreignKeys,
+               uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
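+           // The returned constraintNames are assumed to be ordered as primary keys, foreign keys,
+           // unique, not-null, default, then check constraints; the offset arithmetic below relies
+           // on that ordering.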
+           int primaryKeySize = 0;
+           if (primaryKeys != null) {
+             primaryKeySize = primaryKeys.size();
+             for (int i = 0; i < primaryKeys.size(); i++) {
+               if (primaryKeys.get(i).getPk_name() == null) {
+                 primaryKeys.get(i).setPk_name(constraintNames.get(i));
+               }
+               if (!primaryKeys.get(i).isSetCatName()) primaryKeys.get(i).setCatName(tbl.getCatName());
+             }
+           }
+           int foreignKeySize = 0;
+           if (foreignKeys != null) {
+             foreignKeySize = foreignKeys.size();
+             for (int i = 0; i < foreignKeySize; i++) {
+               if (foreignKeys.get(i).getFk_name() == null) {
+                 foreignKeys.get(i).setFk_name(constraintNames.get(primaryKeySize + i));
+               }
+               if (!foreignKeys.get(i).isSetCatName()) foreignKeys.get(i).setCatName(tbl.getCatName());
+             }
+           }
+           int uniqueConstraintSize = 0;
+           if (uniqueConstraints != null) {
+             uniqueConstraintSize = uniqueConstraints.size();
+             for (int i = 0; i < uniqueConstraintSize; i++) {
+               if (uniqueConstraints.get(i).getUk_name() == null) {
+                 uniqueConstraints.get(i).setUk_name(constraintNames.get(primaryKeySize + foreignKeySize + i));
+               }
+               if (!uniqueConstraints.get(i).isSetCatName()) uniqueConstraints.get(i).setCatName(tbl.getCatName());
+             }
+           }
+           int notNullConstraintSize = 0;
+           if (notNullConstraints != null) {
+             notNullConstraintSize = notNullConstraints.size();
+             for (int i = 0; i < notNullConstraintSize; i++) {
+               if (notNullConstraints.get(i).getNn_name() == null) {
+                 notNullConstraints.get(i).setNn_name(constraintNames.get(primaryKeySize + foreignKeySize + uniqueConstraintSize + i));
+               }
+               if (!notNullConstraints.get(i).isSetCatName()) notNullConstraints.get(i).setCatName(tbl.getCatName());
+             }
+           }
+           int defaultConstraintSize = 0;
+           if (defaultConstraints != null) {
+             defaultConstraintSize = defaultConstraints.size();
+             for (int i = 0; i < defaultConstraintSize; i++) {
+               if (defaultConstraints.get(i).getDc_name() == null) {
+                 defaultConstraints.get(i).setDc_name(constraintNames.get(primaryKeySize + foreignKeySize
+                     + uniqueConstraintSize + notNullConstraintSize + i));
+               }
+               if (!defaultConstraints.get(i).isSetCatName()) defaultConstraints.get(i).setCatName(tbl.getCatName());
+             }
+           }
+           if (checkConstraints != null) {
+             for (int i = 0; i < checkConstraints.size(); i++) {
+               if (checkConstraints.get(i).getDc_name() == null) {
+                 checkConstraints.get(i).setDc_name(constraintNames.get(primaryKeySize + foreignKeySize
+                     + uniqueConstraintSize + defaultConstraintSize + notNullConstraintSize + i));
+               }
+               if (!checkConstraints.get(i).isSetCatName()) checkConstraints.get(i).setCatName(tbl.getCatName());
+             }
+           }
+         }
+ 
+         if (!transactionalListeners.isEmpty()) {
+           transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+               EventType.CREATE_TABLE, new CreateTableEvent(tbl, true, this), envContext);
+           if (primaryKeys != null && !primaryKeys.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_PRIMARYKEY,
+                 new AddPrimaryKeyEvent(primaryKeys, true, this), envContext);
+           }
+           if (foreignKeys != null && !foreignKeys.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_FOREIGNKEY,
+                 new AddForeignKeyEvent(foreignKeys, true, this), envContext);
+           }
+           if (uniqueConstraints != null && !uniqueConstraints.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_UNIQUECONSTRAINT,
+                 new AddUniqueConstraintEvent(uniqueConstraints, true, this), envContext);
+           }
+           if (notNullConstraints != null && !notNullConstraints.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_NOTNULLCONSTRAINT,
+                 new AddNotNullConstraintEvent(notNullConstraints, true, this), envContext);
+           }
+         }
+ 
+         success = ms.commitTransaction();
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+           if (madeDir) {
+             wh.deleteDir(tblPath, true, db);
+           }
+         }
+ 
+         if (!listeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(listeners, EventType.CREATE_TABLE,
+               new CreateTableEvent(tbl, success, this), envContext, transactionalListenerResponses, ms);
+           if (primaryKeys != null && !primaryKeys.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PRIMARYKEY,
+                 new AddPrimaryKeyEvent(primaryKeys, success, this), envContext);
+           }
+           if (foreignKeys != null && !foreignKeys.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_FOREIGNKEY,
+                 new AddForeignKeyEvent(foreignKeys, success, this), envContext);
+           }
+           if (uniqueConstraints != null && !uniqueConstraints.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_UNIQUECONSTRAINT,
+                 new AddUniqueConstraintEvent(uniqueConstraints, success, this), envContext);
+           }
+           if (notNullConstraints != null && !notNullConstraints.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_NOTNULLCONSTRAINT,
+                 new AddNotNullConstraintEvent(notNullConstraints, success, this), envContext);
+           }
+         }
+       }
+     }
+ 
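
The constraint back-fill above relies on one piece of bookkeeping: ms.createTableWithConstraints() returns a single flat list of generated names, ordered primary keys first, then foreign keys, unique, not-null, default and check constraints, and each block indexes into that list with the running per-type offsets. A minimal standalone sketch of that indexing follows; the class and variable names are illustrative only and are not metastore code.

    import java.util.Arrays;
    import java.util.List;

    // Hypothetical demo of the offset arithmetic used above: one flat list of
    // generated names is consumed per constraint type, in a fixed order.
    public class ConstraintNameOffsets {
      public static void main(String[] args) {
        // Pretend the store generated these names, in order: 2 PKs, 1 FK, 1 UNIQUE.
        List<String> generated = Arrays.asList("pk_1", "pk_2", "fk_1", "uk_1");
        int primaryKeySize = 2;
        int foreignKeySize = 1;

        // Name of the i-th unique constraint = generated.get(pkSize + fkSize + i).
        int i = 0;
        String uniqueName = generated.get(primaryKeySize + foreignKeySize + i);
        System.out.println(uniqueName); // prints uk_1
      }
    }
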
+     @Override
+     public void create_table(final Table tbl) throws AlreadyExistsException,
+         MetaException, InvalidObjectException {
+       create_table_with_environment_context(tbl, null);
+     }
+ 
+     @Override
+     public void create_table_with_environment_context(final Table tbl,
+         final EnvironmentContext envContext)
+         throws AlreadyExistsException, MetaException, InvalidObjectException {
+       startFunction("create_table", ": " + tbl.toString());
+       boolean success = false;
+       Exception ex = null;
+       try {
+         create_table_core(getMS(), tbl, envContext);
+         success = true;
+       } catch (NoSuchObjectException e) {
+         LOG.warn("create_table_with_environment_context got ", e);
+         ex = e;
+         throw new InvalidObjectException(e.getMessage());
+       } catch (MetaException | InvalidObjectException | AlreadyExistsException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("create_table", success, ex, tbl.getTableName());
+       }
+     }
+ 
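
The wrapper above uses translation rules shared by the other create-table entry points: NoSuchObjectException (for example, a missing database) is surfaced to Thrift clients as InvalidObjectException, exceptions already declared on the Thrift method are rethrown unchanged, and anything else is wrapped into a MetaException, while endFunction always records the outcome. A condensed sketch of that shape is below, assuming only the generated metastore exception classes; it is not the real handler.

    import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
    import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

    // Condensed sketch (not the real handler) of the translation rules used by
    // create_table_with_environment_context and create_table_with_constraints.
    final class CreateTableExceptionShape {
      interface Body { void run() throws Exception; }

      static void invoke(Body body)
          throws AlreadyExistsException, MetaException, InvalidObjectException {
        try {
          body.run();                    // e.g. create_table_core(...)
        } catch (NoSuchObjectException e) {
          // A missing database surfaces as InvalidObjectException to the client.
          throw new InvalidObjectException(e.getMessage());
        } catch (MetaException | InvalidObjectException | AlreadyExistsException e) {
          throw e;                       // already declared on the Thrift method
        } catch (Exception e) {
          throw new MetaException(e.getMessage()); // stands in for newMetaException(e)
        }
      }
    }
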
+     @Override
+     public void create_table_with_constraints(final Table tbl,
+         final List<SQLPrimaryKey> primaryKeys, final List<SQLForeignKey> foreignKeys,
+         List<SQLUniqueConstraint> uniqueConstraints,
+         List<SQLNotNullConstraint> notNullConstraints,
+         List<SQLDefaultConstraint> defaultConstraints,
+         List<SQLCheckConstraint> checkConstraints)
+         throws AlreadyExistsException, MetaException, InvalidObjectException {
+       startFunction("create_table", ": " + tbl.toString());
+       boolean success = false;
+       Exception ex = null;
+       try {
+         create_table_core(getMS(), tbl, null, primaryKeys, foreignKeys,
+             uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
+         success = true;
+       } catch (NoSuchObjectException e) {
+         ex = e;
+         throw new InvalidObjectException(e.getMessage());
+       } catch (MetaException | InvalidObjectException | AlreadyExistsException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("create_table", success, ex, tbl.getTableName());
+       }
+     }
+ 
+     @Override
+     public void drop_constraint(DropConstraintRequest req)
+         throws MetaException, InvalidObjectException {
+       String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf);
+       String dbName = req.getDbname();
+       String tableName = req.getTablename();
+       String constraintName = req.getConstraintname();
+       startFunction("drop_constraint", ": " + constraintName);
+       boolean success = false;
+       Exception ex = null;
+       RawStore ms = getMS();
+       try {
+         ms.openTransaction();
+         ms.dropConstraint(catName, dbName, tableName, constraintName);
+         if (!transactionalListeners.isEmpty()) {
+           DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(catName, dbName,
+               tableName, constraintName, true, this);
+           for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+             transactionalListener.onDropConstraint(dropConstraintEvent);
+           }
+         }
+         success = ms.commitTransaction();
+       } catch (NoSuchObjectException e) {
+         ex = e;
+         throw new InvalidObjectException(e.getMessage());
+       } catch (MetaException e)

<TRUNCATED>
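
Both create_table_core and the (truncated) drop_constraint body follow the same two-stage notification contract: transactional listeners are notified while the RawStore transaction is still open, so their side effects commit or roll back with the metadata change, and ordinary listeners are notified afterwards with the final success flag. A minimal sketch of that shape is below, using stand-in listener and store interfaces rather than the metastore types.

    import java.util.List;

    // Hedged sketch of the two-phase listener notification used above;
    // ListenerStub and StoreStub are illustrative stand-ins, not metastore classes.
    final class ListenerNotificationShape {
      interface ListenerStub { void onEvent(String event, boolean success); }
      interface StoreStub {
        void openTransaction();
        boolean commitTransaction();
        void rollbackTransaction();
      }

      static void dropSomething(StoreStub ms, List<ListenerStub> transactionalListeners,
                                List<ListenerStub> listeners) {
        boolean success = false;
        try {
          ms.openTransaction();
          // ... mutate metadata here ...
          for (ListenerStub l : transactionalListeners) {
            l.onEvent("DROP_CONSTRAINT", true);    // fired inside the open transaction
          }
          success = ms.commitTransaction();
        } finally {
          if (!success) {
            ms.rollbackTransaction();              // undo the metadata change
          }
          for (ListenerStub l : listeners) {
            l.onEvent("DROP_CONSTRAINT", success); // fired after the outcome is known
          }
        }
      }
    }
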

[22/54] [abbrv] hive git commit: HIVE-19532 : fix tests for master-txnstats branch - more tests (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f0a2fffa/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 33d2be7..cc19f23 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -761,6 +761,13 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
    */
   public function rename_partition($db_name, $tbl_name, array $part_vals, \metastore\Partition $new_part);
   /**
+   * @param \metastore\RenamePartitionRequest $req
+   * @return \metastore\RenamePartitionResponse
+   * @throws \metastore\InvalidOperationException
+   * @throws \metastore\MetaException
+   */
+  public function rename_partition_req(\metastore\RenamePartitionRequest $req);
+  /**
    * @param string[] $part_vals
    * @param bool $throw_exception
    * @return bool
@@ -6776,6 +6783,63 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
     return;
   }
 
+  public function rename_partition_req(\metastore\RenamePartitionRequest $req)
+  {
+    $this->send_rename_partition_req($req);
+    return $this->recv_rename_partition_req();
+  }
+
+  public function send_rename_partition_req(\metastore\RenamePartitionRequest $req)
+  {
+    $args = new \metastore\ThriftHiveMetastore_rename_partition_req_args();
+    $args->req = $req;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'rename_partition_req', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('rename_partition_req', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_rename_partition_req()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_rename_partition_req_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_rename_partition_req_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    throw new \Exception("rename_partition_req failed: unknown result");
+  }
+
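
The new PHP client methods above form the usual synchronous wrapper: send_rename_partition_req() serializes the request (using the accelerated binary protocol when available) and recv_rename_partition_req() reads back either the response or one of the declared exceptions. For orientation, a hedged sketch of driving the corresponding Thrift-generated Java client directly follows; in practice HiveMetaStoreClient wraps this, and it is an assumption here that the Java binding added by this change mirrors the PHP signature shown.

    import org.apache.hadoop.hive.metastore.api.RenamePartitionRequest;
    import org.apache.hadoop.hive.metastore.api.RenamePartitionResponse;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    // Minimal, assumption-laden sketch of calling the new RPC against a metastore
    // listening on the default Thrift port; error handling omitted for brevity.
    public class RenamePartitionReqExample {
      public static void main(String[] args) throws Exception {
        TTransport transport = new TSocket("localhost", 9083);
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

        RenamePartitionRequest req = new RenamePartitionRequest();
        // Populate database/table names, the old partition values and the new
        // Partition object here; the exact bean setters come from the generated
        // Thrift code and are not spelled out in this sketch.
        RenamePartitionResponse resp = client.rename_partition_req(req);
        System.out.println("rename_partition_req returned: " + resp);

        transport.close();
      }
    }
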
   public function partition_name_has_valid_characters(array $part_vals, $throw_exception)
   {
     $this->send_partition_name_has_valid_characters($part_vals, $throw_exception);
@@ -15828,14 +15892,14 @@ class ThriftHiveMetastore_get_databases_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size847 = 0;
-            $_etype850 = 0;
-            $xfer += $input->readListBegin($_etype850, $_size847);
-            for ($_i851 = 0; $_i851 < $_size847; ++$_i851)
+            $_size854 = 0;
+            $_etype857 = 0;
+            $xfer += $input->readListBegin($_etype857, $_size854);
+            for ($_i858 = 0; $_i858 < $_size854; ++$_i858)
             {
-              $elem852 = null;
-              $xfer += $input->readString($elem852);
-              $this->success []= $elem852;
+              $elem859 = null;
+              $xfer += $input->readString($elem859);
+              $this->success []= $elem859;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -15871,9 +15935,9 @@ class ThriftHiveMetastore_get_databases_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter853)
+          foreach ($this->success as $iter860)
           {
-            $xfer += $output->writeString($iter853);
+            $xfer += $output->writeString($iter860);
           }
         }
         $output->writeListEnd();
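
The remaining hunks in this generated file are regeneration noise: adding rename_partition_req shifts the Thrift compiler's counter for synthetic temporaries, so $_size847 becomes $_size854 and so on, while the serialization logic and wire format are untouched. For readers unfamiliar with the pattern, the equivalent generated shape in Java is sketched below using the standard libthrift API; this sketch is not part of the patch.

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TList;
    import org.apache.thrift.protocol.TProtocol;
    import org.apache.thrift.protocol.TType;

    // Hedged sketch of the same generated list (de)serialization shape in Java;
    // only the temporary variable names differ between regenerations.
    public class ThriftStringListCodec {
      public static List<String> readStringList(TProtocol iprot) throws TException {
        TList tlist = iprot.readListBegin();      // element type + size header
        List<String> result = new ArrayList<>(tlist.size);
        for (int i = 0; i < tlist.size; ++i) {
          result.add(iprot.readString());         // one element per iteration
        }
        iprot.readListEnd();
        return result;
      }

      public static void writeStringList(TProtocol oprot, List<String> values) throws TException {
        oprot.writeListBegin(new TList(TType.STRING, values.size()));
        for (String v : values) {
          oprot.writeString(v);
        }
        oprot.writeListEnd();
      }
    }
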
@@ -16004,14 +16068,14 @@ class ThriftHiveMetastore_get_all_databases_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size854 = 0;
-            $_etype857 = 0;
-            $xfer += $input->readListBegin($_etype857, $_size854);
-            for ($_i858 = 0; $_i858 < $_size854; ++$_i858)
+            $_size861 = 0;
+            $_etype864 = 0;
+            $xfer += $input->readListBegin($_etype864, $_size861);
+            for ($_i865 = 0; $_i865 < $_size861; ++$_i865)
             {
-              $elem859 = null;
-              $xfer += $input->readString($elem859);
-              $this->success []= $elem859;
+              $elem866 = null;
+              $xfer += $input->readString($elem866);
+              $this->success []= $elem866;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16047,9 +16111,9 @@ class ThriftHiveMetastore_get_all_databases_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter860)
+          foreach ($this->success as $iter867)
           {
-            $xfer += $output->writeString($iter860);
+            $xfer += $output->writeString($iter867);
           }
         }
         $output->writeListEnd();
@@ -17050,18 +17114,18 @@ class ThriftHiveMetastore_get_type_all_result {
         case 0:
           if ($ftype == TType::MAP) {
             $this->success = array();
-            $_size861 = 0;
-            $_ktype862 = 0;
-            $_vtype863 = 0;
-            $xfer += $input->readMapBegin($_ktype862, $_vtype863, $_size861);
-            for ($_i865 = 0; $_i865 < $_size861; ++$_i865)
+            $_size868 = 0;
+            $_ktype869 = 0;
+            $_vtype870 = 0;
+            $xfer += $input->readMapBegin($_ktype869, $_vtype870, $_size868);
+            for ($_i872 = 0; $_i872 < $_size868; ++$_i872)
             {
-              $key866 = '';
-              $val867 = new \metastore\Type();
-              $xfer += $input->readString($key866);
-              $val867 = new \metastore\Type();
-              $xfer += $val867->read($input);
-              $this->success[$key866] = $val867;
+              $key873 = '';
+              $val874 = new \metastore\Type();
+              $xfer += $input->readString($key873);
+              $val874 = new \metastore\Type();
+              $xfer += $val874->read($input);
+              $this->success[$key873] = $val874;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -17097,10 +17161,10 @@ class ThriftHiveMetastore_get_type_all_result {
       {
         $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $kiter868 => $viter869)
+          foreach ($this->success as $kiter875 => $viter876)
           {
-            $xfer += $output->writeString($kiter868);
-            $xfer += $viter869->write($output);
+            $xfer += $output->writeString($kiter875);
+            $xfer += $viter876->write($output);
           }
         }
         $output->writeMapEnd();
@@ -17304,15 +17368,15 @@ class ThriftHiveMetastore_get_fields_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size870 = 0;
-            $_etype873 = 0;
-            $xfer += $input->readListBegin($_etype873, $_size870);
-            for ($_i874 = 0; $_i874 < $_size870; ++$_i874)
+            $_size877 = 0;
+            $_etype880 = 0;
+            $xfer += $input->readListBegin($_etype880, $_size877);
+            for ($_i881 = 0; $_i881 < $_size877; ++$_i881)
             {
-              $elem875 = null;
-              $elem875 = new \metastore\FieldSchema();
-              $xfer += $elem875->read($input);
-              $this->success []= $elem875;
+              $elem882 = null;
+              $elem882 = new \metastore\FieldSchema();
+              $xfer += $elem882->read($input);
+              $this->success []= $elem882;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17364,9 +17428,9 @@ class ThriftHiveMetastore_get_fields_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter876)
+          foreach ($this->success as $iter883)
           {
-            $xfer += $iter876->write($output);
+            $xfer += $iter883->write($output);
           }
         }
         $output->writeListEnd();
@@ -17608,15 +17672,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size877 = 0;
-            $_etype880 = 0;
-            $xfer += $input->readListBegin($_etype880, $_size877);
-            for ($_i881 = 0; $_i881 < $_size877; ++$_i881)
+            $_size884 = 0;
+            $_etype887 = 0;
+            $xfer += $input->readListBegin($_etype887, $_size884);
+            for ($_i888 = 0; $_i888 < $_size884; ++$_i888)
             {
-              $elem882 = null;
-              $elem882 = new \metastore\FieldSchema();
-              $xfer += $elem882->read($input);
-              $this->success []= $elem882;
+              $elem889 = null;
+              $elem889 = new \metastore\FieldSchema();
+              $xfer += $elem889->read($input);
+              $this->success []= $elem889;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17668,9 +17732,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter883)
+          foreach ($this->success as $iter890)
           {
-            $xfer += $iter883->write($output);
+            $xfer += $iter890->write($output);
           }
         }
         $output->writeListEnd();
@@ -17884,15 +17948,15 @@ class ThriftHiveMetastore_get_schema_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size884 = 0;
-            $_etype887 = 0;
-            $xfer += $input->readListBegin($_etype887, $_size884);
-            for ($_i888 = 0; $_i888 < $_size884; ++$_i888)
+            $_size891 = 0;
+            $_etype894 = 0;
+            $xfer += $input->readListBegin($_etype894, $_size891);
+            for ($_i895 = 0; $_i895 < $_size891; ++$_i895)
             {
-              $elem889 = null;
-              $elem889 = new \metastore\FieldSchema();
-              $xfer += $elem889->read($input);
-              $this->success []= $elem889;
+              $elem896 = null;
+              $elem896 = new \metastore\FieldSchema();
+              $xfer += $elem896->read($input);
+              $this->success []= $elem896;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17944,9 +18008,9 @@ class ThriftHiveMetastore_get_schema_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter890)
+          foreach ($this->success as $iter897)
           {
-            $xfer += $iter890->write($output);
+            $xfer += $iter897->write($output);
           }
         }
         $output->writeListEnd();
@@ -18188,15 +18252,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size891 = 0;
-            $_etype894 = 0;
-            $xfer += $input->readListBegin($_etype894, $_size891);
-            for ($_i895 = 0; $_i895 < $_size891; ++$_i895)
+            $_size898 = 0;
+            $_etype901 = 0;
+            $xfer += $input->readListBegin($_etype901, $_size898);
+            for ($_i902 = 0; $_i902 < $_size898; ++$_i902)
             {
-              $elem896 = null;
-              $elem896 = new \metastore\FieldSchema();
-              $xfer += $elem896->read($input);
-              $this->success []= $elem896;
+              $elem903 = null;
+              $elem903 = new \metastore\FieldSchema();
+              $xfer += $elem903->read($input);
+              $this->success []= $elem903;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -18248,9 +18312,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter897)
+          foreach ($this->success as $iter904)
           {
-            $xfer += $iter897->write($output);
+            $xfer += $iter904->write($output);
           }
         }
         $output->writeListEnd();
@@ -18922,15 +18986,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
         case 2:
           if ($ftype == TType::LST) {
             $this->primaryKeys = array();
-            $_size898 = 0;
-            $_etype901 = 0;
-            $xfer += $input->readListBegin($_etype901, $_size898);
-            for ($_i902 = 0; $_i902 < $_size898; ++$_i902)
+            $_size905 = 0;
+            $_etype908 = 0;
+            $xfer += $input->readListBegin($_etype908, $_size905);
+            for ($_i909 = 0; $_i909 < $_size905; ++$_i909)
             {
-              $elem903 = null;
-              $elem903 = new \metastore\SQLPrimaryKey();
-              $xfer += $elem903->read($input);
-              $this->primaryKeys []= $elem903;
+              $elem910 = null;
+              $elem910 = new \metastore\SQLPrimaryKey();
+              $xfer += $elem910->read($input);
+              $this->primaryKeys []= $elem910;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -18940,15 +19004,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->foreignKeys = array();
-            $_size904 = 0;
-            $_etype907 = 0;
-            $xfer += $input->readListBegin($_etype907, $_size904);
-            for ($_i908 = 0; $_i908 < $_size904; ++$_i908)
+            $_size911 = 0;
+            $_etype914 = 0;
+            $xfer += $input->readListBegin($_etype914, $_size911);
+            for ($_i915 = 0; $_i915 < $_size911; ++$_i915)
             {
-              $elem909 = null;
-              $elem909 = new \metastore\SQLForeignKey();
-              $xfer += $elem909->read($input);
-              $this->foreignKeys []= $elem909;
+              $elem916 = null;
+              $elem916 = new \metastore\SQLForeignKey();
+              $xfer += $elem916->read($input);
+              $this->foreignKeys []= $elem916;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -18958,15 +19022,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
         case 4:
           if ($ftype == TType::LST) {
             $this->uniqueConstraints = array();
-            $_size910 = 0;
-            $_etype913 = 0;
-            $xfer += $input->readListBegin($_etype913, $_size910);
-            for ($_i914 = 0; $_i914 < $_size910; ++$_i914)
+            $_size917 = 0;
+            $_etype920 = 0;
+            $xfer += $input->readListBegin($_etype920, $_size917);
+            for ($_i921 = 0; $_i921 < $_size917; ++$_i921)
             {
-              $elem915 = null;
-              $elem915 = new \metastore\SQLUniqueConstraint();
-              $xfer += $elem915->read($input);
-              $this->uniqueConstraints []= $elem915;
+              $elem922 = null;
+              $elem922 = new \metastore\SQLUniqueConstraint();
+              $xfer += $elem922->read($input);
+              $this->uniqueConstraints []= $elem922;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -18976,15 +19040,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
         case 5:
           if ($ftype == TType::LST) {
             $this->notNullConstraints = array();
-            $_size916 = 0;
-            $_etype919 = 0;
-            $xfer += $input->readListBegin($_etype919, $_size916);
-            for ($_i920 = 0; $_i920 < $_size916; ++$_i920)
+            $_size923 = 0;
+            $_etype926 = 0;
+            $xfer += $input->readListBegin($_etype926, $_size923);
+            for ($_i927 = 0; $_i927 < $_size923; ++$_i927)
             {
-              $elem921 = null;
-              $elem921 = new \metastore\SQLNotNullConstraint();
-              $xfer += $elem921->read($input);
-              $this->notNullConstraints []= $elem921;
+              $elem928 = null;
+              $elem928 = new \metastore\SQLNotNullConstraint();
+              $xfer += $elem928->read($input);
+              $this->notNullConstraints []= $elem928;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -18994,15 +19058,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
         case 6:
           if ($ftype == TType::LST) {
             $this->defaultConstraints = array();
-            $_size922 = 0;
-            $_etype925 = 0;
-            $xfer += $input->readListBegin($_etype925, $_size922);
-            for ($_i926 = 0; $_i926 < $_size922; ++$_i926)
+            $_size929 = 0;
+            $_etype932 = 0;
+            $xfer += $input->readListBegin($_etype932, $_size929);
+            for ($_i933 = 0; $_i933 < $_size929; ++$_i933)
             {
-              $elem927 = null;
-              $elem927 = new \metastore\SQLDefaultConstraint();
-              $xfer += $elem927->read($input);
-              $this->defaultConstraints []= $elem927;
+              $elem934 = null;
+              $elem934 = new \metastore\SQLDefaultConstraint();
+              $xfer += $elem934->read($input);
+              $this->defaultConstraints []= $elem934;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -19012,15 +19076,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
         case 7:
           if ($ftype == TType::LST) {
             $this->checkConstraints = array();
-            $_size928 = 0;
-            $_etype931 = 0;
-            $xfer += $input->readListBegin($_etype931, $_size928);
-            for ($_i932 = 0; $_i932 < $_size928; ++$_i932)
+            $_size935 = 0;
+            $_etype938 = 0;
+            $xfer += $input->readListBegin($_etype938, $_size935);
+            for ($_i939 = 0; $_i939 < $_size935; ++$_i939)
             {
-              $elem933 = null;
-              $elem933 = new \metastore\SQLCheckConstraint();
-              $xfer += $elem933->read($input);
-              $this->checkConstraints []= $elem933;
+              $elem940 = null;
+              $elem940 = new \metastore\SQLCheckConstraint();
+              $xfer += $elem940->read($input);
+              $this->checkConstraints []= $elem940;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -19056,9 +19120,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->primaryKeys));
         {
-          foreach ($this->primaryKeys as $iter934)
+          foreach ($this->primaryKeys as $iter941)
           {
-            $xfer += $iter934->write($output);
+            $xfer += $iter941->write($output);
           }
         }
         $output->writeListEnd();
@@ -19073,9 +19137,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->foreignKeys));
         {
-          foreach ($this->foreignKeys as $iter935)
+          foreach ($this->foreignKeys as $iter942)
           {
-            $xfer += $iter935->write($output);
+            $xfer += $iter942->write($output);
           }
         }
         $output->writeListEnd();
@@ -19090,9 +19154,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints));
         {
-          foreach ($this->uniqueConstraints as $iter936)
+          foreach ($this->uniqueConstraints as $iter943)
           {
-            $xfer += $iter936->write($output);
+            $xfer += $iter943->write($output);
           }
         }
         $output->writeListEnd();
@@ -19107,9 +19171,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints));
         {
-          foreach ($this->notNullConstraints as $iter937)
+          foreach ($this->notNullConstraints as $iter944)
           {
-            $xfer += $iter937->write($output);
+            $xfer += $iter944->write($output);
           }
         }
         $output->writeListEnd();
@@ -19124,9 +19188,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints));
         {
-          foreach ($this->defaultConstraints as $iter938)
+          foreach ($this->defaultConstraints as $iter945)
           {
-            $xfer += $iter938->write($output);
+            $xfer += $iter945->write($output);
           }
         }
         $output->writeListEnd();
@@ -19141,9 +19205,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->checkConstraints));
         {
-          foreach ($this->checkConstraints as $iter939)
+          foreach ($this->checkConstraints as $iter946)
           {
-            $xfer += $iter939->write($output);
+            $xfer += $iter946->write($output);
           }
         }
         $output->writeListEnd();
@@ -21143,14 +21207,14 @@ class ThriftHiveMetastore_truncate_table_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->partNames = array();
-            $_size940 = 0;
-            $_etype943 = 0;
-            $xfer += $input->readListBegin($_etype943, $_size940);
-            for ($_i944 = 0; $_i944 < $_size940; ++$_i944)
+            $_size947 = 0;
+            $_etype950 = 0;
+            $xfer += $input->readListBegin($_etype950, $_size947);
+            for ($_i951 = 0; $_i951 < $_size947; ++$_i951)
             {
-              $elem945 = null;
-              $xfer += $input->readString($elem945);
-              $this->partNames []= $elem945;
+              $elem952 = null;
+              $xfer += $input->readString($elem952);
+              $this->partNames []= $elem952;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21188,9 +21252,9 @@ class ThriftHiveMetastore_truncate_table_args {
       {
         $output->writeListBegin(TType::STRING, count($this->partNames));
         {
-          foreach ($this->partNames as $iter946)
+          foreach ($this->partNames as $iter953)
           {
-            $xfer += $output->writeString($iter946);
+            $xfer += $output->writeString($iter953);
           }
         }
         $output->writeListEnd();
@@ -21626,14 +21690,14 @@ class ThriftHiveMetastore_get_tables_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size947 = 0;
-            $_etype950 = 0;
-            $xfer += $input->readListBegin($_etype950, $_size947);
-            for ($_i951 = 0; $_i951 < $_size947; ++$_i951)
+            $_size954 = 0;
+            $_etype957 = 0;
+            $xfer += $input->readListBegin($_etype957, $_size954);
+            for ($_i958 = 0; $_i958 < $_size954; ++$_i958)
             {
-              $elem952 = null;
-              $xfer += $input->readString($elem952);
-              $this->success []= $elem952;
+              $elem959 = null;
+              $xfer += $input->readString($elem959);
+              $this->success []= $elem959;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21669,9 +21733,9 @@ class ThriftHiveMetastore_get_tables_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter953)
+          foreach ($this->success as $iter960)
           {
-            $xfer += $output->writeString($iter953);
+            $xfer += $output->writeString($iter960);
           }
         }
         $output->writeListEnd();
@@ -21873,14 +21937,14 @@ class ThriftHiveMetastore_get_tables_by_type_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size954 = 0;
-            $_etype957 = 0;
-            $xfer += $input->readListBegin($_etype957, $_size954);
-            for ($_i958 = 0; $_i958 < $_size954; ++$_i958)
+            $_size961 = 0;
+            $_etype964 = 0;
+            $xfer += $input->readListBegin($_etype964, $_size961);
+            for ($_i965 = 0; $_i965 < $_size961; ++$_i965)
             {
-              $elem959 = null;
-              $xfer += $input->readString($elem959);
-              $this->success []= $elem959;
+              $elem966 = null;
+              $xfer += $input->readString($elem966);
+              $this->success []= $elem966;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21916,9 +21980,9 @@ class ThriftHiveMetastore_get_tables_by_type_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter960)
+          foreach ($this->success as $iter967)
           {
-            $xfer += $output->writeString($iter960);
+            $xfer += $output->writeString($iter967);
           }
         }
         $output->writeListEnd();
@@ -22074,14 +22138,14 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size961 = 0;
-            $_etype964 = 0;
-            $xfer += $input->readListBegin($_etype964, $_size961);
-            for ($_i965 = 0; $_i965 < $_size961; ++$_i965)
+            $_size968 = 0;
+            $_etype971 = 0;
+            $xfer += $input->readListBegin($_etype971, $_size968);
+            for ($_i972 = 0; $_i972 < $_size968; ++$_i972)
             {
-              $elem966 = null;
-              $xfer += $input->readString($elem966);
-              $this->success []= $elem966;
+              $elem973 = null;
+              $xfer += $input->readString($elem973);
+              $this->success []= $elem973;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22117,9 +22181,9 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter967)
+          foreach ($this->success as $iter974)
           {
-            $xfer += $output->writeString($iter967);
+            $xfer += $output->writeString($iter974);
           }
         }
         $output->writeListEnd();
@@ -22224,14 +22288,14 @@ class ThriftHiveMetastore_get_table_meta_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->tbl_types = array();
-            $_size968 = 0;
-            $_etype971 = 0;
-            $xfer += $input->readListBegin($_etype971, $_size968);
-            for ($_i972 = 0; $_i972 < $_size968; ++$_i972)
+            $_size975 = 0;
+            $_etype978 = 0;
+            $xfer += $input->readListBegin($_etype978, $_size975);
+            for ($_i979 = 0; $_i979 < $_size975; ++$_i979)
             {
-              $elem973 = null;
-              $xfer += $input->readString($elem973);
-              $this->tbl_types []= $elem973;
+              $elem980 = null;
+              $xfer += $input->readString($elem980);
+              $this->tbl_types []= $elem980;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22269,9 +22333,9 @@ class ThriftHiveMetastore_get_table_meta_args {
       {
         $output->writeListBegin(TType::STRING, count($this->tbl_types));
         {
-          foreach ($this->tbl_types as $iter974)
+          foreach ($this->tbl_types as $iter981)
           {
-            $xfer += $output->writeString($iter974);
+            $xfer += $output->writeString($iter981);
           }
         }
         $output->writeListEnd();
@@ -22348,15 +22412,15 @@ class ThriftHiveMetastore_get_table_meta_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size975 = 0;
-            $_etype978 = 0;
-            $xfer += $input->readListBegin($_etype978, $_size975);
-            for ($_i979 = 0; $_i979 < $_size975; ++$_i979)
+            $_size982 = 0;
+            $_etype985 = 0;
+            $xfer += $input->readListBegin($_etype985, $_size982);
+            for ($_i986 = 0; $_i986 < $_size982; ++$_i986)
             {
-              $elem980 = null;
-              $elem980 = new \metastore\TableMeta();
-              $xfer += $elem980->read($input);
-              $this->success []= $elem980;
+              $elem987 = null;
+              $elem987 = new \metastore\TableMeta();
+              $xfer += $elem987->read($input);
+              $this->success []= $elem987;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22392,9 +22456,9 @@ class ThriftHiveMetastore_get_table_meta_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter981)
+          foreach ($this->success as $iter988)
           {
-            $xfer += $iter981->write($output);
+            $xfer += $iter988->write($output);
           }
         }
         $output->writeListEnd();
@@ -22550,14 +22614,14 @@ class ThriftHiveMetastore_get_all_tables_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size982 = 0;
-            $_etype985 = 0;
-            $xfer += $input->readListBegin($_etype985, $_size982);
-            for ($_i986 = 0; $_i986 < $_size982; ++$_i986)
+            $_size989 = 0;
+            $_etype992 = 0;
+            $xfer += $input->readListBegin($_etype992, $_size989);
+            for ($_i993 = 0; $_i993 < $_size989; ++$_i993)
             {
-              $elem987 = null;
-              $xfer += $input->readString($elem987);
-              $this->success []= $elem987;
+              $elem994 = null;
+              $xfer += $input->readString($elem994);
+              $this->success []= $elem994;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22593,9 +22657,9 @@ class ThriftHiveMetastore_get_all_tables_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter988)
+          foreach ($this->success as $iter995)
           {
-            $xfer += $output->writeString($iter988);
+            $xfer += $output->writeString($iter995);
           }
         }
         $output->writeListEnd();
@@ -22910,14 +22974,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args {
         case 2:
           if ($ftype == TType::LST) {
             $this->tbl_names = array();
-            $_size989 = 0;
-            $_etype992 = 0;
-            $xfer += $input->readListBegin($_etype992, $_size989);
-            for ($_i993 = 0; $_i993 < $_size989; ++$_i993)
+            $_size996 = 0;
+            $_etype999 = 0;
+            $xfer += $input->readListBegin($_etype999, $_size996);
+            for ($_i1000 = 0; $_i1000 < $_size996; ++$_i1000)
             {
-              $elem994 = null;
-              $xfer += $input->readString($elem994);
-              $this->tbl_names []= $elem994;
+              $elem1001 = null;
+              $xfer += $input->readString($elem1001);
+              $this->tbl_names []= $elem1001;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22950,9 +23014,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args {
       {
         $output->writeListBegin(TType::STRING, count($this->tbl_names));
         {
-          foreach ($this->tbl_names as $iter995)
+          foreach ($this->tbl_names as $iter1002)
           {
-            $xfer += $output->writeString($iter995);
+            $xfer += $output->writeString($iter1002);
           }
         }
         $output->writeListEnd();
@@ -23017,15 +23081,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size996 = 0;
-            $_etype999 = 0;
-            $xfer += $input->readListBegin($_etype999, $_size996);
-            for ($_i1000 = 0; $_i1000 < $_size996; ++$_i1000)
+            $_size1003 = 0;
+            $_etype1006 = 0;
+            $xfer += $input->readListBegin($_etype1006, $_size1003);
+            for ($_i1007 = 0; $_i1007 < $_size1003; ++$_i1007)
             {
-              $elem1001 = null;
-              $elem1001 = new \metastore\Table();
-              $xfer += $elem1001->read($input);
-              $this->success []= $elem1001;
+              $elem1008 = null;
+              $elem1008 = new \metastore\Table();
+              $xfer += $elem1008->read($input);
+              $this->success []= $elem1008;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -23053,9 +23117,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1002)
+          foreach ($this->success as $iter1009)
           {
-            $xfer += $iter1002->write($output);
+            $xfer += $iter1009->write($output);
           }
         }
         $output->writeListEnd();
@@ -24255,14 +24319,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1003 = 0;
-            $_etype1006 = 0;
-            $xfer += $input->readListBegin($_etype1006, $_size1003);
-            for ($_i1007 = 0; $_i1007 < $_size1003; ++$_i1007)
+            $_size1010 = 0;
+            $_etype1013 = 0;
+            $xfer += $input->readListBegin($_etype1013, $_size1010);
+            for ($_i1014 = 0; $_i1014 < $_size1010; ++$_i1014)
             {
-              $elem1008 = null;
-              $xfer += $input->readString($elem1008);
-              $this->success []= $elem1008;
+              $elem1015 = null;
+              $xfer += $input->readString($elem1015);
+              $this->success []= $elem1015;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -24314,9 +24378,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter1009)
+          foreach ($this->success as $iter1016)
           {
-            $xfer += $output->writeString($iter1009);
+            $xfer += $output->writeString($iter1016);
           }
         }
         $output->writeListEnd();
@@ -25839,266 +25903,13 @@ class ThriftHiveMetastore_add_partitions_args {
         case 1:
           if ($ftype == TType::LST) {
             $this->new_parts = array();
-            $_size1010 = 0;
-            $_etype1013 = 0;
-            $xfer += $input->readListBegin($_etype1013, $_size1010);
-            for ($_i1014 = 0; $_i1014 < $_size1010; ++$_i1014)
-            {
-              $elem1015 = null;
-              $elem1015 = new \metastore\Partition();
-              $xfer += $elem1015->read($input);
-              $this->new_parts []= $elem1015;
-            }
-            $xfer += $input->readListEnd();
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        default:
-          $xfer += $input->skip($ftype);
-          break;
-      }
-      $xfer += $input->readFieldEnd();
-    }
-    $xfer += $input->readStructEnd();
-    return $xfer;
-  }
-
-  public function write($output) {
-    $xfer = 0;
-    $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_partitions_args');
-    if ($this->new_parts !== null) {
-      if (!is_array($this->new_parts)) {
-        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
-      }
-      $xfer += $output->writeFieldBegin('new_parts', TType::LST, 1);
-      {
-        $output->writeListBegin(TType::STRUCT, count($this->new_parts));
-        {
-          foreach ($this->new_parts as $iter1016)
-          {
-            $xfer += $iter1016->write($output);
-          }
-        }
-        $output->writeListEnd();
-      }
-      $xfer += $output->writeFieldEnd();
-    }
-    $xfer += $output->writeFieldStop();
-    $xfer += $output->writeStructEnd();
-    return $xfer;
-  }
-
-}
-
-class ThriftHiveMetastore_add_partitions_result {
-  static $_TSPEC;
-
-  /**
-   * @var int
-   */
-  public $success = null;
-  /**
-   * @var \metastore\InvalidObjectException
-   */
-  public $o1 = null;
-  /**
-   * @var \metastore\AlreadyExistsException
-   */
-  public $o2 = null;
-  /**
-   * @var \metastore\MetaException
-   */
-  public $o3 = null;
-
-  public function __construct($vals=null) {
-    if (!isset(self::$_TSPEC)) {
-      self::$_TSPEC = array(
-        0 => array(
-          'var' => 'success',
-          'type' => TType::I32,
-          ),
-        1 => array(
-          'var' => 'o1',
-          'type' => TType::STRUCT,
-          'class' => '\metastore\InvalidObjectException',
-          ),
-        2 => array(
-          'var' => 'o2',
-          'type' => TType::STRUCT,
-          'class' => '\metastore\AlreadyExistsException',
-          ),
-        3 => array(
-          'var' => 'o3',
-          'type' => TType::STRUCT,
-          'class' => '\metastore\MetaException',
-          ),
-        );
-    }
-    if (is_array($vals)) {
-      if (isset($vals['success'])) {
-        $this->success = $vals['success'];
-      }
-      if (isset($vals['o1'])) {
-        $this->o1 = $vals['o1'];
-      }
-      if (isset($vals['o2'])) {
-        $this->o2 = $vals['o2'];
-      }
-      if (isset($vals['o3'])) {
-        $this->o3 = $vals['o3'];
-      }
-    }
-  }
-
-  public function getName() {
-    return 'ThriftHiveMetastore_add_partitions_result';
-  }
-
-  public function read($input)
-  {
-    $xfer = 0;
-    $fname = null;
-    $ftype = 0;
-    $fid = 0;
-    $xfer += $input->readStructBegin($fname);
-    while (true)
-    {
-      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
-      if ($ftype == TType::STOP) {
-        break;
-      }
-      switch ($fid)
-      {
-        case 0:
-          if ($ftype == TType::I32) {
-            $xfer += $input->readI32($this->success);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 1:
-          if ($ftype == TType::STRUCT) {
-            $this->o1 = new \metastore\InvalidObjectException();
-            $xfer += $this->o1->read($input);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 2:
-          if ($ftype == TType::STRUCT) {
-            $this->o2 = new \metastore\AlreadyExistsException();
-            $xfer += $this->o2->read($input);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 3:
-          if ($ftype == TType::STRUCT) {
-            $this->o3 = new \metastore\MetaException();
-            $xfer += $this->o3->read($input);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        default:
-          $xfer += $input->skip($ftype);
-          break;
-      }
-      $xfer += $input->readFieldEnd();
-    }
-    $xfer += $input->readStructEnd();
-    return $xfer;
-  }
-
-  public function write($output) {
-    $xfer = 0;
-    $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_partitions_result');
-    if ($this->success !== null) {
-      $xfer += $output->writeFieldBegin('success', TType::I32, 0);
-      $xfer += $output->writeI32($this->success);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->o1 !== null) {
-      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
-      $xfer += $this->o1->write($output);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->o2 !== null) {
-      $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
-      $xfer += $this->o2->write($output);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->o3 !== null) {
-      $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3);
-      $xfer += $this->o3->write($output);
-      $xfer += $output->writeFieldEnd();
-    }
-    $xfer += $output->writeFieldStop();
-    $xfer += $output->writeStructEnd();
-    return $xfer;
-  }
-
-}
-
-class ThriftHiveMetastore_add_partitions_pspec_args {
-  static $_TSPEC;
-
-  /**
-   * @var \metastore\PartitionSpec[]
-   */
-  public $new_parts = null;
-
-  public function __construct($vals=null) {
-    if (!isset(self::$_TSPEC)) {
-      self::$_TSPEC = array(
-        1 => array(
-          'var' => 'new_parts',
-          'type' => TType::LST,
-          'etype' => TType::STRUCT,
-          'elem' => array(
-            'type' => TType::STRUCT,
-            'class' => '\metastore\PartitionSpec',
-            ),
-          ),
-        );
-    }
-    if (is_array($vals)) {
-      if (isset($vals['new_parts'])) {
-        $this->new_parts = $vals['new_parts'];
-      }
-    }
-  }
-
-  public function getName() {
-    return 'ThriftHiveMetastore_add_partitions_pspec_args';
-  }
-
-  public function read($input)
-  {
-    $xfer = 0;
-    $fname = null;
-    $ftype = 0;
-    $fid = 0;
-    $xfer += $input->readStructBegin($fname);
-    while (true)
-    {
-      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
-      if ($ftype == TType::STOP) {
-        break;
-      }
-      switch ($fid)
-      {
-        case 1:
-          if ($ftype == TType::LST) {
-            $this->new_parts = array();
             $_size1017 = 0;
             $_etype1020 = 0;
             $xfer += $input->readListBegin($_etype1020, $_size1017);
             for ($_i1021 = 0; $_i1021 < $_size1017; ++$_i1021)
             {
               $elem1022 = null;
-              $elem1022 = new \metastore\PartitionSpec();
+              $elem1022 = new \metastore\Partition();
               $xfer += $elem1022->read($input);
               $this->new_parts []= $elem1022;
             }
@@ -26119,7 +25930,7 @@ class ThriftHiveMetastore_add_partitions_pspec_args {
 
   public function write($output) {
     $xfer = 0;
-    $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_partitions_pspec_args');
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_partitions_args');
     if ($this->new_parts !== null) {
       if (!is_array($this->new_parts)) {
         throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
@@ -26144,6 +25955,259 @@ class ThriftHiveMetastore_add_partitions_pspec_args {
 
 }
 
+class ThriftHiveMetastore_add_partitions_result {
+  static $_TSPEC;
+
+  /**
+   * @var int
+   */
+  public $success = null;
+  /**
+   * @var \metastore\InvalidObjectException
+   */
+  public $o1 = null;
+  /**
+   * @var \metastore\AlreadyExistsException
+   */
+  public $o2 = null;
+  /**
+   * @var \metastore\MetaException
+   */
+  public $o3 = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::I32,
+          ),
+        1 => array(
+          'var' => 'o1',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\InvalidObjectException',
+          ),
+        2 => array(
+          'var' => 'o2',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\AlreadyExistsException',
+          ),
+        3 => array(
+          'var' => 'o3',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\MetaException',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+      if (isset($vals['o1'])) {
+        $this->o1 = $vals['o1'];
+      }
+      if (isset($vals['o2'])) {
+        $this->o2 = $vals['o2'];
+      }
+      if (isset($vals['o3'])) {
+        $this->o3 = $vals['o3'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_add_partitions_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 0:
+          if ($ftype == TType::I32) {
+            $xfer += $input->readI32($this->success);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->o1 = new \metastore\InvalidObjectException();
+            $xfer += $this->o1->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRUCT) {
+            $this->o2 = new \metastore\AlreadyExistsException();
+            $xfer += $this->o2->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 3:
+          if ($ftype == TType::STRUCT) {
+            $this->o3 = new \metastore\MetaException();
+            $xfer += $this->o3->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_partitions_result');
+    if ($this->success !== null) {
+      $xfer += $output->writeFieldBegin('success', TType::I32, 0);
+      $xfer += $output->writeI32($this->success);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o1 !== null) {
+      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+      $xfer += $this->o1->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o2 !== null) {
+      $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+      $xfer += $this->o2->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o3 !== null) {
+      $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3);
+      $xfer += $this->o3->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_add_partitions_pspec_args {
+  static $_TSPEC;
+
+  /**
+   * @var \metastore\PartitionSpec[]
+   */
+  public $new_parts = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'new_parts',
+          'type' => TType::LST,
+          'etype' => TType::STRUCT,
+          'elem' => array(
+            'type' => TType::STRUCT,
+            'class' => '\metastore\PartitionSpec',
+            ),
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['new_parts'])) {
+        $this->new_parts = $vals['new_parts'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_add_partitions_pspec_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::LST) {
+            $this->new_parts = array();
+            $_size1024 = 0;
+            $_etype1027 = 0;
+            $xfer += $input->readListBegin($_etype1027, $_size1024);
+            for ($_i1028 = 0; $_i1028 < $_size1024; ++$_i1028)
+            {
+              $elem1029 = null;
+              $elem1029 = new \metastore\PartitionSpec();
+              $xfer += $elem1029->read($input);
+              $this->new_parts []= $elem1029;
+            }
+            $xfer += $input->readListEnd();
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_partitions_pspec_args');
+    if ($this->new_parts !== null) {
+      if (!is_array($this->new_parts)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('new_parts', TType::LST, 1);
+      {
+        $output->writeListBegin(TType::STRUCT, count($this->new_parts));
+        {
+          foreach ($this->new_parts as $iter1030)
+          {
+            $xfer += $iter1030->write($output);
+          }
+        }
+        $output->writeListEnd();
+      }
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
 class ThriftHiveMetastore_add_partitions_pspec_result {
   static $_TSPEC;
 
@@ -26380,14 +26444,14 @@ class ThriftHiveMetastore_append_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1024 = 0;
-            $_etype1027 = 0;
-            $xfer += $input->readListBegin($_etype1027, $_size1024);
-            for ($_i1028 = 0; $_i1028 < $_size1024; ++$_i1028)
+            $_size1031 = 0;
+            $_etype1034 = 0;
+            $xfer += $input->readListBegin($_etype1034, $_size1031);
+            for ($_i1035 = 0; $_i1035 < $_size1031; ++$_i1035)
             {
-              $elem1029 = null;
-              $xfer += $input->readString($elem1029);
-              $this->part_vals []= $elem1029;
+              $elem1036 = null;
+              $xfer += $input->readString($elem1036);
+              $this->part_vals []= $elem1036;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -26425,9 +26489,9 @@ class ThriftHiveMetastore_append_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1030)
+          foreach ($this->part_vals as $iter1037)
           {
-            $xfer += $output->writeString($iter1030);
+            $xfer += $output->writeString($iter1037);
           }
         }
         $output->writeListEnd();
@@ -26929,14 +26993,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1031 = 0;
-            $_etype1034 = 0;
-            $xfer += $input->readListBegin($_etype1034, $_size1031);
-            for ($_i1035 = 0; $_i1035 < $_size1031; ++$_i1035)
+            $_size1038 = 0;
+            $_etype1041 = 0;
+            $xfer += $input->readListBegin($_etype1041, $_size1038);
+            for ($_i1042 = 0; $_i1042 < $_size1038; ++$_i1042)
             {
-              $elem1036 = null;
-              $xfer += $input->readString($elem1036);
-              $this->part_vals []= $elem1036;
+              $elem1043 = null;
+              $xfer += $input->readString($elem1043);
+              $this->part_vals []= $elem1043;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -26982,9 +27046,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1037)
+          foreach ($this->part_vals as $iter1044)
           {
-            $xfer += $output->writeString($iter1037);
+            $xfer += $output->writeString($iter1044);
           }
         }
         $output->writeListEnd();
@@ -27838,14 +27902,14 @@ class ThriftHiveMetastore_drop_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1038 = 0;
-            $_etype1041 = 0;
-            $xfer += $input->readListBegin($_etype1041, $_size1038);
-            for ($_i1042 = 0; $_i1042 < $_size1038; ++$_i1042)
+            $_size1045 = 0;
+            $_etype1048 = 0;
+            $xfer += $input->readListBegin($_etype1048, $_size1045);
+            for ($_i1049 = 0; $_i1049 < $_size1045; ++$_i1049)
             {
-              $elem1043 = null;
-              $xfer += $input->readString($elem1043);
-              $this->part_vals []= $elem1043;
+              $elem1050 = null;
+              $xfer += $input->readString($elem1050);
+              $this->part_vals []= $elem1050;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -27890,9 +27954,9 @@ class ThriftHiveMetastore_drop_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1044)
+          foreach ($this->part_vals as $iter1051)
           {
-            $xfer += $output->writeString($iter1044);
+            $xfer += $output->writeString($iter1051);
           }
         }
         $output->writeListEnd();
@@ -28145,14 +28209,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1045 = 0;
-            $_etype1048 = 0;
-            $xfer += $input->readListBegin($_etype1048, $_size1045);
-            for ($_i1049 = 0; $_i1049 < $_size1045; ++$_i1049)
+            $_size1052 = 0;
+            $_etype1055 = 0;
+            $xfer += $input->readListBegin($_etype1055, $_size1052);
+            for ($_i1056 = 0; $_i1056 < $_size1052; ++$_i1056)
             {
-              $elem1050 = null;
-              $xfer += $input->readString($elem1050);
-              $this->part_vals []= $elem1050;
+              $elem1057 = null;
+              $xfer += $input->readString($elem1057);
+              $this->part_vals []= $elem1057;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -28205,9 +28269,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1051)
+          foreach ($this->part_vals as $iter1058)
           {
-            $xfer += $output->writeString($iter1051);
+            $xfer += $output->writeString($iter1058);
           }
         }
         $output->writeListEnd();
@@ -29221,14 +29285,14 @@ class ThriftHiveMetastore_get_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1052 = 0;
-            $_etype1055 = 0;
-            $xfer += $input->readListBegin($_etype1055, $_size1052);
-            for ($_i1056 = 0; $_i1056 < $_size1052; ++$_i1056)
+            $_size1059 = 0;
+            $_etype1062 = 0;
+            $xfer += $input->readListBegin($_etype1062, $_size1059);
+            for ($_i1063 = 0; $_i1063 < $_size1059; ++$_i1063)
             {
-              $elem1057 = null;
-              $xfer += $input->readString($elem1057);
-              $this->part_vals []= $elem1057;
+              $elem1064 = null;
+              $xfer += $input->readString($elem1064);
+              $this->part_vals []= $elem1064;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -29266,9 +29330,9 @@ class ThriftHiveMetastore_get_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1058)
+          foreach ($this->part_vals as $iter1065)
           {
-            $xfer += $output->writeString($iter1058);
+            $xfer += $output->writeString($iter1065);
           }
         }
         $output->writeListEnd();
@@ -29510,17 +29574,17 @@ class ThriftHiveMetastore_exchange_partition_args {
         case 1:
           if ($ftype == TType::MAP) {
             $this->partitionSpecs = array();
-            $_size1059 = 0;
-            $_ktype1060 = 0;
-            $_vtype1061 = 0;
-            $xfer += $input->readMapBegin($_ktype1060, $_vtype1061, $_size1059);
-            for ($_i1063 = 0; $_i1063 < $_size1059; ++$_i1063)
+            $_size1066 = 0;
+            $_ktype1067 = 0;
+            $_vtype1068 = 0;
+            $xfer += $input->readMapBegin($_ktype1067, $_vtype1068, $_size1066);
+            for ($_i1070 = 0; $_i1070 < $_size1066; ++$_i1070)
             {
-              $key1064 = '';
-              $val1065 = '';
-              $xfer += $input->readString($key1064);
-              $xfer += $input->readString($val1065);
-              $this->partitionSpecs[$key1064] = $val1065;
+              $key1071 = '';
+              $val1072 = '';
+              $xfer += $input->readString($key1071);
+              $xfer += $input->readString($val1072);
+              $this->partitionSpecs[$key1071] = $val1072;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -29576,10 +29640,10 @@ class ThriftHiveMetastore_exchange_partition_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
         {
-          foreach ($this->partitionSpecs as $kiter1066 => $viter1067)
+          foreach ($this->partitionSpecs as $kiter1073 => $viter1074)
           {
-            $xfer += $output->writeString($kiter1066);
-            $xfer += $output->writeString($viter1067);
+            $xfer += $output->writeString($kiter1073);
+            $xfer += $output->writeString($viter1074);
           }
         }
         $output->writeMapEnd();
@@ -29891,17 +29955,17 @@ class ThriftHiveMetastore_exchange_partitions_args {
         case 1:
           if ($ftype == TType::MAP) {
             $this->partitionSpecs = array();
-            $_size1068 = 0;
-            $_ktype1069 = 0;
-            $_vtype1070 = 0;
-            $xfer += $input->readMapBegin($_ktype1069, $_vtype1070, $_size1068);
-            for ($_i1072 = 0; $_i1072 < $_size1068; ++$_i1072)
+            $_size1075 = 0;
+            $_ktype1076 = 0;
+            $_vtype1077 = 0;
+            $xfer += $input->readMapBegin($_ktype1076, $_vtype1077, $_size1075);
+            for ($_i1079 = 0; $_i1079 < $_size1075; ++$_i1079)
             {
-              $key1073 = '';
-              $val1074 = '';
-              $xfer += $input->readString($key1073);
-              $xfer += $input->readString($val1074);
-              $this->partitionSpecs[$key1073] = $val1074;
+              $key1080 = '';
+              $val1081 = '';
+              $xfer += $input->readString($key1080);
+              $xfer += $input->readString($val1081);
+              $this->partitionSpecs[$key1080] = $val1081;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -29957,10 +30021,10 @@ class ThriftHiveMetastore_exchange_partitions_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
         {
-          foreach ($this->partitionSpecs as $kiter1075 => $viter1076)
+          foreach ($this->partitionSpecs as $kiter1082 => $viter1083)
           {
-            $xfer += $output->writeString($kiter1075);
-            $xfer += $output->writeString($viter1076);
+            $xfer += $output->writeString($kiter1082);
+            $xfer += $output->writeString($viter1083);
           }
         }
         $output->writeMapEnd();
@@ -30093,15 +30157,15 @@ class ThriftHiveMetastore_exchange_partitions_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1077 = 0;
-            $_etype1080 = 0;
-            $xfer += $input->readListBegin($_etype1080, $_size1077);
-            for ($_i1081 = 0; $_i1081 < $_size1077; ++$_i1081)
+            $_size1084 = 0;
+            $_etype1087 = 0;
+            $xfer += $input->readListBegin($_etype1087, $_size1084);
+            for ($_i1088 = 0; $_i1088 < $_size1084; ++$_i1088)
             {
-              $elem1082 = null;
-              $elem1082 = new \metastore\Partition();
-              $xfer += $elem1082->read($input);
-              $this->success []= $elem1082;
+              $elem1089 = null;
+              $elem1089 = new \metastore\Partition();
+              $xfer += $elem1089->read($input);
+              $this->success []= $elem1089;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -30161,9 +30225,9 @@ class ThriftHiveMetastore_exchange_partitions_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1083)
+          foreach ($this->success as $iter1090)
           {
-            $xfer += $iter1083->write($output);
+            $xfer += $iter1090->write($output);
           }
         }
         $output->writeListEnd();
@@ -30309,14 +30373,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1084 = 0;
-            $_etype1087 = 0;
-            $xfer += $input->readListBegin($_etype1087, $_size1084);
-            for ($_i1088 = 0; $_i1088 < $_size1084; ++$_i1088)
+            $_size1091 = 0;
+            $_etype1094 = 0;
+            $xfer += $input->readListBegin($_etype1094, $_size1091);
+            for ($_i1095 = 0; $_i1095 < $_size1091; ++$_i1095)
             {
-              $elem1089 = null;
-              $xfer += $input->readString($elem1089);
-              $this->part_vals []= $elem1089;
+              $elem1096 = null;
+              $xfer += $input->readString($elem1096);
+              $this->part_vals []= $elem1096;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -30333,14 +30397,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
         case 5:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size1090 = 0;
-            $_etype1093 = 0;
-            $xfer += $input->readListBegin($_etype1093, $_size1090);
-            for ($_i1094 = 0; $_i1094 < $_size1090; ++$_i1094)
+            $_size1097 = 0;
+            $_etype1100 = 0;
+            $xfer += $input->readListBegin($_etype1100, $_size1097);
+            for ($_i1101 = 0; $_i1101 < $_size1097; ++$_i1101)
             {
-              $elem1095 = null;
-              $xfer += $input->readString($elem1095);
-              $this->group_names []= $elem1095;
+              $elem1102 = null;
+              $xfer += $input->readString($elem1102);
+              $this->group_names []= $elem1102;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -30378,9 +30442,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1096)
+          foreach ($this->part_vals as $iter1103)
           {
-            $xfer += $output->writeString($iter1096);
+            $xfer += $output->writeString($iter1103);
           }
         }
         $output->writeListEnd();
@@ -30400,9 +30464,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter1097)
+          foreach ($this->group_names as $iter1104)
           {
-            $xfer += $output->writeString($iter1097);
+            $xfer += $output->writeString($iter1104);
           }
         }
         $output->writeListEnd();
@@ -30993,15 +31057,15 @@ class ThriftHiveMetastore_get_partitions_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1098 = 0;
-            $_etype1101 = 0;
-            $xfer += $input->readListBegin($_etype1101, $_size1098);
-            for ($_i1102 = 0; $_i1102 < $_size1098; ++$_i1102)
+            $_size1105 = 0;
+            $_etype1108 = 0;
+            $xfer += $input->readListBegin($_etype1108, $_size1105);
+            for ($_i1109 = 0; $_i1109 < $_size1105; ++$_i1109)
             {
-              $elem1103 = null;
-              $elem1103 = new \metastore\Partition();
-              $xfer += $elem1103->read($input);
-              $this->success []= $elem1103;
+              $elem1110 = null;
+              $elem1110 = new \metastore\Partition();
+              $xfer += $elem1110->read($input);
+              $this->success []= $elem1110;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -31045,9 +31109,9 @@ class ThriftHiveMetastore_get_partitions_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1104)
+          foreach ($this->success as $iter1111)
           {
-            $xfer += $iter1104->write($output);
+            $xfer += $iter1111->write($output);
           }
         }
         $output->writeListEnd();
@@ -31193,14 +31257,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
         case 5:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size1105 = 0;
-            $_etype1108 = 0;
-            $xfer += $input->readListBegin($_etype1108, $_size1105);
-            for ($_i1109 = 0; $_i1109 < $_size1105; ++$_i1109)
+            $_size1112 = 0;
+            $_etype1115 = 0;
+            $xfer += $input->readListBegin($_etype1115, $_size1112);
+            for ($_i1116 = 0; $_i1116 < $_size1112; ++$_i1116)
             {
-              $elem1110 = null;
-              $xfer += $input->readString($elem1110);
-              $this->group_names []= $elem1110;
+              $elem1117 = null;
+              $xfer += $input->readString($elem1117);
+              $this->group_names []= $elem1117;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -31248,9 +31312,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter1111)
+          foreach ($this->group_names as $iter1118)
           {
-            $xfer += $output->writeString($iter1111);
+            $xfer += $output->writeString($iter1118);
           }
         }
         $output->writeListEnd();
@@ -31339,15 +31403,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1112 = 0;
-            $_etype1115 = 0;
-            $xfer += $input->readListBegin($_etype1115, $_size1112);
-            for ($_i1116 = 0; $_i1116 < $_size1112; ++$_i1116)
+            $_size1119 = 0;
+            $_etype1122 = 0;
+            $xfer += $input->readListBegin($_etype1122, $_size1119);
+            for ($_i1123 = 0; $_i1123 < $_size1119; ++$_i1123)
             {
-              $elem1117 = null;
-              $elem1117 = new \metastore\Partition();
-              $xfer += $elem1117->read($input);
-              $this->success []= $elem1117;
+              $elem1124 = null;
+              $elem1124 = new \metastore\Partition();
+              $xfer += $elem1124->read($input);
+              $this->success []= $elem1124;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -31391,9 +31455,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1118)
+          foreach ($this->success as $iter1125)
           {
-            $xfer += $iter1118->write($output);
+            $xfer += $iter1125->write($output);
           }
         }
         $output->writeListEnd();
@@ -31613,15 +31677,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1119 = 0;
-            $_etype1122 = 0;
-            $xfer += $input->readListBegin($_etype1122, $_size1119);
-            for ($_i1123 = 0; $_i1123 < $_size1119; ++$_i1123)
+            $_size1126 = 0;
+            $_etype1129 = 0;
+            $xfer += $input->readListBegin($_etype1129, $_size1126);
+            for ($_i1130 = 0; $_i1130 < $_size1126; ++$_i1130)
             {
-              $elem1124 = null;
-              $elem1124 = new \metastore\PartitionSpec();
-              $xfer += $elem1124->read($input);
-              $this->success []= $elem1124;
+              $elem1131 = null;
+              $elem1131 = new \metastore\PartitionSpec();
+              $xfer += $elem1131->read($input);
+              $this->success []= $elem1131;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -31665,9 +31729,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1125)
+          foreach ($this->success as $iter1132)
           {
-            $xfer += $iter1125->write($output);
+            $xfer += $iter1132->write($output);
           }
         }
         $output->writeListEnd();
@@ -31886,14 +31950,14 @@ class ThriftHiveMetastore_get_partition_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1126 = 0;
-            $_etype1129 = 0;
-            $xfer += $input->readListBegin($_etype1129, $_size1126);
-            for ($_i1130 = 0; $_i1130 < $_size1126; ++$_i1130)
+            $_size1133 = 0;
+            $_etype1136 = 0;
+            $xfer += $input->readListBegin($_etype1136, $_size1133);
+            for ($_i1137 = 0; $_i1137 < $_size1133; ++$_i1137)
             {
-              $elem1131 = null;
-              $xfer += $input->readString($elem1131);
-              $this->success []= $elem1131;
+              $elem1138 = null;
+              $xfer += $input->readString($elem1138);
+              $this->success []= $elem1138;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -31937,9 +32001,9 @@ class ThriftHiveMetastore_get_partition_names_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter1132)
+          foreach ($this->success as $iter1139)
           {
-            $xfer += $output->writeString($iter1132);
+            $xfer += $output->writeString($iter1139);
           }
         }
         $output->writeListEnd();
@@ -32270,14 +32334,14 @@ class ThriftHiveMetastore_get_partitions_ps_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1133 = 0;
-            $_etype1136 = 0;
-            $xfer += $input->readListBegin($_etype1136, $_size1133);
-            for ($_i1137 = 0; $_i1137 < $_size1133; ++$_i1137)
+            $_size1140 = 0;
+            $_etype1143 = 0;
+            $xfer += $input->readListBegin($_etype1143, $_size1140);
+            for ($_i1144 = 0; $_i1144 < $_size1140; ++$_i1144)
             {
-              $elem1138 = null;
-              $xfer += $input->readString($elem1138);
-              $this->part_vals []= $elem1138;
+              $elem1145 = null;
+              $xfer += $input->readString($elem1145);
+              $this->part_vals []= $elem1145;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -32322,9 +32386,9 @@ class ThriftHiveMetastore_get_partitions_ps_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1139)
+          foreach ($this->part_vals as $iter1146)
           {
-            $xfer += $output->writeString($iter1139);
+            $xfer += $output->writeString($iter1146);
           }
         }
         $output->writeListEnd();
@@ -32418,15 +32482,15 @@ class ThriftHiveMetastore_get_partitions_ps_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1140 = 0;
-            $_etype1143 = 0;
-            $xfer += $input->readListBegin($_etype1143, $_size1140);
-            for ($_i1144 = 0; $_i1144 < $_size1140; ++$_i1144)
+            $_size1147 = 0;
+            $_etype1150 = 0;
+            $xfer += $input->readListBegin($_etype1150, $_size1147);
+            for ($_i1151 = 0; $_i1151 < $_size1147; ++$_i1151)
             {
-              $elem1145 = null;
-              $elem1145 = new \metastore\Partition();
-              $xfer += $elem1145->read($input);
-              $this->success []= $elem1145;
+              $elem1152 = null;
+              $elem1152 = new \metastore\Partition();
+              $xfer += $elem1152->read($input);
+              $this->success []= $elem1152;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -32470,9 +32534,9 @@ class ThriftHiveMetastore_get_partitions_ps_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1146)
+          foreach ($this->success as $iter1153)
           {
-            $xfer += $iter1146->write($output);
+            $xfer += $iter1153->write($output);
           }
         }
         $output->writeListEnd();
@@ -32619,14 +32683,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1147 = 0;
-            $_etype1150 = 0;
-            $xfer += $input->readListBegin($_etype1150, $_size1147);
-            for ($_i1151 = 0; $_i1151 < $_size1147; ++$_i1151)
+            $_size1154 = 0;
+            $_etype1157 = 0;
+            $xfer += $input->readListBegin($_etype1157, $_size1154);
+            for ($_i1158 = 0; $_i1158 < $_size1154; ++$_i1158)
             {
-              $elem1152 = null;
-              $xfer += $input->readString($elem1152);
-              $this->part_vals []= $elem1152;
+              $elem1159 = null;
+              $xfer += $input->readString($elem1159);
+              $this->part_vals []= $elem1159;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -32650,14 +32714,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
         case 6:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size1153 = 0;
-            $_etype1156 = 0;
-            $xfer += $input->readListBegin($_etype1156, $_size1153);
-            for ($_i1157 = 0; $_i1157 < $_size1153; ++$_i1157)
+            $_size1160 = 0;
+            $_etype1163 = 0;
+            $xfer += $input->readListBegin($_etype1163, $_size1160);
+            for ($_i1164 = 0; $_i1164 < $_size1160; ++$_i1164)
             {
-              $elem1158 = null;
-              $xfer += $input->readString($elem1158);
-              $this->group_names []= $elem1158;
+              $elem1165 = null;
+              $xfer += $input->readString($elem1165);
+              $this->group_names []= $elem1165;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -32695,9 +32759,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1159)
+          foreach ($this->part_vals as $iter1166)
           {
-            $xfer += $output->writeString($iter1159);
+            $xfer += $output->writeString($iter1166);
           }
         }
         $output->writeListEnd();
@@ -32722,9 +32786,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter1160)
+          foreach ($this->group_names as $iter1167)
           {
-            $xfer += $output->writeString($iter1160);
+            $xfer += $output->writeString($iter1167);
           }
         }
         $output->writeListEnd();
@@ -32813,15 +32877,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1161 = 0;
-            $_etype1164 = 0;
-            $xfer += $input->readListBegin($_etype1164, $_size1161);
-            for ($_i1165 = 0; $_i1165 < $_size1161; ++$_i1165)
+            $_size1168 = 0;
+            $_etype1171 = 0;
+            $xfer += $input->readListBegin($_etype1171, $_size1168);
+            for ($_i1172 = 0; $_i1172 < $_size1168; ++$_i1172)
             {
-              $elem1166 = null;
-              $elem1166 = new \metastore\Partition();
-              $xfer += $elem1166->read($input);
-              $this->success []= $elem1166;
+              $elem1173 = null;
+              $elem1173 = new \metastore\Partition();
+              $xfer += $elem1173->read($input);
+              $this->success []= $elem1173;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -32865,9 +32929,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1167)
+          foreach ($this->success as $iter1174)
           {
-            $xfer += $iter1167->write($output);
+            $xfer += $iter1174->write($output);
           }
         }
         $output->writeListEnd();
@@ -32988,14 +33052,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1168 = 0;
-            $_etype1171 = 0;
-            $xfer += $input->readListBegin($_etype1171, $_size1168);
-            for ($_i1172 = 0; $_i1172 < $_size1168; ++$_i1172)
+            $_size1175 = 0;
+            $_etype1178 = 0;
+            $xfer += $input->readListBegin($_etype1178, $_size1175);
+            for ($_i1179 = 0; $_i1179 < $_size1175; ++$_i1179)
             {
-              $elem1173 = null;
-              $xfer += $input->readString($elem1173);
-              $this->part_vals []= $elem1173;
+              $elem1180 = null;
+              $xfer += $input->readString($elem1180);
+              $this->part_vals []= $elem1180;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -33040,9 +33104,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1174)
+          foreach ($this->part_vals as $iter1181)
           {
-            $xfer += $output->writeString($iter1174);
+            $xfer += $output->writeString($iter1181);
           }
         }
         $output->writeListEnd();
@@ -33135,14 +33199,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1175 = 0;
-            $_etype1178 = 0;
-            $xfer += $input->readListBegin($_etype1178, $_size1175);
-            for ($_i1179 = 0; $_i1179 < $_size1175; ++$_i1179)
+            $_size1182 = 0;
+            $_etype1185 = 0;
+            $xfer += $input->readListBegin($_etype1185, $_size1182);
+            for ($_i1186 = 0; $_i1186 < $_size1182; ++$_i1186)
             {
-              $elem1180 = null;
-              $xfer += $input->readString($elem1180);
-              $this->success []= $elem1180;
+              $elem1187 = null;
+              $xfer += $input->readString($elem1187);
+              $this->success []= $elem1187;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -33186,9 +33250,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter1181)
+          foreach ($this->success as $iter1188)
           {
-            $xfer += $output->writeString($iter1181);
+            $xfer += $output->writeString($iter1188);
           }
         }
         $output->writeListEnd();
@@ -33431,15 +33495,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1182 = 0;
-            $_etype1185 = 0;
-            $xfer += $input->readListBegin($_etype1185, $_size1182);
-            for ($_i1186 = 0; $_i1186 < $_size1182; ++$_i1186)
+            $_size1189 = 0;
+            $_etype1192 = 0;
+            $xfer += $input->readListBegin($_etype1192, $_size1189);
+            for ($_i1193 = 0; $_i1193 < $_size1189; ++$_i1193)
             {
-              $elem1187 = null;
-              $elem1187 = new \metastore\Partition();
-              $xfer += $elem1187->read($input);
-              $this->success []= $elem1187;
+              $elem1194 = null;
+              $elem1194 = new \metastore\Partition();
+              $xfer += $elem1194->read($input);
+              $this->success []= $elem1194;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -33483,9 +33547,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1188)
+          foreach ($this->success as $iter1195)
           {
-            $xfer += $iter1188->write($output);
+            $xfer += $iter1195->write($output);
           }
         }
         $output->writeListEnd();
@@ -33728,15 +33792,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1189 = 0;
-            $_etype1192 = 0;
-            $xfer += $input->readListBegin($_etype1192, $_size1189);
-            for ($_i1193 = 0; $_i1193 < $_size1189; ++$_i1193)
+            $_size1196 = 0;
+            $_etype1199 = 0;
+            $xfer += $input->readListBegin($_etype1199, $_size1196);
+            for ($_i1200 = 0; $_i1200 < $_size1196; ++$_i1200)
             {
-              $elem1194 = null;
-              $elem1194 = new \metastore\PartitionSpec();
-              $xfer += $elem1194->read($input);
-              $this->success []= $elem1194;
+              $elem1201 = null;
+              $elem1201 = new \metastore\PartitionSpec();
+              $xfer += $elem1201->read($input);
+              $this->success []= $elem1201;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -33780,9 +33844,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1195)
+          foreach ($this->success as $iter1202)
           {
-            $xfer += $iter1195->write($output);
+            $xfer += $iter1202->write($output);
           }
         }
         $output->writeListEnd();
@@ -34348,14 +34412,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->names = array();
-            $_size1196 = 0;
-            $_etype1199 = 0;
-            $xfer += $input->readListBegin($_etype1199, $_size1196);
-            for ($_i1200 = 0; $_i1200 < $_size1196; ++$_i1200)
+            $_size1203 = 0;
+            $_etype1206 = 0;
+            $xfer += $input->readListBegin($_etype1206, $_size1203);
+            for ($_i1207 = 0; $_i1207 < $_size1203; ++$_i1207)
             {
-              $elem1201 = null;
-              $xfer += $input->readString($elem1201);
-              $this->names []= $elem1201;
+              $elem1208 = null;
+              $xfer += $input->readString($elem1208);
+              $this->names []= $elem1208;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -34393,9 +34457,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args {
       {
         $output->writeListBegin(TType::STRING, count($this->names));
         {
-          foreach ($this->names as $iter1202)
+          foreach ($this->names as $iter1209)
           {
-            $xfer += $output->writeString($iter1202);
+            $xfer += $output->writeString($iter1209);
           }
         }
         $output->writeListEnd();
@@ -34484,15 +34548,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1203 = 0;
-            $_etype1206 = 0;
-            $xfer += $input->readListBegin($_etype1206, $_size1203);
-            for ($_i1207 = 0; $_i1207 < $_size1203; ++$_i1207)
+            $_size1210 = 0;
+            $_etype1213 = 0;
+            $xfer += $input->readListBegin($_etype1213, $_size1210);
+            for ($_i1214 = 0; $_i1214 < $_size1210; ++$_i1214)
             {
-              $elem1208 = null;
-              $elem1208 = new \metastore\Partition();
-              $xfer += $elem1208->read($input);
-              $this->success []= $elem1208;
+              $elem1215 = null;
+              $elem1215 = new \metastore\Partition();
+              $xfer += $elem1215->read($input);
+              $this->success []= $elem1215;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -34536,9 +34600,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1209)
+          foreach ($this->success as $iter1216)
           {
-            $xfer += $iter1209->write($output);
+            $xfer += $iter1216->write($output);
           }
         }
         $output->writeListEnd();
@@ -34877,15 +34941,15 @@ class ThriftHiveMetastore_alter_partitions_args {
       

<TRUNCATED>
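
For orientation, here is a minimal sketch (not part of the commit above) of how the regenerated ThriftHiveMetastore_add_partitions_pspec_args struct is driven: it carries a list of \metastore\PartitionSpec objects under field id 1 (TType::LST of TType::STRUCT), and its write() emits exactly the layout described by its $_TSPEC. The sketch assumes the generated Hive metastore PHP package lives in namespace "metastore" and that the Apache Thrift PHP runtime (TMemoryBuffer, TBinaryProtocol) is available; the require path is a placeholder, not a real file from this repository.

<?php
// Illustrative sketch only -- not taken from the generated file above.
// The autoload path below is hypothetical; point it at wherever the Thrift
// PHP runtime and the generated hive_metastore classes are installed.
require_once 'vendor/autoload.php';

use Thrift\Protocol\TBinaryProtocol;
use Thrift\Transport\TMemoryBuffer;

// One PartitionSpec per entry of new_parts (field id 1, list<PartitionSpec>).
$spec = new \metastore\PartitionSpec();

// The generated constructor accepts an associative array keyed by field name,
// matching the __construct($vals) shown in the diff.
$args = new \metastore\ThriftHiveMetastore_add_partitions_pspec_args(
    array('new_parts' => array($spec))
);

// write() walks the struct per $_TSPEC: writeStructBegin, then
// writeFieldBegin('new_parts', TType::LST, 1), writeListBegin(TType::STRUCT, n),
// one nested PartitionSpec->write() per element, then the closing calls.
$buffer   = new TMemoryBuffer();
$protocol = new TBinaryProtocol($buffer);
$args->write($protocol);

echo strlen($buffer->getBuffer()) . " bytes serialized\n";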

[08/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query5.q.out b/ql/src/test/results/clientpositive/perf/tez/query5.q.out
index 6b054f0..e554dd4 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query5.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query5.q.out
@@ -287,287 +287,293 @@ Stage-0
     limit:100
     Stage-1
       Reducer 8 vectorized
-      File Output Operator [FS_304]
-        Limit [LIM_303] (rows=100 width=110)
+      File Output Operator [FS_309]
+        Limit [LIM_308] (rows=100 width=110)
           Number of rows:100
-          Select Operator [SEL_302] (rows=1136898901 width=110)
+          Select Operator [SEL_307] (rows=1136898901 width=110)
             Output:["_col0","_col1","_col2","_col3","_col4"]
           <-Reducer 7 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_301]
-              Select Operator [SEL_300] (rows=1136898901 width=110)
+            SHUFFLE [RS_306]
+              Select Operator [SEL_305] (rows=1136898901 width=110)
                 Output:["_col0","_col1","_col2","_col3","_col4"]
-                Group By Operator [GBY_299] (rows=1136898901 width=110)
+                Group By Operator [GBY_304] (rows=1136898901 width=110)
                   Output:["_col0","_col1","_col3","_col4","_col5"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1, KEY._col2
                 <-Union 6 [SIMPLE_EDGE]
                   <-Reducer 14 [CONTAINS] vectorized
-                    Reduce Output Operator [RS_317]
+                    Reduce Output Operator [RS_323]
                       PartitionCols:_col0, _col1, _col2
-                      Group By Operator [GBY_316] (rows=2273797803 width=110)
+                      Group By Operator [GBY_322] (rows=2273797803 width=110)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L
-                        Select Operator [SEL_315] (rows=191657181 width=132)
-                          Output:["_col0","_col1","_col2","_col3","_col4"]
-                          Group By Operator [GBY_314] (rows=191657181 width=132)
-                            Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0
-                          <-Reducer 13 [SIMPLE_EDGE]
-                            SHUFFLE [RS_47]
-                              PartitionCols:_col0
-                              Group By Operator [GBY_46] (rows=383314363 width=132)
-                                Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col2)","sum(_col4)","sum(_col3)","sum(_col5)"],keys:_col9
-                                Merge Join Operator [MERGEJOIN_221] (rows=383314363 width=132)
-                                  Conds:RS_42._col0=RS_310._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col9"]
-                                <-Map 25 [SIMPLE_EDGE] vectorized
-                                  PARTITION_ONLY_SHUFFLE [RS_310]
-                                    PartitionCols:_col0
-                                    Select Operator [SEL_309] (rows=46000 width=460)
-                                      Output:["_col0","_col1"]
-                                      Filter Operator [FIL_308] (rows=46000 width=460)
-                                        predicate:cp_catalog_page_sk is not null
-                                        TableScan [TS_36] (rows=46000 width=460)
-                                          default@catalog_page,catalog_page,Tbl:COMPLETE,Col:NONE,Output:["cp_catalog_page_sk","cp_catalog_page_id"]
-                                <-Reducer 12 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_42]
-                                    PartitionCols:_col0
-                                    Merge Join Operator [MERGEJOIN_220] (rows=348467596 width=132)
-                                      Conds:Union 23._col1=RS_272._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5"]
-                                    <-Map 10 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_272]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_269] (rows=8116 width=1119)
-                                          Output:["_col0"]
-                                          Filter Operator [FIL_268] (rows=8116 width=1119)
-                                            predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00' AND TIMESTAMP'1998-08-18 00:00:00' and d_date_sk is not null)
-                                            TableScan [TS_8] (rows=73049 width=1119)
-                                              default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
-                                    <-Union 23 [SIMPLE_EDGE]
-                                      <-Map 22 [CONTAINS] vectorized
-                                        Reduce Output Operator [RS_334]
-                                          PartitionCols:_col1
-                                          Select Operator [SEL_333] (rows=287989836 width=135)
-                                            Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                            Filter Operator [FIL_332] (rows=287989836 width=135)
-                                              predicate:((cs_catalog_page_sk BETWEEN DynamicValue(RS_43_catalog_page_cp_catalog_page_sk_min) AND DynamicValue(RS_43_catalog_page_cp_catalog_page_sk_max) and in_bloom_filter(cs_catalog_page_sk, DynamicValue(RS_43_catalog_page_cp_catalog_page_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_40_date_dim_d_date_sk_min) AND DynamicValue(RS_40_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_40_date_dim_d_date_sk_bloom_filter))) and cs_catalog_page_sk is not null and cs_sold_date_sk is not null)
-                                              TableScan [TS_249] (rows=287989836 width=135)
-                                                Output:["cs_sold_date_sk","cs_catalog_page_sk","cs_ext_sales_price","cs_net_profit"]
-                                              <-Reducer 15 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_329]
-                                                  Group By Operator [GBY_328] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_280]
-                                                      Group By Operator [GBY_277] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_273] (rows=8116 width=1119)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_269]
-                                              <-Reducer 26 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_331]
-                                                  Group By Operator [GBY_330] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 25 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_313]
-                                                      Group By Operator [GBY_312] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_311] (rows=46000 width=460)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_309]
-                                      <-Map 24 [CONTAINS] vectorized
-                                        Reduce Output Operator [RS_337]
-                                          PartitionCols:_col1
-                                          Select Operator [SEL_336] (rows=28798881 width=106)
-                                            Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                            Filter Operator [FIL_335] (rows=28798881 width=106)
-                                              predicate:(cr_catalog_page_sk is not null and cr_returned_date_sk is not null)
-                                              TableScan [TS_254] (rows=28798881 width=106)
-                                                Output:["cr_returned_date_sk","cr_catalog_page_sk","cr_return_amount","cr_net_loss"]
+                        Top N Key Operator [TNK_321] (rows=757932601 width=110)
+                          keys:_col0, _col1, 0L,sort order:+++,top n:100
+                          Select Operator [SEL_320] (rows=191657181 width=132)
+                            Output:["_col0","_col1","_col2","_col3","_col4"]
+                            Group By Operator [GBY_319] (rows=191657181 width=132)
+                              Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0
+                            <-Reducer 13 [SIMPLE_EDGE]
+                              SHUFFLE [RS_47]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_46] (rows=383314363 width=132)
+                                  Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col2)","sum(_col4)","sum(_col3)","sum(_col5)"],keys:_col9
+                                  Merge Join Operator [MERGEJOIN_222] (rows=383314363 width=132)
+                                    Conds:RS_42._col0=RS_315._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col9"]
+                                  <-Map 25 [SIMPLE_EDGE] vectorized
+                                    PARTITION_ONLY_SHUFFLE [RS_315]
+                                      PartitionCols:_col0
+                                      Select Operator [SEL_314] (rows=46000 width=460)
+                                        Output:["_col0","_col1"]
+                                        Filter Operator [FIL_313] (rows=46000 width=460)
+                                          predicate:cp_catalog_page_sk is not null
+                                          TableScan [TS_36] (rows=46000 width=460)
+                                            default@catalog_page,catalog_page,Tbl:COMPLETE,Col:NONE,Output:["cp_catalog_page_sk","cp_catalog_page_id"]
+                                  <-Reducer 12 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_42]
+                                      PartitionCols:_col0
+                                      Merge Join Operator [MERGEJOIN_221] (rows=348467596 width=132)
+                                        Conds:Union 23._col1=RS_276._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5"]
+                                      <-Map 10 [SIMPLE_EDGE] vectorized
+                                        SHUFFLE [RS_276]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_273] (rows=8116 width=1119)
+                                            Output:["_col0"]
+                                            Filter Operator [FIL_272] (rows=8116 width=1119)
+                                              predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00' AND TIMESTAMP'1998-08-18 00:00:00' and d_date_sk is not null)
+                                              TableScan [TS_8] (rows=73049 width=1119)
+                                                default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
+                                      <-Union 23 [SIMPLE_EDGE]
+                                        <-Map 22 [CONTAINS] vectorized
+                                          Reduce Output Operator [RS_341]
+                                            PartitionCols:_col1
+                                            Select Operator [SEL_340] (rows=287989836 width=135)
+                                              Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                              Filter Operator [FIL_339] (rows=287989836 width=135)
+                                                predicate:((cs_catalog_page_sk BETWEEN DynamicValue(RS_43_catalog_page_cp_catalog_page_sk_min) AND DynamicValue(RS_43_catalog_page_cp_catalog_page_sk_max) and in_bloom_filter(cs_catalog_page_sk, DynamicValue(RS_43_catalog_page_cp_catalog_page_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_40_date_dim_d_date_sk_min) AND DynamicValue(RS_40_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_40_date_dim_d_date_sk_bloom_filter))) and cs_catalog_page_sk is not null and cs_sold_date_sk is not null)
+                                                TableScan [TS_253] (rows=287989836 width=135)
+                                                  Output:["cs_sold_date_sk","cs_catalog_page_sk","cs_ext_sales_price","cs_net_profit"]
+                                                <-Reducer 15 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_336]
+                                                    Group By Operator [GBY_335] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      SHUFFLE [RS_284]
+                                                        Group By Operator [GBY_281] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_277] (rows=8116 width=1119)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_273]
+                                                <-Reducer 26 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_338]
+                                                    Group By Operator [GBY_337] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 25 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      PARTITION_ONLY_SHUFFLE [RS_318]
+                                                        Group By Operator [GBY_317] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_316] (rows=46000 width=460)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_314]
+                                        <-Map 24 [CONTAINS] vectorized
+                                          Reduce Output Operator [RS_344]
+                                            PartitionCols:_col1
+                                            Select Operator [SEL_343] (rows=28798881 width=106)
+                                              Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                              Filter Operator [FIL_342] (rows=28798881 width=106)
+                                                predicate:(cr_catalog_page_sk is not null and cr_returned_date_sk is not null)
+                                                TableScan [TS_258] (rows=28798881 width=106)
+                                                  Output:["cr_returned_date_sk","cr_catalog_page_sk","cr_return_amount","cr_net_loss"]
                   <-Reducer 18 [CONTAINS] vectorized
-                    Reduce Output Operator [RS_327]
+                    Reduce Output Operator [RS_334]
                       PartitionCols:_col0, _col1, _col2
-                      Group By Operator [GBY_326] (rows=2273797803 width=110)
+                      Group By Operator [GBY_333] (rows=2273797803 width=110)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L
-                        Select Operator [SEL_325] (rows=182955399 width=135)
-                          Output:["_col0","_col1","_col2","_col3","_col4"]
-                          Group By Operator [GBY_324] (rows=182955399 width=135)
-                            Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0
-                          <-Reducer 17 [SIMPLE_EDGE]
-                            SHUFFLE [RS_80]
-                              PartitionCols:_col0
-                              Group By Operator [GBY_79] (rows=365910798 width=135)
-                                Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col2)","sum(_col4)","sum(_col3)","sum(_col5)"],keys:_col9
-                                Merge Join Operator [MERGEJOIN_223] (rows=365910798 width=135)
-                                  Conds:RS_75._col0=RS_320._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col9"]
-                                <-Map 34 [SIMPLE_EDGE] vectorized
-                                  PARTITION_ONLY_SHUFFLE [RS_320]
-                                    PartitionCols:_col0
-                                    Select Operator [SEL_319] (rows=84 width=1850)
-                                      Output:["_col0","_col1"]
-                                      Filter Operator [FIL_318] (rows=84 width=1850)
-                                        predicate:web_site_sk is not null
-                                        TableScan [TS_69] (rows=84 width=1850)
-                                          default@web_site,web_site,Tbl:COMPLETE,Col:NONE,Output:["web_site_sk","web_site_id"]
-                                <-Reducer 16 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_75]
-                                    PartitionCols:_col0
-                                    Merge Join Operator [MERGEJOIN_222] (rows=332646173 width=135)
-                                      Conds:Union 28._col1=RS_274._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5"]
-                                    <-Map 10 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_274]
-                                        PartitionCols:_col0
-                                         Please refer to the previous Select Operator [SEL_269]
-                                    <-Union 28 [SIMPLE_EDGE]
-                                      <-Map 27 [CONTAINS] vectorized
-                                        Reduce Output Operator [RS_345]
-                                          PartitionCols:_col1
-                                          Select Operator [SEL_344] (rows=144002668 width=135)
-                                            Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                            Filter Operator [FIL_343] (rows=144002668 width=135)
-                                              predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_73_date_dim_d_date_sk_min) AND DynamicValue(RS_73_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_73_date_dim_d_date_sk_bloom_filter))) and (ws_web_site_sk BETWEEN DynamicValue(RS_76_web_site_web_site_sk_min) AND DynamicValue(RS_76_web_site_web_site_sk_max) and in_bloom_filter(ws_web_site_sk, DynamicValue(RS_76_web_site_web_site_sk_bloom_filter))) and ws_sold_date_sk is not null and ws_web_site_sk is not null)
-                                              TableScan [TS_259] (rows=144002668 width=135)
-                                                Output:["ws_sold_date_sk","ws_web_site_sk","ws_ext_sales_price","ws_net_profit"]
-                                              <-Reducer 35 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_341]
-                                                  Group By Operator [GBY_340] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 34 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_323]
-                                                      Group By Operator [GBY_322] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_321] (rows=84 width=1850)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_319]
-                                              <-Reducer 19 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_339]
-                                                  Group By Operator [GBY_338] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_281]
-                                                      Group By Operator [GBY_278] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_275] (rows=8116 width=1119)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_269]
-                                      <-Reducer 30 [CONTAINS]
-                                        Reduce Output Operator [RS_267]
-                                          PartitionCols:_col1
-                                          Select Operator [SEL_265] (rows=158402938 width=135)
-                                            Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                            Merge Join Operator [MERGEJOIN_264] (rows=158402938 width=135)
-                                              Conds:RS_361._col0, _col2=RS_348._col1, _col2(Inner),Output:["_col1","_col3","_col6","_col7"]
-                                            <-Map 31 [SIMPLE_EDGE] vectorized
-                                              PARTITION_ONLY_SHUFFLE [RS_348]
-                                                PartitionCols:_col1, _col2
-                                                Select Operator [SEL_347] (rows=14398467 width=92)
-                                                  Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                  Filter Operator [FIL_346] (rows=14398467 width=92)
-                                                    predicate:(wr_item_sk is not null and wr_order_number is not null and wr_returned_date_sk is not null)
-                                                    TableScan [TS_57] (rows=14398467 width=92)
-                                                      default@web_returns,web_returns,Tbl:COMPLETE,Col:NONE,Output:["wr_returned_date_sk","wr_item_sk","wr_order_number","wr_return_amt","wr_net_loss"]
-                                            <-Map 29 [SIMPLE_EDGE] vectorized
-                                              SHUFFLE [RS_361]
-                                                PartitionCols:_col0, _col2
-                                                Select Operator [SEL_360] (rows=144002668 width=135)
-                                                  Output:["_col0","_col1","_col2"]
-                                                  Filter Operator [FIL_359] (rows=144002668 width=135)
-                                                    predicate:((ws_item_sk BETWEEN DynamicValue(RS_61_web_returns_wr_item_sk_min) AND DynamicValue(RS_61_web_returns_wr_item_sk_max) and in_bloom_filter(ws_item_sk, DynamicValue(RS_61_web_returns_wr_item_sk_bloom_filter))) and (ws_order_number BETWEEN DynamicValue(RS_61_web_returns_wr_order_number_min) AND DynamicValue(RS_61_web_returns_wr_order_number_max) and in_bloom_filter(ws_order_number, DynamicValue(RS_61_web_returns_wr_order_number_bloom_filter))) and (ws_web_site_sk BETWEEN DynamicValue(RS_76_web_site_web_site_sk_min) AND DynamicValue(RS_76_web_site_web_site_sk_max) and in_bloom_filter(ws_web_site_sk, DynamicValue(RS_76_web_site_web_site_sk_bloom_filter))) and ws_item_sk is not null and ws_order_number is not null and ws_web_site_sk is not null)
-                                                    TableScan [TS_54] (rows=144002668 width=135)
-                                                      default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_item_sk","ws_web_site_sk","ws_order_number"]
-                                                    <-Reducer 35 [BROADCAST_EDGE] vectorized
-                                                      BROADCAST [RS_342]
-                                                         Please refer to the previous Group By Operator [GBY_340]
-                                                    <-Reducer 32 [BROADCAST_EDGE] vectorized
-                                                      BROADCAST [RS_356]
-                                                        Group By Operator [GBY_355] (rows=1 width=12)
-                                                          Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=14398467)"]
-                                                        <-Map 31 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                          PARTITION_ONLY_SHUFFLE [RS_353]
-                                                            Group By Operator [GBY_351] (rows=1 width=12)
-                                                              Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=14398467)"]
-                                                              Select Operator [SEL_349] (rows=14398467 width=92)
-                                                                Output:["_col0"]
-                                                                 Please refer to the previous Select Operator [SEL_347]
-                                                    <-Reducer 33 [BROADCAST_EDGE] vectorized
-                                                      BROADCAST [RS_358]
-                                                        Group By Operator [GBY_357] (rows=1 width=12)
-                                                          Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=14398467)"]
-                                                        <-Map 31 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                          PARTITION_ONLY_SHUFFLE [RS_354]
-                                                            Group By Operator [GBY_352] (rows=1 width=12)
-                                                              Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=14398467)"]
-                                                              Select Operator [SEL_350] (rows=14398467 width=92)
-                                                                Output:["_col0"]
-                                                                 Please refer to the previous Select Operator [SEL_347]
+                        Top N Key Operator [TNK_332] (rows=757932601 width=110)
+                          keys:_col0, _col1, 0L,sort order:+++,top n:100
+                          Select Operator [SEL_331] (rows=182955399 width=135)
+                            Output:["_col0","_col1","_col2","_col3","_col4"]
+                            Group By Operator [GBY_330] (rows=182955399 width=135)
+                              Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0
+                            <-Reducer 17 [SIMPLE_EDGE]
+                              SHUFFLE [RS_80]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_79] (rows=365910798 width=135)
+                                  Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col2)","sum(_col4)","sum(_col3)","sum(_col5)"],keys:_col9
+                                  Merge Join Operator [MERGEJOIN_224] (rows=365910798 width=135)
+                                    Conds:RS_75._col0=RS_326._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col9"]
+                                  <-Map 34 [SIMPLE_EDGE] vectorized
+                                    PARTITION_ONLY_SHUFFLE [RS_326]
+                                      PartitionCols:_col0
+                                      Select Operator [SEL_325] (rows=84 width=1850)
+                                        Output:["_col0","_col1"]
+                                        Filter Operator [FIL_324] (rows=84 width=1850)
+                                          predicate:web_site_sk is not null
+                                          TableScan [TS_69] (rows=84 width=1850)
+                                            default@web_site,web_site,Tbl:COMPLETE,Col:NONE,Output:["web_site_sk","web_site_id"]
+                                  <-Reducer 16 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_75]
+                                      PartitionCols:_col0
+                                      Merge Join Operator [MERGEJOIN_223] (rows=332646173 width=135)
+                                        Conds:Union 28._col1=RS_278._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5"]
+                                      <-Map 10 [SIMPLE_EDGE] vectorized
+                                        SHUFFLE [RS_278]
+                                          PartitionCols:_col0
+                                           Please refer to the previous Select Operator [SEL_273]
+                                      <-Union 28 [SIMPLE_EDGE]
+                                        <-Map 27 [CONTAINS] vectorized
+                                          Reduce Output Operator [RS_352]
+                                            PartitionCols:_col1
+                                            Select Operator [SEL_351] (rows=144002668 width=135)
+                                              Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                              Filter Operator [FIL_350] (rows=144002668 width=135)
+                                                predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_73_date_dim_d_date_sk_min) AND DynamicValue(RS_73_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_73_date_dim_d_date_sk_bloom_filter))) and (ws_web_site_sk BETWEEN DynamicValue(RS_76_web_site_web_site_sk_min) AND DynamicValue(RS_76_web_site_web_site_sk_max) and in_bloom_filter(ws_web_site_sk, DynamicValue(RS_76_web_site_web_site_sk_bloom_filter))) and ws_sold_date_sk is not null and ws_web_site_sk is not null)
+                                                TableScan [TS_263] (rows=144002668 width=135)
+                                                  Output:["ws_sold_date_sk","ws_web_site_sk","ws_ext_sales_price","ws_net_profit"]
+                                                <-Reducer 35 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_348]
+                                                    Group By Operator [GBY_347] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 34 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      PARTITION_ONLY_SHUFFLE [RS_329]
+                                                        Group By Operator [GBY_328] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_327] (rows=84 width=1850)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_325]
+                                                <-Reducer 19 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_346]
+                                                    Group By Operator [GBY_345] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      SHUFFLE [RS_285]
+                                                        Group By Operator [GBY_282] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_279] (rows=8116 width=1119)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_273]
+                                        <-Reducer 30 [CONTAINS]
+                                          Reduce Output Operator [RS_271]
+                                            PartitionCols:_col1
+                                            Select Operator [SEL_269] (rows=158402938 width=135)
+                                              Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                              Merge Join Operator [MERGEJOIN_268] (rows=158402938 width=135)
+                                                Conds:RS_368._col0, _col2=RS_355._col1, _col2(Inner),Output:["_col1","_col3","_col6","_col7"]
+                                              <-Map 31 [SIMPLE_EDGE] vectorized
+                                                PARTITION_ONLY_SHUFFLE [RS_355]
+                                                  PartitionCols:_col1, _col2
+                                                  Select Operator [SEL_354] (rows=14398467 width=92)
+                                                    Output:["_col0","_col1","_col2","_col3","_col4"]
+                                                    Filter Operator [FIL_353] (rows=14398467 width=92)
+                                                      predicate:(wr_item_sk is not null and wr_order_number is not null and wr_returned_date_sk is not null)
+                                                      TableScan [TS_57] (rows=14398467 width=92)
+                                                        default@web_returns,web_returns,Tbl:COMPLETE,Col:NONE,Output:["wr_returned_date_sk","wr_item_sk","wr_order_number","wr_return_amt","wr_net_loss"]
+                                              <-Map 29 [SIMPLE_EDGE] vectorized
+                                                SHUFFLE [RS_368]
+                                                  PartitionCols:_col0, _col2
+                                                  Select Operator [SEL_367] (rows=144002668 width=135)
+                                                    Output:["_col0","_col1","_col2"]
+                                                    Filter Operator [FIL_366] (rows=144002668 width=135)
+                                                      predicate:((ws_item_sk BETWEEN DynamicValue(RS_61_web_returns_wr_item_sk_min) AND DynamicValue(RS_61_web_returns_wr_item_sk_max) and in_bloom_filter(ws_item_sk, DynamicValue(RS_61_web_returns_wr_item_sk_bloom_filter))) and (ws_order_number BETWEEN DynamicValue(RS_61_web_returns_wr_order_number_min) AND DynamicValue(RS_61_web_returns_wr_order_number_max) and in_bloom_filter(ws_order_number, DynamicValue(RS_61_web_returns_wr_order_number_bloom_filter))) and (ws_web_site_sk BETWEEN DynamicValue(RS_76_web_site_web_site_sk_min) AND DynamicValue(RS_76_web_site_web_site_sk_max) and in_bloom_filter(ws_web_site_sk, DynamicValue(RS_76_web_site_web_site_sk_bloom_filter))) and ws_item_sk is not null and ws_order_number is not null and ws_web_site_sk is not null)
+                                                      TableScan [TS_54] (rows=144002668 width=135)
+                                                        default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_item_sk","ws_web_site_sk","ws_order_number"]
+                                                      <-Reducer 35 [BROADCAST_EDGE] vectorized
+                                                        BROADCAST [RS_349]
+                                                           Please refer to the previous Group By Operator [GBY_347]
+                                                      <-Reducer 32 [BROADCAST_EDGE] vectorized
+                                                        BROADCAST [RS_363]
+                                                          Group By Operator [GBY_362] (rows=1 width=12)
+                                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=14398467)"]
+                                                          <-Map 31 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                            PARTITION_ONLY_SHUFFLE [RS_360]
+                                                              Group By Operator [GBY_358] (rows=1 width=12)
+                                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=14398467)"]
+                                                                Select Operator [SEL_356] (rows=14398467 width=92)
+                                                                  Output:["_col0"]
+                                                                   Please refer to the previous Select Operator [SEL_354]
+                                                      <-Reducer 33 [BROADCAST_EDGE] vectorized
+                                                        BROADCAST [RS_365]
+                                                          Group By Operator [GBY_364] (rows=1 width=12)
+                                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=14398467)"]
+                                                          <-Map 31 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                            PARTITION_ONLY_SHUFFLE [RS_361]
+                                                              Group By Operator [GBY_359] (rows=1 width=12)
+                                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=14398467)"]
+                                                                Select Operator [SEL_357] (rows=14398467 width=92)
+                                                                  Output:["_col0"]
+                                                                   Please refer to the previous Select Operator [SEL_354]
                   <-Reducer 5 [CONTAINS] vectorized
-                    Reduce Output Operator [RS_298]
+                    Reduce Output Operator [RS_303]
                       PartitionCols:_col0, _col1, _col2
-                      Group By Operator [GBY_297] (rows=2273797803 width=110)
+                      Group By Operator [GBY_302] (rows=2273797803 width=110)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L
-                        Select Operator [SEL_296] (rows=383320021 width=87)
-                          Output:["_col0","_col1","_col2","_col3","_col4"]
-                          Group By Operator [GBY_295] (rows=383320021 width=87)
-                            Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0
-                          <-Reducer 4 [SIMPLE_EDGE]
-                            SHUFFLE [RS_22]
-                              PartitionCols:_col0
-                              Group By Operator [GBY_21] (rows=766640042 width=87)
-                                Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col2)","sum(_col4)","sum(_col3)","sum(_col5)"],keys:_col9
-                                Merge Join Operator [MERGEJOIN_219] (rows=766640042 width=87)
-                                  Conds:RS_17._col0=RS_286._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col9"]
-                                <-Map 20 [SIMPLE_EDGE] vectorized
-                                  SHUFFLE [RS_286]
-                                    PartitionCols:_col0
-                                    Select Operator [SEL_285] (rows=1704 width=1910)
-                                      Output:["_col0","_col1"]
-                                      Filter Operator [FIL_284] (rows=1704 width=1910)
-                                        predicate:s_store_sk is not null
-                                        TableScan [TS_11] (rows=1704 width=1910)
-                                          default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_id"]
-                                <-Reducer 3 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_17]
-                                    PartitionCols:_col0
-                                    Merge Join Operator [MERGEJOIN_218] (rows=696945478 width=87)
-                                      Conds:Union 2._col1=RS_270._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5"]
-                                    <-Map 10 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_270]
-                                        PartitionCols:_col0
-                                         Please refer to the previous Select Operator [SEL_269]
-                                    <-Union 2 [SIMPLE_EDGE]
-                                      <-Map 1 [CONTAINS] vectorized
-                                        Reduce Output Operator [RS_294]
-                                          PartitionCols:_col1
-                                          Select Operator [SEL_293] (rows=575995635 width=88)
-                                            Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                            Filter Operator [FIL_292] (rows=575995635 width=88)
-                                              predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_15_date_dim_d_date_sk_min) AND DynamicValue(RS_15_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_15_date_dim_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_18_store_s_store_sk_min) AND DynamicValue(RS_18_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_18_store_s_store_sk_bloom_filter))) and ss_sold_date_sk is not null and ss_store_sk is not null)
-                                              TableScan [TS_224] (rows=575995635 width=88)
-                                                Output:["ss_sold_date_sk","ss_store_sk","ss_ext_sales_price","ss_net_profit"]
-                                              <-Reducer 11 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_283]
-                                                  Group By Operator [GBY_282] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_279]
-                                                      Group By Operator [GBY_276] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_271] (rows=8116 width=1119)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_269]
-                                              <-Reducer 21 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_291]
-                                                  Group By Operator [GBY_290] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_289]
-                                                      Group By Operator [GBY_288] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_287] (rows=1704 width=1910)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_285]
-                                      <-Map 9 [CONTAINS] vectorized
-                                        Reduce Output Operator [RS_307]
-                                          PartitionCols:_col1
-                                          Select Operator [SEL_306] (rows=57591150 width=77)
-                                            Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                            Filter Operator [FIL_305] (rows=57591150 width=77)
-                                              predicate:(sr_returned_date_sk is not null and sr_store_sk is not null)
-                                              TableScan [TS_234] (rows=57591150 width=77)
-                                                Output:["sr_returned_date_sk","sr_store_sk","sr_return_amt","sr_net_loss"]
+                        Top N Key Operator [TNK_301] (rows=757932601 width=110)
+                          keys:_col0, _col1, 0L,sort order:+++,top n:100
+                          Select Operator [SEL_300] (rows=383320021 width=87)
+                            Output:["_col0","_col1","_col2","_col3","_col4"]
+                            Group By Operator [GBY_299] (rows=383320021 width=87)
+                              Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0
+                            <-Reducer 4 [SIMPLE_EDGE]
+                              SHUFFLE [RS_22]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_21] (rows=766640042 width=87)
+                                  Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col2)","sum(_col4)","sum(_col3)","sum(_col5)"],keys:_col9
+                                  Merge Join Operator [MERGEJOIN_220] (rows=766640042 width=87)
+                                    Conds:RS_17._col0=RS_290._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col9"]
+                                  <-Map 20 [SIMPLE_EDGE] vectorized
+                                    SHUFFLE [RS_290]
+                                      PartitionCols:_col0
+                                      Select Operator [SEL_289] (rows=1704 width=1910)
+                                        Output:["_col0","_col1"]
+                                        Filter Operator [FIL_288] (rows=1704 width=1910)
+                                          predicate:s_store_sk is not null
+                                          TableScan [TS_11] (rows=1704 width=1910)
+                                            default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_id"]
+                                  <-Reducer 3 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_17]
+                                      PartitionCols:_col0
+                                      Merge Join Operator [MERGEJOIN_219] (rows=696945478 width=87)
+                                        Conds:Union 2._col1=RS_274._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5"]
+                                      <-Map 10 [SIMPLE_EDGE] vectorized
+                                        SHUFFLE [RS_274]
+                                          PartitionCols:_col0
+                                           Please refer to the previous Select Operator [SEL_273]
+                                      <-Union 2 [SIMPLE_EDGE]
+                                        <-Map 1 [CONTAINS] vectorized
+                                          Reduce Output Operator [RS_298]
+                                            PartitionCols:_col1
+                                            Select Operator [SEL_297] (rows=575995635 width=88)
+                                              Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                              Filter Operator [FIL_296] (rows=575995635 width=88)
+                                                predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_15_date_dim_d_date_sk_min) AND DynamicValue(RS_15_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_15_date_dim_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_18_store_s_store_sk_min) AND DynamicValue(RS_18_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_18_store_s_store_sk_bloom_filter))) and ss_sold_date_sk is not null and ss_store_sk is not null)
+                                                TableScan [TS_225] (rows=575995635 width=88)
+                                                  Output:["ss_sold_date_sk","ss_store_sk","ss_ext_sales_price","ss_net_profit"]
+                                                <-Reducer 11 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_287]
+                                                    Group By Operator [GBY_286] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      SHUFFLE [RS_283]
+                                                        Group By Operator [GBY_280] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_275] (rows=8116 width=1119)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_273]
+                                                <-Reducer 21 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_295]
+                                                    Group By Operator [GBY_294] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      SHUFFLE [RS_293]
+                                                        Group By Operator [GBY_292] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_291] (rows=1704 width=1910)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_289]
+                                        <-Map 9 [CONTAINS] vectorized
+                                          Reduce Output Operator [RS_312]
+                                            PartitionCols:_col1
+                                            Select Operator [SEL_311] (rows=57591150 width=77)
+                                              Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                              Filter Operator [FIL_310] (rows=57591150 width=77)
+                                                predicate:(sr_returned_date_sk is not null and sr_store_sk is not null)
+                                                TableScan [TS_236] (rows=57591150 width=77)
+                                                  Output:["sr_returned_date_sk","sr_store_sk","sr_return_amt","sr_net_loss"]
 

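Note on the plan changes above: the rewritten hunks introduce a Top N Key Operator (for example [TNK_332] and [TNK_301]) directly beneath the map-side Group By, carrying the grouping keys, a sort order, and "top n:100" to match the query's LIMIT 100, so rows whose keys can no longer reach the final top 100 are dropped before shuffle and aggregation. As a rough illustration of that filtering idea only, the sketch below keeps a bounded, ordered set of the n best keys seen so far and decides per row whether to forward it. The class and method names, and the TreeSet-based bookkeeping, are assumptions made for this sketch; this is not Hive's actual TopNKeyOperator code.

import java.util.Comparator;
import java.util.TreeSet;

// Sketch only: a bounded "top n keys" filter in the spirit of the
// Top N Key Operator entries shown in these plans (keys, sort order, top n).
// The names here are invented for illustration, not Hive's implementation.
public class TopNKeySketch<K> {

  private final int topN;
  private final Comparator<? super K> sortOrder;
  private final TreeSet<K> keptKeys;       // ordered by the sort order above

  public TopNKeySketch(int topN, Comparator<? super K> sortOrder) {
    this.topN = topN;
    this.sortOrder = sortOrder;
    this.keptKeys = new TreeSet<>(sortOrder);
  }

  // True when a row with this key may still belong to the top n and should
  // be forwarded to the downstream Reduce Sink / Group By; false otherwise.
  public boolean canForward(K key) {
    if (keptKeys.contains(key)) {
      return true;                         // already among the kept top n keys
    }
    if (keptKeys.size() < topN) {
      keptKeys.add(key);                   // still room: keep it and forward
      return true;
    }
    if (sortOrder.compare(key, keptKeys.last()) >= 0) {
      return false;                        // cannot displace the current n-th key
    }
    keptKeys.add(key);                     // better key enters the kept set ...
    keptKeys.pollLast();                   // ... and the old boundary key leaves
    return true;
  }
}

A caller would invoke canForward(key) once per row and emit the row only when it returns true; keys already inside the retained set keep passing, which matches the operator's role as a lossless-for-the-top-n filter placed ahead of the grouping aggregation.
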
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query50.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query50.q.out b/ql/src/test/results/clientpositive/perf/tez/query50.q.out
index e723140..efbae5c 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query50.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query50.q.out
@@ -135,137 +135,139 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_155]
-        Limit [LIM_154] (rows=100 width=88)
+      File Output Operator [FS_156]
+        Limit [LIM_155] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_153] (rows=383325119 width=88)
+          Select Operator [SEL_154] (rows=383325119 width=88)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"]
           <-Reducer 6 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_152]
-              Group By Operator [GBY_151] (rows=383325119 width=88)
+            SHUFFLE [RS_153]
+              Group By Operator [GBY_152] (rows=383325119 width=88)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, KEY._col6, KEY._col7, KEY._col8, KEY._col9
               <-Reducer 5 [SIMPLE_EDGE]
                 SHUFFLE [RS_30]
                   PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
                   Group By Operator [GBY_29] (rows=766650239 width=88)
                     Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"],aggregations:["sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)"],keys:_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-                    Select Operator [SEL_27] (rows=766650239 width=88)
-                      Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"]
-                      Merge Join Operator [MERGEJOIN_119] (rows=766650239 width=88)
-                        Conds:RS_24._col10=RS_142._col0(Inner),Output:["_col0","_col7","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23"]
-                      <-Map 15 [SIMPLE_EDGE] vectorized
-                        SHUFFLE [RS_142]
-                          PartitionCols:_col0
-                          Select Operator [SEL_141] (rows=1704 width=1910)
-                            Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10"]
-                            Filter Operator [FIL_140] (rows=1704 width=1910)
-                              predicate:s_store_sk is not null
-                              TableScan [TS_12] (rows=1704 width=1910)
-                                default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name","s_company_id","s_street_number","s_street_name","s_street_type","s_suite_number","s_city","s_county","s_state","s_zip"]
-                      <-Reducer 4 [SIMPLE_EDGE]
-                        SHUFFLE [RS_24]
-                          PartitionCols:_col10
-                          Merge Join Operator [MERGEJOIN_118] (rows=696954748 width=88)
-                            Conds:RS_21._col7=RS_134._col0(Inner),Output:["_col0","_col7","_col10"]
-                          <-Map 13 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_134]
-                              PartitionCols:_col0
-                              Select Operator [SEL_133] (rows=73049 width=1119)
-                                Output:["_col0"]
-                                Filter Operator [FIL_132] (rows=73049 width=1119)
-                                  predicate:d_date_sk is not null
-                                  TableScan [TS_9] (rows=73049 width=1119)
-                                    default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk"]
-                          <-Reducer 3 [SIMPLE_EDGE]
-                            SHUFFLE [RS_21]
-                              PartitionCols:_col7
-                              Merge Join Operator [MERGEJOIN_117] (rows=633595212 width=88)
-                                Conds:RS_18._col1, _col2, _col3=RS_150._col1, _col2, _col4(Inner),Output:["_col0","_col7","_col10"]
-                              <-Reducer 2 [SIMPLE_EDGE]
-                                PARTITION_ONLY_SHUFFLE [RS_18]
-                                  PartitionCols:_col1, _col2, _col3
-                                  Merge Join Operator [MERGEJOIN_116] (rows=63350266 width=77)
-                                    Conds:RS_122._col0=RS_125._col0(Inner),Output:["_col0","_col1","_col2","_col3"]
-                                  <-Map 1 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_122]
-                                      PartitionCols:_col0
-                                      Select Operator [SEL_121] (rows=57591150 width=77)
-                                        Output:["_col0","_col1","_col2","_col3"]
-                                        Filter Operator [FIL_120] (rows=57591150 width=77)
-                                          predicate:(sr_customer_sk is not null and sr_item_sk is not null and sr_returned_date_sk is not null and sr_ticket_number is not null)
-                                          TableScan [TS_0] (rows=57591150 width=77)
-                                            default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_returned_date_sk","sr_item_sk","sr_customer_sk","sr_ticket_number"]
-                                  <-Map 11 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_125]
-                                      PartitionCols:_col0
-                                      Select Operator [SEL_124] (rows=18262 width=1119)
-                                        Output:["_col0"]
-                                        Filter Operator [FIL_123] (rows=18262 width=1119)
-                                          predicate:((d_moy = 9) and (d_year = 2000) and d_date_sk is not null)
-                                          TableScan [TS_3] (rows=73049 width=1119)
-                                            default@date_dim,d2,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
-                              <-Map 12 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_150]
-                                  PartitionCols:_col1, _col2, _col4
-                                  Select Operator [SEL_149] (rows=575995635 width=88)
-                                    Output:["_col0","_col1","_col2","_col3","_col4"]
-                                    Filter Operator [FIL_148] (rows=575995635 width=88)
-                                      predicate:((ss_customer_sk BETWEEN DynamicValue(RS_18_store_returns_sr_customer_sk_min) AND DynamicValue(RS_18_store_returns_sr_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_18_store_returns_sr_customer_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_18_store_returns_sr_item_sk_min) AND DynamicValue(RS_18_store_returns_sr_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_18_store_returns_sr_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_22_d1_d_date_sk_min) AND DynamicValue(RS_22_d1_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_22_d1_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_25_store_s_store_sk_min) AND DynamicValue(RS_25_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_25_store_s_store_sk_bloom_filter))) and (ss_ticket_number BETWEEN DynamicValue(RS_18_store_returns_sr_ticket_number_min) AND DynamicValue(RS_18_store_returns_sr_ticket_number_max) and in_bloom_filter(ss_ticket_number, DynamicValue(RS_18_store_returns_sr_ticket_number_bloom_filter))) and ss_customer_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_ticket_number is not null)
-                                      TableScan [TS_6] (rows=575995635 width=88)
-                                        default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_store_sk","ss_ticket_number"]
-                                      <-Reducer 10 [BROADCAST_EDGE] vectorized
-                                        BROADCAST [RS_131]
-                                          Group By Operator [GBY_130] (rows=1 width=12)
-                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
-                                          <-Reducer 2 [CUSTOM_SIMPLE_EDGE]
-                                            PARTITION_ONLY_SHUFFLE [RS_93]
-                                              Group By Operator [GBY_92] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
-                                                Select Operator [SEL_91] (rows=63350266 width=77)
-                                                  Output:["_col0"]
-                                                   Please refer to the previous Merge Join Operator [MERGEJOIN_116]
-                                      <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                        BROADCAST [RS_139]
-                                          Group By Operator [GBY_138] (rows=1 width=12)
-                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                          <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_137]
-                                              Group By Operator [GBY_136] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                Select Operator [SEL_135] (rows=73049 width=1119)
-                                                  Output:["_col0"]
-                                                   Please refer to the previous Select Operator [SEL_133]
-                                      <-Reducer 16 [BROADCAST_EDGE] vectorized
-                                        BROADCAST [RS_147]
-                                          Group By Operator [GBY_146] (rows=1 width=12)
-                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                          <-Map 15 [CUSTOM_SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_145]
-                                              Group By Operator [GBY_144] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                Select Operator [SEL_143] (rows=1704 width=1910)
-                                                  Output:["_col0"]
-                                                   Please refer to the previous Select Operator [SEL_141]
-                                      <-Reducer 8 [BROADCAST_EDGE] vectorized
-                                        BROADCAST [RS_127]
-                                          Group By Operator [GBY_126] (rows=1 width=12)
-                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
-                                          <-Reducer 2 [CUSTOM_SIMPLE_EDGE]
-                                            PARTITION_ONLY_SHUFFLE [RS_83]
-                                              Group By Operator [GBY_82] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
-                                                Select Operator [SEL_81] (rows=63350266 width=77)
-                                                  Output:["_col0"]
-                                                   Please refer to the previous Merge Join Operator [MERGEJOIN_116]
-                                      <-Reducer 9 [BROADCAST_EDGE] vectorized
-                                        BROADCAST [RS_129]
-                                          Group By Operator [GBY_128] (rows=1 width=12)
-                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
-                                          <-Reducer 2 [CUSTOM_SIMPLE_EDGE]
-                                            PARTITION_ONLY_SHUFFLE [RS_88]
-                                              Group By Operator [GBY_87] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
-                                                Select Operator [SEL_86] (rows=63350266 width=77)
-                                                  Output:["_col0"]
-                                                   Please refer to the previous Merge Join Operator [MERGEJOIN_116]
+                    Top N Key Operator [TNK_56] (rows=766650239 width=88)
+                      keys:_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9,sort order:++++++++++,top n:100
+                      Select Operator [SEL_27] (rows=766650239 width=88)
+                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"]
+                        Merge Join Operator [MERGEJOIN_120] (rows=766650239 width=88)
+                          Conds:RS_24._col10=RS_143._col0(Inner),Output:["_col0","_col7","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23"]
+                        <-Map 15 [SIMPLE_EDGE] vectorized
+                          SHUFFLE [RS_143]
+                            PartitionCols:_col0
+                            Select Operator [SEL_142] (rows=1704 width=1910)
+                              Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10"]
+                              Filter Operator [FIL_141] (rows=1704 width=1910)
+                                predicate:s_store_sk is not null
+                                TableScan [TS_12] (rows=1704 width=1910)
+                                  default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name","s_company_id","s_street_number","s_street_name","s_street_type","s_suite_number","s_city","s_county","s_state","s_zip"]
+                        <-Reducer 4 [SIMPLE_EDGE]
+                          SHUFFLE [RS_24]
+                            PartitionCols:_col10
+                            Merge Join Operator [MERGEJOIN_119] (rows=696954748 width=88)
+                              Conds:RS_21._col7=RS_135._col0(Inner),Output:["_col0","_col7","_col10"]
+                            <-Map 13 [SIMPLE_EDGE] vectorized
+                              SHUFFLE [RS_135]
+                                PartitionCols:_col0
+                                Select Operator [SEL_134] (rows=73049 width=1119)
+                                  Output:["_col0"]
+                                  Filter Operator [FIL_133] (rows=73049 width=1119)
+                                    predicate:d_date_sk is not null
+                                    TableScan [TS_9] (rows=73049 width=1119)
+                                      default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk"]
+                            <-Reducer 3 [SIMPLE_EDGE]
+                              SHUFFLE [RS_21]
+                                PartitionCols:_col7
+                                Merge Join Operator [MERGEJOIN_118] (rows=633595212 width=88)
+                                  Conds:RS_18._col1, _col2, _col3=RS_151._col1, _col2, _col4(Inner),Output:["_col0","_col7","_col10"]
+                                <-Reducer 2 [SIMPLE_EDGE]
+                                  PARTITION_ONLY_SHUFFLE [RS_18]
+                                    PartitionCols:_col1, _col2, _col3
+                                    Merge Join Operator [MERGEJOIN_117] (rows=63350266 width=77)
+                                      Conds:RS_123._col0=RS_126._col0(Inner),Output:["_col0","_col1","_col2","_col3"]
+                                    <-Map 1 [SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_123]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_122] (rows=57591150 width=77)
+                                          Output:["_col0","_col1","_col2","_col3"]
+                                          Filter Operator [FIL_121] (rows=57591150 width=77)
+                                            predicate:(sr_customer_sk is not null and sr_item_sk is not null and sr_returned_date_sk is not null and sr_ticket_number is not null)
+                                            TableScan [TS_0] (rows=57591150 width=77)
+                                              default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_returned_date_sk","sr_item_sk","sr_customer_sk","sr_ticket_number"]
+                                    <-Map 11 [SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_126]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_125] (rows=18262 width=1119)
+                                          Output:["_col0"]
+                                          Filter Operator [FIL_124] (rows=18262 width=1119)
+                                            predicate:((d_moy = 9) and (d_year = 2000) and d_date_sk is not null)
+                                            TableScan [TS_3] (rows=73049 width=1119)
+                                              default@date_dim,d2,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
+                                <-Map 12 [SIMPLE_EDGE] vectorized
+                                  SHUFFLE [RS_151]
+                                    PartitionCols:_col1, _col2, _col4
+                                    Select Operator [SEL_150] (rows=575995635 width=88)
+                                      Output:["_col0","_col1","_col2","_col3","_col4"]
+                                      Filter Operator [FIL_149] (rows=575995635 width=88)
+                                        predicate:((ss_customer_sk BETWEEN DynamicValue(RS_18_store_returns_sr_customer_sk_min) AND DynamicValue(RS_18_store_returns_sr_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_18_store_returns_sr_customer_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_18_store_returns_sr_item_sk_min) AND DynamicValue(RS_18_store_returns_sr_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_18_store_returns_sr_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_22_d1_d_date_sk_min) AND DynamicValue(RS_22_d1_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_22_d1_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_25_store_s_store_sk_min) AND DynamicValue(RS_25_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_25_store_s_store_sk_bloom_filter))) and (ss_ticket_number BETWEEN DynamicValue(RS_18_store_returns_sr_ticket_number_min) AND DynamicValue(RS_18_store_returns_sr_ticket_number_max) and in_bloom_filter(ss_ticket_number, DynamicValue(RS_18_store_returns_sr_ticket_number_bloom_filter))) and ss_customer_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_ticket_number is not null)
+                                        TableScan [TS_6] (rows=575995635 width=88)
+                                          default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_store_sk","ss_ticket_number"]
+                                        <-Reducer 10 [BROADCAST_EDGE] vectorized
+                                          BROADCAST [RS_132]
+                                            Group By Operator [GBY_131] (rows=1 width=12)
+                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
+                                            <-Reducer 2 [CUSTOM_SIMPLE_EDGE]
+                                              PARTITION_ONLY_SHUFFLE [RS_94]
+                                                Group By Operator [GBY_93] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
+                                                  Select Operator [SEL_92] (rows=63350266 width=77)
+                                                    Output:["_col0"]
+                                                     Please refer to the previous Merge Join Operator [MERGEJOIN_117]
+                                        <-Reducer 14 [BROADCAST_EDGE] vectorized
+                                          BROADCAST [RS_140]
+                                            Group By Operator [GBY_139] (rows=1 width=12)
+                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                            <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
+                                              SHUFFLE [RS_138]
+                                                Group By Operator [GBY_137] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                  Select Operator [SEL_136] (rows=73049 width=1119)
+                                                    Output:["_col0"]
+                                                     Please refer to the previous Select Operator [SEL_134]
+                                        <-Reducer 16 [BROADCAST_EDGE] vectorized
+                                          BROADCAST [RS_148]
+                                            Group By Operator [GBY_147] (rows=1 width=12)
+                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                            <-Map 15 [CUSTOM_SIMPLE_EDGE] vectorized
+                                              SHUFFLE [RS_146]
+                                                Group By Operator [GBY_145] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                  Select Operator [SEL_144] (rows=1704 width=1910)
+                                                    Output:["_col0"]
+                                                     Please refer to the previous Select Operator [SEL_142]
+                                        <-Reducer 8 [BROADCAST_EDGE] vectorized
+                                          BROADCAST [RS_128]
+                                            Group By Operator [GBY_127] (rows=1 width=12)
+                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
+                                            <-Reducer 2 [CUSTOM_SIMPLE_EDGE]
+                                              PARTITION_ONLY_SHUFFLE [RS_84]
+                                                Group By Operator [GBY_83] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
+                                                  Select Operator [SEL_82] (rows=63350266 width=77)
+                                                    Output:["_col0"]
+                                                     Please refer to the previous Merge Join Operator [MERGEJOIN_117]
+                                        <-Reducer 9 [BROADCAST_EDGE] vectorized
+                                          BROADCAST [RS_130]
+                                            Group By Operator [GBY_129] (rows=1 width=12)
+                                              Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
+                                            <-Reducer 2 [CUSTOM_SIMPLE_EDGE]
+                                              PARTITION_ONLY_SHUFFLE [RS_89]
+                                                Group By Operator [GBY_88] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
+                                                  Select Operator [SEL_87] (rows=63350266 width=77)
+                                                    Output:["_col0"]
+                                                     Please refer to the previous Merge Join Operator [MERGEJOIN_117]
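
The rewritten plan above feeds min/max and bloom_filter aggregations from the broadcast reducers into the
store_sales scan, whose predicate then combines a DynamicValue range check with in_bloom_filter so rows
that cannot join are dropped before the merge join. As a rough illustration of that semi-join reduction
idea only (this is a minimal sketch using Guava's BloomFilter, not Hive's implementation):

    import com.google.common.hash.BloomFilter;
    import com.google.common.hash.Funnels;
    import java.util.List;
    import java.util.stream.Collectors;

    public class SemiJoinReductionSketch {
      // Build min/max and a Bloom filter over the small side's join keys, then keep only
      // probe-side keys that fall inside the range and might be present in the filter.
      public static List<Long> filterProbeKeys(List<Long> buildKeys, List<Long> probeKeys) {
        long min = buildKeys.stream().mapToLong(Long::longValue).min().orElse(Long.MIN_VALUE);
        long max = buildKeys.stream().mapToLong(Long::longValue).max().orElse(Long.MAX_VALUE);
        BloomFilter<Long> bloom = BloomFilter.create(Funnels.longFunnel(), buildKeys.size());
        buildKeys.forEach(bloom::put);
        // Mirrors "key BETWEEN DynamicValue(min) AND DynamicValue(max)
        //          and in_bloom_filter(key, DynamicValue(bloom))" from the plan.
        return probeKeys.stream()
            .filter(k -> k >= min && k <= max && bloom.mightContain(k))
            .collect(Collectors.toList());
      }
    }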
 


[27/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
index 0000000,54bf3d7..0aab253
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
@@@ -1,0 -1,1117 +1,1154 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore.client;
+ 
++import java.net.ProtocolException;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.List;
+ 
+ import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+ import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+ import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+ import org.apache.thrift.TException;
++import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.transport.TTransportException;
+ 
+ import com.google.common.collect.Lists;
+ 
+ import org.junit.After;
+ import org.junit.AfterClass;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.junit.runner.RunWith;
+ import org.junit.runners.Parameterized;
+ 
+ import static java.util.stream.Collectors.joining;
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+ import static org.junit.Assert.assertEquals;
+ import static org.junit.Assert.assertFalse;
+ import static org.junit.Assert.assertNotEquals;
+ import static org.junit.Assert.assertTrue;
+ import static org.junit.Assert.fail;
+ 
+ /**
+  * API tests for HMS client's alterPartitions methods.
+  */
+ @RunWith(Parameterized.class)
+ @Category(MetastoreCheckinTest.class)
+ public class TestAlterPartitions extends MetaStoreClientTest {
+   private static final int NEW_CREATE_TIME = 123456789;
+   private AbstractMetaStoreService metaStore;
+   private IMetaStoreClient client;
+ 
+   private static final String DB_NAME = "testpartdb";
+   private static final String TABLE_NAME = "testparttable";
+   private static final List<String> PARTCOL_SCHEMA = Lists.newArrayList("yyyy", "mm", "dd");
+ 
+   public TestAlterPartitions(String name, AbstractMetaStoreService metaStore) {
+     this.metaStore = metaStore;
+   }
+ 
+   @Before
+   public void setUp() throws Exception {
+     // Get new client
+     client = metaStore.getClient();
+ 
+     // Clean up the database
+     client.dropDatabase(DB_NAME, true, true, true);
+     metaStore.cleanWarehouseDirs();
+     createDB(DB_NAME);
+   }
+ 
+   @After
+   public void tearDown() throws Exception {
+     try {
+       if (client != null) {
+         try {
+           client.close();
+         } catch (Exception e) {
+           // HIVE-19729: Swallow the exceptions based on the discussion in the Jira
+         }
+       }
+     } finally {
+       client = null;
+     }
+   }
+ 
+   private void createDB(String dbName) throws TException {
+     new DatabaseBuilder().
+             setName(dbName).
+             create(client, metaStore.getConf());
+   }
+ 
+   private Table createTestTable(IMetaStoreClient client, String dbName, String tableName,
+                                        List<String> partCols, boolean setPartitionLevelPrivilages)
+           throws Exception {
+     TableBuilder builder = new TableBuilder()
+             .setDbName(dbName)
+             .setTableName(tableName)
+             .addCol("id", "int")
+             .addCol("name", "string");
+ 
+     partCols.forEach(col -> builder.addPartCol(col, "string"));
+     Table table = builder.build(metaStore.getConf());
+ 
+     if (setPartitionLevelPrivilages) {
+       table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true");
+     }
+ 
+     client.createTable(table);
+     return table;
+   }
+ 
+   private void addPartition(IMetaStoreClient client, Table table, List<String> values)
+           throws TException {
+     PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table);
+     values.forEach(val -> partitionBuilder.addValue(val));
+     client.add_partition(partitionBuilder.build(metaStore.getConf()));
+   }
+ 
+   private List<List<String>> createTable4PartColsParts(IMetaStoreClient client) throws
+           Exception {
+     Table t = createTestTable(client, DB_NAME, TABLE_NAME, PARTCOL_SCHEMA, false);
+     List<List<String>> testValues = Lists.newArrayList(
+             Lists.newArrayList("1999", "01", "02"),
+             Lists.newArrayList("2009", "02", "10"),
+             Lists.newArrayList("2017", "10", "26"),
+             Lists.newArrayList("2017", "11", "27"));
+ 
+     for(List<String> vals : testValues){
+       addPartition(client, t, vals);
+     }
+ 
+     return testValues;
+   }
+ 
+   private static void assertPartitionsHaveCorrectValues(List<Partition> partitions,
+                                     List<List<String>> testValues) throws Exception {
+     assertEquals(testValues.size(), partitions.size());
+     for (int i = 0; i < partitions.size(); ++i) {
+       assertEquals(testValues.get(i), partitions.get(i).getValues());
+     }
+   }
+ 
+   private static void makeTestChangesOnPartition(Partition partition) {
+     partition.getParameters().put("hmsTestParam001", "testValue001");
+     partition.setCreateTime(NEW_CREATE_TIME);
+     partition.setLastAccessTime(NEW_CREATE_TIME);
+     partition.getSd().setLocation(partition.getSd().getLocation()+"/hh=01");
+     partition.getSd().getCols().add(new FieldSchema("newcol", "string", ""));
+   }
+ 
+   private void assertPartitionUnchanged(Partition partition, List<String> testValues,
+                                                List<String> partCols) throws MetaException {
+     assertFalse(partition.getParameters().containsKey("hmsTestParam001"));
+ 
+     List<String> expectedKVPairs = new ArrayList<>();
+     for (int i = 0; i < partCols.size(); ++i) {
+       expectedKVPairs.add(partCols.get(i) + "=" + testValues.get(i));
+     }
+     String partPath = expectedKVPairs.stream().collect(joining("/"));
+     assertTrue(partition.getSd().getLocation().equals(metaStore.getWarehouseRoot()
+         + "/testpartdb.db/testparttable/" + partPath));
+     assertNotEquals(NEW_CREATE_TIME, partition.getCreateTime());
+     assertNotEquals(NEW_CREATE_TIME, partition.getLastAccessTime());
+     assertEquals(2, partition.getSd().getCols().size());
+   }
+ 
+   private void assertPartitionChanged(Partition partition, List<String> testValues,
+                                       List<String> partCols) throws MetaException {
+     assertEquals("testValue001", partition.getParameters().get("hmsTestParam001"));
+ 
+     List<String> expectedKVPairs = new ArrayList<>();
+     for (int i = 0; i < partCols.size(); ++i) {
+       expectedKVPairs.add(partCols.get(i) + "=" + testValues.get(i));
+     }
+     String partPath = expectedKVPairs.stream().collect(joining("/"));
+     assertTrue(partition.getSd().getLocation().equals(metaStore.getWarehouseRoot()
+         + "/testpartdb.db/testparttable/" + partPath + "/hh=01"));
+     assertEquals(NEW_CREATE_TIME, partition.getCreateTime());
+     assertEquals(NEW_CREATE_TIME, partition.getLastAccessTime());
+     assertEquals(3, partition.getSd().getCols().size());
+   }
+ 
+ 
+ 
+   /**
+    * Testing alter_partition(String,String,Partition) ->
+    *         alter_partition_with_environment_context(String,String,Partition,null).
+    */
+   @Test
+   public void testAlterPartition() throws Exception {
+     List<List<String>> testValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition oldPart = oldParts.get(3);
+ 
+     assertPartitionUnchanged(oldPart, testValues.get(3), PARTCOL_SCHEMA);
+     makeTestChangesOnPartition(oldPart);
+ 
+     client.alter_partition(DB_NAME, TABLE_NAME, oldPart);
+ 
+     List<Partition> newParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition newPart = newParts.get(3);
+     assertPartitionChanged(newPart, testValues.get(3), PARTCOL_SCHEMA);
+     assertPartitionsHaveCorrectValues(newParts, testValues);
+ 
+   }
+ 
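+   // Illustrative-only sketch, not invoked by any test: per the Javadoc above, the
+   // three-argument alter_partition is assumed to behave like the EnvironmentContext
+   // overload called with a null context. The "note" parameter key is hypothetical.
+   private void alterPartitionUsageSketch(Partition part, String note) throws TException {
+     part.getParameters().put("note", note);                  // mutate the fetched partition
+     client.alter_partition(DB_NAME, TABLE_NAME, part);       // short form
+     client.alter_partition(DB_NAME, TABLE_NAME, part, null); // equivalent explicit-null form
+   }
+ 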
+   @Test
+   public void otherCatalog() throws TException {
+     String catName = "alter_partition_catalog";
+     Catalog cat = new CatalogBuilder()
+         .setName(catName)
+         .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
+         .build();
+     client.createCatalog(cat);
+ 
+     String dbName = "alter_partition_database_in_other_catalog";
+     Database db = new DatabaseBuilder()
+         .setName(dbName)
+         .setCatalogName(catName)
+         .create(client, metaStore.getConf());
+ 
+     String tableName = "table_in_other_catalog";
+     Table table = new TableBuilder()
+         .inDb(db)
+         .setTableName(tableName)
+         .addCol("id", "int")
+         .addCol("name", "string")
+         .addPartCol("partcol", "string")
+         .create(client, metaStore.getConf());
+ 
+     Partition[] parts = new Partition[5];
+     for (int i = 0; i < 5; i++) {
+       parts[i] = new PartitionBuilder()
+           .inTable(table)
+           .addValue("a" + i)
+           .setLocation(MetaStoreTestUtils.getTestWarehouseDir("b" + i))
+           .build(metaStore.getConf());
+     }
+     client.add_partitions(Arrays.asList(parts));
+ 
+     Partition newPart =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a0"));
+     newPart.getParameters().put("test_key", "test_value");
+     client.alter_partition(catName, dbName, tableName, newPart);
+ 
+     Partition fetched =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a0"));
+     Assert.assertEquals(catName, fetched.getCatName());
+     Assert.assertEquals("test_value", fetched.getParameters().get("test_key"));
+ 
+     newPart =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a1"));
+     newPart.setLastAccessTime(3);
+     Partition newPart1 =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a2"));
+     newPart1.getSd().setLocation(MetaStoreTestUtils.getTestWarehouseDir("somewhere"));
+     client.alter_partitions(catName, dbName, tableName, Arrays.asList(newPart, newPart1));
+     fetched =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a1"));
+     Assert.assertEquals(catName, fetched.getCatName());
+     Assert.assertEquals(3L, fetched.getLastAccessTime());
+     fetched =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a2"));
+     Assert.assertEquals(catName, fetched.getCatName());
+     Assert.assertTrue(fetched.getSd().getLocation().contains("somewhere"));
+ 
+     newPart =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a4"));
+     newPart.getParameters().put("test_key", "test_value");
+     EnvironmentContext ec = new EnvironmentContext();
+     ec.setProperties(Collections.singletonMap("a", "b"));
+     client.alter_partition(catName, dbName, tableName, newPart, ec);
+     fetched =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a4"));
+     Assert.assertEquals(catName, fetched.getCatName());
+     Assert.assertEquals("test_value", fetched.getParameters().get("test_key"));
+ 
+ 
+     client.dropDatabase(catName, dbName, true, true, true);
+     client.dropCatalog(catName);
+   }
+ 
+   @SuppressWarnings("deprecation")
+   @Test
+   public void deprecatedCalls() throws TException {
+     String tableName = "deprecated_table";
+     Table table = new TableBuilder()
+         .setTableName(tableName)
+         .addCol("id", "int")
+         .addCol("name", "string")
+         .addPartCol("partcol", "string")
+         .create(client, metaStore.getConf());
+ 
+     Partition[] parts = new Partition[5];
+     for (int i = 0; i < 5; i++) {
+       parts[i] = new PartitionBuilder()
+           .inTable(table)
+           .addValue("a" + i)
+           .setLocation(MetaStoreTestUtils.getTestWarehouseDir("a" + i))
+           .build(metaStore.getConf());
+     }
+     client.add_partitions(Arrays.asList(parts));
+ 
+     Partition newPart =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a0"));
+     newPart.getParameters().put("test_key", "test_value");
+     client.alter_partition(DEFAULT_DATABASE_NAME, tableName, newPart);
+ 
+     Partition fetched =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a0"));
+     Assert.assertEquals("test_value", fetched.getParameters().get("test_key"));
+ 
+     newPart =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a1"));
+     newPart.setLastAccessTime(3);
+     Partition newPart1 =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a2"));
+     newPart1.getSd().setLocation("somewhere");
+     client.alter_partitions(DEFAULT_DATABASE_NAME, tableName, Arrays.asList(newPart, newPart1));
+     fetched =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a1"));
+     Assert.assertEquals(3L, fetched.getLastAccessTime());
+     fetched =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a2"));
+     Assert.assertTrue(fetched.getSd().getLocation().contains("somewhere"));
+ 
+     newPart =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a3"));
+     newPart.setValues(Collections.singletonList("b3"));
+     client.renamePartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a3"), newPart);
+     fetched =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("b3"));
+     Assert.assertEquals(1, fetched.getValuesSize());
+     Assert.assertEquals("b3", fetched.getValues().get(0));
+ 
+     newPart =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a4"));
+     newPart.getParameters().put("test_key", "test_value");
+     EnvironmentContext ec = new EnvironmentContext();
+     ec.setProperties(Collections.singletonMap("a", "b"));
+     client.alter_partition(DEFAULT_DATABASE_NAME, tableName, newPart, ec);
+     fetched =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a4"));
+     Assert.assertEquals("test_value", fetched.getParameters().get("test_key"));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionUnknownPartition() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionIncompletePartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionMissingPartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionBogusCatalogName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition("nosuch", DB_NAME, TABLE_NAME, partitions.get(3));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionNoDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition("", TABLE_NAME, partitions.get(3));
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionNullDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
 -    client.alter_partition(null, TABLE_NAME, partitions.get(3));
++    try {
++      client.alter_partition(null, TABLE_NAME, partitions.get(3));
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
++      // By design.
++    }
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionNoTblName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition(DB_NAME, "", partitions.get(3));
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionNullTblName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
 -    client.alter_partition(DB_NAME, null, partitions.get(3));
++    try {
++      client.alter_partition(DB_NAME, null, partitions.get(3));
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
++      // By design.
++    }
+   }
+ 
+   @Test
+   public void testAlterPartitionNullPartition() throws Exception {
+     try {
+       createTable4PartColsParts(client);
+       List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+       client.alter_partition(DB_NAME, TABLE_NAME, null);
+       fail("Should have thrown exception");
+     } catch (NullPointerException | TTransportException e) {
+       //TODO: should not throw different exceptions for different HMS deployment types
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionChangeDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setDbName(DB_NAME+"_changed");
+     client.alter_partition(DB_NAME, TABLE_NAME, partition);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionChangeTableName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setTableName(TABLE_NAME+"_changed");
+     client.alter_partition(DB_NAME, TABLE_NAME, partition);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionChangeValues() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setValues(Lists.newArrayList("1", "2", "3"));
+     client.alter_partition(DB_NAME, TABLE_NAME, partition);
+   }
+ 
+ 
+   /**
+    * Testing alter_partition(String,String,Partition,EnvironmentContext) ->
+    *         alter_partition_with_environment_context(String,String,Partition,EnvironmentContext).
+    */
+   @Test
+   public void testAlterPartitionWithEnvironmentCtx() throws Exception {
+     EnvironmentContext context = new EnvironmentContext();
+     context.setProperties(new HashMap<String, String>(){
+       {
+         put("TestKey", "TestValue");
+       }
+     });
+ 
+     List<List<String>> testValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = oldParts.get(3);
+ 
+     assertPartitionUnchanged(partition, testValues.get(3), PARTCOL_SCHEMA);
+     makeTestChangesOnPartition(partition);
+ 
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, context);
+ 
+     List<Partition> newParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     partition = newParts.get(3);
+     assertPartitionChanged(partition, testValues.get(3), PARTCOL_SCHEMA);
+     assertPartitionsHaveCorrectValues(newParts, testValues);
+ 
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, new EnvironmentContext());
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, null);
+   }
+ 
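+   // Illustrative-only sketch, not invoked by any test: the EnvironmentContext overload lets
+   // the caller attach key/value hints to the alter call; the key and value below are
+   // hypothetical and mirror the setProperties usage in the test above.
+   private void alterPartitionWithContextUsageSketch(Partition part) throws TException {
+     EnvironmentContext ctx = new EnvironmentContext();
+     ctx.setProperties(Collections.singletonMap("exampleHint", "exampleValue"));
+     client.alter_partition(DB_NAME, TABLE_NAME, part, ctx);
+   }
+ 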
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionWithEnvironmentCtxUnknownPartition() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part, new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionWithEnvironmentCtxIncompletePartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part, new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionWithEnvironmentCtxMissingPartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part, new EnvironmentContext());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionWithEnvironmentCtxNoDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition("", TABLE_NAME, partitions.get(3), new EnvironmentContext());
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionWithEnvironmentCtxNullDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
 -    client.alter_partition(null, TABLE_NAME, partitions.get(3), new EnvironmentContext());
++    try {
++      client.alter_partition(null, TABLE_NAME, partitions.get(3), new EnvironmentContext());
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
++      // By design.
++    }
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionWithEnvironmentCtxNoTblName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition(DB_NAME, "", partitions.get(3), new EnvironmentContext());
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionWithEnvironmentCtxNullTblName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
 -    client.alter_partition(DB_NAME, null, partitions.get(3), new EnvironmentContext());
++    try {
++      client.alter_partition(DB_NAME, null, partitions.get(3), new EnvironmentContext());
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
++      // By design.
++    }
+   }
+ 
+   @Test
+   public void testAlterPartitionWithEnvironmentCtxNullPartition() throws Exception {
+     try {
+       createTable4PartColsParts(client);
+       List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
+       client.alter_partition(DB_NAME, TABLE_NAME, null, new EnvironmentContext());
+       fail("Should have thrown exception");
+     } catch (NullPointerException | TTransportException e) {
+       //TODO: should not throw different exceptions for different HMS deployment types
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionWithEnvironmentCtxChangeDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setDbName(DB_NAME+"_changed");
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionWithEnvironmentCtxChangeTableName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setTableName(TABLE_NAME+"_changed");
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, new EnvironmentContext());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionWithEnvironmentCtxChangeValues() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setValues(Lists.newArrayList("1", "2", "3"));
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, new EnvironmentContext());
+   }
+ 
+ 
+ 
+   /**
+    * Testing
+    *    alter_partitions(String,String,List(Partition)) ->
+    *    alter_partitions_with_environment_context(String,String,List(Partition),null).
+    */
+   @Test
+   public void testAlterPartitions() throws Exception {
+     List<List<String>> testValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     for (int i = 0; i < testValues.size(); ++i) {
+       assertPartitionUnchanged(oldParts.get(i), testValues.get(i), PARTCOL_SCHEMA);
+     }
+     oldParts.forEach(p -> makeTestChangesOnPartition(p));
+ 
+     client.alter_partitions(DB_NAME, TABLE_NAME, oldParts);
+ 
+     List<Partition> newParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     for (int i = 0; i < testValues.size(); ++i) {
+       assertPartitionChanged(oldParts.get(i), testValues.get(i), PARTCOL_SCHEMA);
+     }
+     assertPartitionsHaveCorrectValues(newParts, testValues);
+   }
+ 
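+   // Illustrative-only sketch, not invoked by any test: the list overload exercised above
+   // applies a whole batch of partition changes in a single metastore call; the parameter
+   // key used here is hypothetical.
+   private void alterPartitionsUsageSketch() throws TException {
+     List<Partition> parts = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
+     parts.forEach(p -> p.getParameters().put("batchTouched", "true"));
+     client.alter_partitions(DB_NAME, TABLE_NAME, parts); // one round trip for all partitions
+   }
+ 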
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsEmptyPartitionList() throws Exception {
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList());
+   }
+ 
+   @Test
+   public void testAlterPartitionsUnknownPartition() throws Exception {
+     Partition part1 = null;
+     try {
+       createTable4PartColsParts(client);
+       Table t = client.getTable(DB_NAME, TABLE_NAME);
+       PartitionBuilder builder = new PartitionBuilder();
+       Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf());
+       part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1).get(0);
+       makeTestChangesOnPartition(part1);
+       client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1));
+       fail("Should have thrown InvalidOperationException");
+     } catch (InvalidOperationException e) {
+       part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1).get(0);
+       assertPartitionUnchanged(part1, part1.getValues(), PARTCOL_SCHEMA);
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsIncompletePartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf());
+     Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1));
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsMissingPartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).build(metaStore.getConf());
+     Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsBogusCatalogName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsNoDbName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions("", TABLE_NAME, Lists.newArrayList(part));
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionsNullDbName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
 -    client.alter_partitions(null, TABLE_NAME, Lists.newArrayList(part));
++    try {
++      client.alter_partitions(null, TABLE_NAME, Lists.newArrayList(part));
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
++      // By design.
++    }
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsNoTblName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, "", Lists.newArrayList(part));
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionsNullTblName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
 -    client.alter_partitions(DB_NAME, null, Lists.newArrayList(part));
++    try {
++      client.alter_partitions(DB_NAME, null, Lists.newArrayList(part));
++      Assert.fail("didn't throw");
++    } catch (TProtocolException | MetaException e) {
++      // By design
++    }
+   }
+ 
+   @Test(expected = NullPointerException.class)
+   public void testAlterPartitionsNullPartition() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, null));
+   }
+ 
+   @Test(expected = NullPointerException.class)
+   public void testAlterPartitionsNullPartitions() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(null, null));
+   }
+ 
+   @Test
+   public void testAlterPartitionsNullPartitionList() throws Exception {
+     try {
+       createTable4PartColsParts(client);
+       Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+       client.alter_partitions(DB_NAME, TABLE_NAME, null);
+       fail("Should have thrown exception");
 -    } catch (NullPointerException | TTransportException e) {
++    } catch (NullPointerException | TTransportException | TProtocolException e) {
+       //TODO: should not throw different exceptions for different HMS deployment types
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsChangeDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setDbName(DB_NAME+"_changed");
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p));
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsChangeTableName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setTableName(TABLE_NAME+"_changed");
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsChangeValues() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setValues(Lists.newArrayList("1", "2", "3"));
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p));
+   }
+ 
+ 
+ 
+   /**
+    * Testing
+    *    alter_partitions(String,String,List(Partition),EnvironmentContext) ->
+    *    alter_partitions_with_environment_context(String,String,List(Partition),EnvironmentContext).
+    */
+   @Test
+   public void testAlterPartitionsWithEnvironmentCtx() throws Exception {
+     EnvironmentContext context = new EnvironmentContext();
+     context.setProperties(new HashMap<String, String>(){
+       {
+         put("TestKey", "TestValue");
+       }
+     });
+ 
+     List<List<String>> testValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     for (int i = 0; i < testValues.size(); ++i) {
+       assertPartitionUnchanged(oldParts.get(i), testValues.get(i), PARTCOL_SCHEMA);
+     }
+     oldParts.forEach(p -> makeTestChangesOnPartition(p));
+ 
+     client.alter_partitions(DB_NAME, TABLE_NAME, oldParts, context);
+ 
+     List<Partition> newParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     for (int i = 0; i < testValues.size(); ++i) {
+       assertPartitionChanged(oldParts.get(i), testValues.get(i), PARTCOL_SCHEMA);
+     }
+     assertPartitionsHaveCorrectValues(newParts, testValues);
+ 
+     client.alter_partitions(DB_NAME, TABLE_NAME, newParts, new EnvironmentContext());
 -    client.alter_partitions(DB_NAME, TABLE_NAME, newParts, null);
++    client.alter_partitions(DB_NAME, TABLE_NAME, newParts);
+ 
+     for (int i = 0; i < testValues.size(); ++i) {
+       assertPartitionChanged(oldParts.get(i), testValues.get(i), PARTCOL_SCHEMA);
+     }
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxEmptyPartitionList() throws Exception {
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(), new EnvironmentContext());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxUnknownPartition() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf());
+     Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1),
+             new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsWithEnvironmentCtxIncompletePartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf());
+     Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1),
+             new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsWithEnvironmentCtxMissingPartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).build(metaStore.getConf());
+     Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1),
+             new EnvironmentContext());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxBogusCatalogName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
 -    client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext());
++    client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext(),
++        -1, null, -1);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxNoDbName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions("", TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext());
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionsWithEnvironmentCtxNullDbName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
 -    client.alter_partitions(null, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext());
++    try {
++      client.alter_partitions(null, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext());
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
++      // By design.
++    }
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxNoTblName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, "", Lists.newArrayList(part), new EnvironmentContext());
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionsWithEnvironmentCtxNullTblName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
 -    client.alter_partitions(DB_NAME, null, Lists.newArrayList(part), new EnvironmentContext());
++    try {
++      client.alter_partitions(DB_NAME, null, Lists.newArrayList(part), new EnvironmentContext());
++      Assert.fail("didn't throw");
++    } catch (MetaException | TProtocolException ex) {
++      // By design.
++    }
+   }
+ 
+   @Test(expected = NullPointerException.class)
+   public void testAlterPartitionsWithEnvironmentCtxNullPartition() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, null),
+             new EnvironmentContext());
+   }
+ 
+   @Test(expected = NullPointerException.class)
+   public void testAlterPartitionsWithEnvironmentCtxNullPartitions() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(null, null),
+             new EnvironmentContext());
+   }
+ 
+   @Test
+   public void testAlterPartitionsWithEnvironmentCtxNullPartitionList() throws Exception {
+     try {
+       createTable4PartColsParts(client);
+       Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+       client.alter_partitions(DB_NAME, TABLE_NAME, null, new EnvironmentContext());
+       fail("Should have thrown exception");
 -    } catch (NullPointerException | TTransportException e) {
++    } catch (NullPointerException | TTransportException | TProtocolException e) {
+       //TODO: should not throw different exceptions for different HMS deployment types
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsWithEnvironmentCtxChangeDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setDbName(DB_NAME+"_changed");
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p), new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsWithEnvironmentCtxChangeTableName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setTableName(TABLE_NAME+"_changed");
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p), new EnvironmentContext());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxChangeValues() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setValues(Lists.newArrayList("1", "2", "3"));
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p), new EnvironmentContext());
+   }
+ 
+   /**
+    * Testing
+    *    renamePartition(String,String,List(String),Partition) ->
+    *    renamePartition(String,String,List(String),Partition).
+    */
+   @Test
+   public void testRenamePartition() throws Exception {
+ 
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<List<String>> newValues = new ArrayList<>();
+ 
+     List<String> newVal = Lists.newArrayList("2018", "01", "16");
+     newValues.addAll(oldValues.subList(0, 3));
+     newValues.add(newVal);
+ 
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(newVal);
+     makeTestChangesOnPartition(partToRename);
+     client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), partToRename);
+ 
+     List<Partition> newParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     assertPartitionsHaveCorrectValues(newParts, newValues);
+ 
+ 
+     //Asserting other partition parameters can also be changed, but not the location
+     assertFalse(newParts.get(3).getSd().getLocation().endsWith("hh=01"));
+     assertEquals(3, newParts.get(3).getSd().getCols().size());
+     assertEquals("testValue001", newParts.get(3).getParameters().get("hmsTestParam001"));
+     assertEquals(NEW_CREATE_TIME, newParts.get(3).getCreateTime());
+     assertEquals(NEW_CREATE_TIME, newParts.get(3).getLastAccessTime());
+ 
+ 
+ 
+     assertTrue(client.listPartitions(DB_NAME, TABLE_NAME, oldValues.get(3), (short)-1).isEmpty());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionTargetAlreadyExisting() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), oldParts.get(2));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionNoSuchOldPartition() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList("1", "2", ""), partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionNullTableInPartition() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     partToRename.setTableName(null);
+     client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2017", "11", "27"),
+             partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionNullDbInPartition() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     partToRename.setDbName(null);
+     client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2017", "11", "27"),
+             partToRename);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionEmptyOldPartList() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList(), partToRename);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionNullOldPartList() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, TABLE_NAME, null, partToRename);
+   }
+ 
+   @Test
+   public void testRenamePartitionNullNewPart() throws Exception {
+     try {
+       List<List<String>> oldValues = createTable4PartColsParts(client);
+       List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
+ 
+       Partition partToRename = oldParts.get(3);
+       partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+       client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), null);
+     } catch (NullPointerException | TTransportException e) {
+       // Expected: the exception type differs between embedded and remote HMS deployments.
+     }
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionBogusCatalogName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
 -    client.renamePartition("nosuch", DB_NAME, TABLE_NAME, oldValues.get(3), partToRename);
++    client.renamePartition("nosuch", DB_NAME, TABLE_NAME, oldValues.get(3), partToRename, -1, null);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionNoDbName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition("", TABLE_NAME, oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionNoTblName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, "", oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionNullDbName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(null, TABLE_NAME, oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionNullTblName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, null, oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionChangeTblName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     partToRename.setTableName(TABLE_NAME + "_2");
+     client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionChangeDbName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     partToRename.setDbName(DB_NAME + "_2");
+     client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionNoTable() throws Exception {
+     client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2018", "01", "16"),
+             new Partition());
+   }
+ 
+ }
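
The tests above replace "@Test(expected = MetaException.class)" with an explicit try/catch because
the exception type depends on how the metastore is deployed: the embedded HMS throws MetaException
directly, while the remote Thrift client can surface the failure as TProtocolException. A minimal
sketch of that idiom as a reusable helper; the helper name, the ThrowingRunnable interface and the
commented call site are illustrative assumptions, not part of this patch:

    import java.util.Arrays;
    import org.junit.Assert;

    // Passes if the call throws any of the listed exception types; rethrows anything else.
    private static void assertThrowsOneOf(ThrowingRunnable call, Class<?>... expected)
        throws Exception {
      try {
        call.run();
      } catch (Exception e) {
        for (Class<?> clazz : expected) {
          if (clazz.isInstance(e)) {
            return; // one of the accepted exception types was thrown
          }
        }
        throw e; // unexpected type: rethrow so the test fails with the real cause
      }
      Assert.fail("Expected one of " + Arrays.toString(expected) + " to be thrown");
    }

    private interface ThrowingRunnable {
      void run() throws Exception;
    }

    // Example call site, mirroring testAlterPartitionsWithEnvironmentCtxNullDbName above:
    // assertThrowsOneOf(
    //     () -> client.alter_partitions(null, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext()),
    //     MetaException.class, TProtocolException.class);
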

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java
index 0000000,8ce8531..462584a
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java
@@@ -1,0 -1,594 +1,600 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.client;
+ 
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.stream.Collectors;
+ 
+ import org.apache.commons.lang.StringUtils;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+ import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
+ import org.apache.hadoop.hive.metastore.TableType;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+ import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.transport.TTransportException;
+ import org.junit.After;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.junit.runner.RunWith;
+ import org.junit.runners.Parameterized;
+ 
+ import com.google.common.collect.Lists;
+ 
+ /**
+  * Tests for appending partitions.
+  */
+ @RunWith(Parameterized.class)
+ @Category(MetastoreCheckinTest.class)
+ public class TestAppendPartitions extends MetaStoreClientTest {
+   private AbstractMetaStoreService metaStore;
+   private IMetaStoreClient client;
+ 
+   private static final String DB_NAME = "test_append_part_db";
+   private static Table tableWithPartitions;
+   private static Table externalTable;
+   private static Table tableNoPartColumns;
+   private static Table tableView;
+ 
+   public TestAppendPartitions(String name, AbstractMetaStoreService metaStore) {
+     this.metaStore = metaStore;
+   }
+ 
+   @Before
+   public void setUp() throws Exception {
+     // Get new client
+     client = metaStore.getClient();
+ 
+     // Clean up the database
+     client.dropDatabase(DB_NAME, true, true, true);
+     metaStore.cleanWarehouseDirs();
+     new DatabaseBuilder()
+         .setName(DB_NAME)
+         .create(client, metaStore.getConf());
+ 
+     tableWithPartitions = createTableWithPartitions();
+     externalTable = createExternalTable();
+     tableNoPartColumns = createTableNoPartitionColumns();
+     tableView = createView();
+   }
+ 
+   @After
+   public void tearDown() throws Exception {
+     try {
+       if (client != null) {
+         try {
+           client.close();
+         } catch (Exception e) {
+           // HIVE-19729: Swallow the exceptions based on the discussion in the Jira
+         }
+       }
+     } finally {
+       client = null;
+     }
+   }
+ 
+   // Tests for Partition appendPartition(String tableName, String dbName, List<String> partVals) method
+ 
+   @Test
+   public void testAppendPartition() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     Table table = tableWithPartitions;
+ 
+     Partition appendedPart =
+         client.appendPartition(table.getDbName(), table.getTableName(), partitionValues);
+ 
+     Assert.assertNotNull(appendedPart);
+     Partition partition =
+         client.getPartition(table.getDbName(), table.getTableName(), partitionValues);
++    appendedPart.setWriteId(partition.getWriteId()); // writeId is assigned server-side (see the note after this diff)
+     Assert.assertEquals(partition, appendedPart);
+     verifyPartition(partition, table, partitionValues, "year=2017/month=may");
+     verifyPartitionNames(table, Lists.newArrayList("year=2017/month=march", "year=2017/month=april",
+         "year=2018/month=march", "year=2017/month=may"));
+   }
+ 
+   @Test
+   public void testAppendPartitionToExternalTable() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     Table table = externalTable;
+ 
+     Partition appendedPart =
+         client.appendPartition(table.getDbName(), table.getTableName(), partitionValues);
+ 
+     Assert.assertNotNull(appendedPart);
+     Partition partition =
+         client.getPartition(table.getDbName(), table.getTableName(), partitionValues);
++    appendedPart.setWriteId(partition.getWriteId());
+     Assert.assertEquals(partition, appendedPart);
+     verifyPartition(partition, table, partitionValues, "year=2017/month=may");
+     verifyPartitionNames(table, Lists.newArrayList("year=2017/month=may"));
+   }
+ 
+   @Test
+   public void testAppendPartitionMultiplePartitions() throws Exception {
+ 
+     List<String> partitionValues1 = Lists.newArrayList("2017", "may");
+     List<String> partitionValues2 = Lists.newArrayList("2018", "may");
+     List<String> partitionValues3 = Lists.newArrayList("2017", "june");
+ 
+     Table table = tableWithPartitions;
+ 
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionValues1);
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionValues2);
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionValues3);
+ 
+     verifyPartitionNames(table,
+         Lists.newArrayList("year=2017/month=may", "year=2018/month=may", "year=2017/month=june",
+             "year=2017/month=march", "year=2017/month=april", "year=2018/month=march"));
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartitionToTableWithoutPartCols() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     Table table = tableNoPartColumns;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionValues);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartitionToView() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     Table table = tableView;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionValues);
+   }
+ 
+   @Test(expected = AlreadyExistsException.class)
+   public void testAppendPartitionAlreadyExists() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "april");
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionValues);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartitionNonExistingDB() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     client.appendPartition("nonexistingdb", tableWithPartitions.getTableName(), partitionValues);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartitionNonExistingTable() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     client.appendPartition(tableWithPartitions.getDbName(), "nonexistingtable", partitionValues);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartitionEmptyDB() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     client.appendPartition("", tableWithPartitions.getTableName(), partitionValues);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartitionEmptyTable() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     client.appendPartition(tableWithPartitions.getDbName(), "", partitionValues);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartitionNullDB() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     client.appendPartition(null, tableWithPartitions.getTableName(), partitionValues);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartitionNullTable() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     client.appendPartition(tableWithPartitions.getDbName(), null, partitionValues);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartitionEmptyPartValues() throws Exception {
+ 
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), new ArrayList<>());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartitionNullPartValues() throws Exception {
+ 
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), (List<String>) null);
+   }
+ 
+   @Test
+   public void testAppendPartitionLessPartValues() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2019");
+     Table table = tableWithPartitions;
+ 
+     try {
+       client.appendPartition(table.getDbName(), table.getTableName(), partitionValues);
+       Assert.fail("Exception should have been thrown.");
+     } catch (MetaException e) {
+       // Expected exception
+     }
+     verifyPartitionNames(table, Lists.newArrayList("year=2017/month=march", "year=2017/month=april",
+         "year=2018/month=march"));
+     String partitionLocation = table.getSd().getLocation() + "/year=2019";
+     Assert.assertFalse(metaStore.isPathExists(new Path(partitionLocation)));
+   }
+ 
+   @Test
+   public void testAppendPartitionMorePartValues() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2019", "march", "12");
+     Table table = tableWithPartitions;
+ 
+     try {
+       client.appendPartition(table.getDbName(), table.getTableName(), partitionValues);
+       Assert.fail("Exception should have been thrown.");
+     } catch (MetaException e) {
+       // Expected exception
+     }
+     verifyPartitionNames(table, Lists.newArrayList("year=2017/month=march", "year=2017/month=april",
+         "year=2018/month=march"));
+     String partitionLocation = tableWithPartitions.getSd().getLocation() + "/year=2019";
+     Assert.assertFalse(metaStore.isPathExists(new Path(partitionLocation)));
+   }
+ 
+   // Tests for Partition appendPartition(String tableName, String dbName, String name) method
+ 
+   @Test
+   public void testAppendPart() throws Exception {
+ 
+     Table table = tableWithPartitions;
+     String partitionName = "year=2017/month=may";
+ 
+     Partition appendedPart =
+         client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+ 
+     Assert.assertNotNull(appendedPart);
+     Partition partition = client.getPartition(table.getDbName(), table.getTableName(),
+         getPartitionValues(partitionName));
++    appendedPart.setWriteId(partition.getWriteId());
+     Assert.assertEquals(partition, appendedPart);
+     verifyPartition(partition, table, getPartitionValues(partitionName), partitionName);
+     verifyPartitionNames(table, Lists.newArrayList("year=2017/month=march", "year=2017/month=april",
+         "year=2018/month=march", partitionName));
+   }
+ 
+   @Test
+   public void testAppendPartToExternalTable() throws Exception {
+ 
+     Table table = externalTable;
+     String partitionName = "year=2017/month=may";
+ 
+     Partition appendedPart =
+         client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+ 
+     Assert.assertNotNull(appendedPart);
+     Partition partition = client.getPartition(table.getDbName(), table.getTableName(),
+         getPartitionValues(partitionName));
++    appendedPart.setWriteId(partition.getWriteId());
+     Assert.assertEquals(partition, appendedPart);
+     verifyPartition(partition, table, getPartitionValues(partitionName), partitionName);
+     verifyPartitionNames(table, Lists.newArrayList(partitionName));
+   }
+ 
+   @Test
+   public void testAppendPartMultiplePartitions() throws Exception {
+ 
+     String partitionName1 = "year=2017/month=may";
+     String partitionName2 = "year=2018/month=may";
+     String partitionName3 = "year=2017/month=june";
+     Table table = tableWithPartitions;
+ 
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName1);
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName2);
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName3);
+ 
+     verifyPartitionNames(table, Lists.newArrayList(partitionName1, partitionName2, partitionName3,
+         "year=2017/month=march", "year=2017/month=april", "year=2018/month=march"));
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartToTableWithoutPartCols() throws Exception {
+ 
+     String partitionName = "year=2017/month=may";
+     Table table = tableNoPartColumns;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartToView() throws Exception {
+ 
+     String partitionName = "year=2017/month=may";
+     Table table = tableView;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+   }
+ 
+   @Test(expected = AlreadyExistsException.class)
+   public void testAppendPartAlreadyExists() throws Exception {
+ 
+     String partitionName = "year=2017/month=april";
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartNonExistingDB() throws Exception {
+ 
+     String partitionName = "year=2017/month=april";
+     client.appendPartition("nonexistingdb", tableWithPartitions.getTableName(), partitionName);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartNonExistingTable() throws Exception {
+ 
+     String partitionName = "year=2017/month=april";
+     client.appendPartition(tableWithPartitions.getDbName(), "nonexistingtable", partitionName);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartEmptyDB() throws Exception {
+ 
+     String partitionName = "year=2017/month=april";
+     client.appendPartition("", tableWithPartitions.getTableName(), partitionName);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartEmptyTable() throws Exception {
+ 
+     String partitionName = "year=2017/month=april";
+     client.appendPartition(tableWithPartitions.getDbName(), "", partitionName);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartNullDB() throws Exception {
+ 
+     String partitionName = "year=2017/month=april";
+     client.appendPartition(null, tableWithPartitions.getTableName(), partitionName);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartNullTable() throws Exception {
+ 
+     String partitionName = "year=2017/month=april";
+     client.appendPartition(tableWithPartitions.getDbName(), null, partitionName);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartEmptyPartName() throws Exception {
+ 
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), "");
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartNullPartName() throws Exception {
+ 
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), (String) null);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartLessPartValues() throws Exception {
+ 
+     String partitionName = "year=2019";
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+   }
+ 
+   @Test
+   public void testAppendPartMorePartValues() throws Exception {
+ 
+     String partitionName = "year=2019/month=march/day=12";
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartInvalidPartName() throws Exception {
+ 
+     String partitionName = "invalidpartname";
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartWrongColumnInPartName() throws Exception {
+ 
+     String partitionName = "year=2019/honap=march";
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+   }
+ 
+   @Test
+   public void otherCatalog() throws TException {
+     String catName = "append_partition_catalog";
+     Catalog cat = new CatalogBuilder()
+         .setName(catName)
+         .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
+         .build();
+     client.createCatalog(cat);
+ 
+     String dbName = "append_partition_database_in_other_catalog";
+     Database db = new DatabaseBuilder()
+         .setName(dbName)
+         .setCatalogName(catName)
+         .create(client, metaStore.getConf());
+ 
+     String tableName = "table_in_other_catalog";
+     new TableBuilder()
+         .inDb(db)
+         .setTableName(tableName)
+         .addCol("id", "int")
+         .addCol("name", "string")
+         .addPartCol("partcol", "string")
+         .create(client, metaStore.getConf());
+ 
+     Partition created =
+         client.appendPartition(catName, dbName, tableName, Collections.singletonList("a1"));
+     Assert.assertEquals(1, created.getValuesSize());
+     Assert.assertEquals("a1", created.getValues().get(0));
+     Partition fetched =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a1"));
++    created.setWriteId(fetched.getWriteId());
+     Assert.assertEquals(created, fetched);
+ 
+     created = client.appendPartition(catName, dbName, tableName, "partcol=a2");
+     Assert.assertEquals(1, created.getValuesSize());
+     Assert.assertEquals("a2", created.getValues().get(0));
+     fetched = client.getPartition(catName, dbName, tableName, Collections.singletonList("a2"));
++    created.setWriteId(fetched.getWriteId());
+     Assert.assertEquals(created, fetched);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartitionBogusCatalog() throws Exception {
+     client.appendPartition("nosuch", DB_NAME, tableWithPartitions.getTableName(),
+         Lists.newArrayList("2017", "may"));
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartitionByNameBogusCatalog() throws Exception {
+     client.appendPartition("nosuch", DB_NAME, tableWithPartitions.getTableName(),
+         "year=2017/month=april");
+   }
+ 
+   // Helper methods
+ 
+   private Table createTableWithPartitions() throws Exception {
+     Table table = createTable("test_append_part_table_with_parts", getYearAndMonthPartCols(), null,
+         TableType.MANAGED_TABLE.name(),
+         metaStore.getWarehouseRoot() + "/test_append_part_table_with_parts");
+     createPartition(table, Lists.newArrayList("2017", "march"));
+     createPartition(table, Lists.newArrayList("2017", "april"));
+     createPartition(table, Lists.newArrayList("2018", "march"));
+     return table;
+   }
+ 
+   private Table createTableNoPartitionColumns() throws Exception {
+     Table table = createTable("test_append_part_table_no_part_columns", null, null, "MANAGED_TABLE",
+         metaStore.getWarehouseRoot() + "/test_append_part_table_no_part_columns");
+     return table;
+   }
+ 
+   private Table createExternalTable() throws Exception {
+     Map<String, String> tableParams = new HashMap<>();
+     tableParams.put("EXTERNAL", "TRUE");
+     Table table = createTable("test_append_part_external_table", getYearAndMonthPartCols(),
+         tableParams, TableType.EXTERNAL_TABLE.name(),
+         metaStore.getWarehouseRoot() + "/test_append_part_external_table");
+     return table;
+   }
+ 
+   private Table createView() throws Exception {
+     Table table = createTable("test_append_part_table_view", getYearAndMonthPartCols(), null,
+         TableType.VIRTUAL_VIEW.name(), null);
+     return table;
+   }
+ 
+   private Table createTable(String tableName, List<FieldSchema> partCols, Map<String,
+       String> tableParams, String tableType, String location) throws Exception {
+     new TableBuilder()
+         .setDbName(DB_NAME)
+         .setTableName(tableName)
+         .addCol("test_id", "int", "test col id")
+         .addCol("test_value", "string", "test col value")
+         .setPartCols(partCols)
+         .setTableParams(tableParams)
+         .setType(tableType)
+         .setLocation(location)
+         .create(client, metaStore.getConf());
+     return client.getTable(DB_NAME, tableName);
+   }
+ 
+   private void createPartition(Table table, List<String> values) throws Exception {
+     new PartitionBuilder()
+         .inTable(table)
+         .setValues(values)
+         .addToTable(client, metaStore.getConf());
+   }
+ 
+   private static List<FieldSchema> getYearAndMonthPartCols() {
+     List<FieldSchema> cols = new ArrayList<>();
+     cols.add(new FieldSchema("year", "string", "year part col"));
+     cols.add(new FieldSchema("month", "string", "month part col"));
+     return cols;
+   }
+ 
+   private static List<String> getPartitionValues(String partitionsName) {
+     List<String> values = new ArrayList<>();
+     if (StringUtils.isEmpty(partitionsName)) {
+       return values;
+     }
+     values = Arrays.stream(partitionsName.split("/")).map(v -> v.split("=")[1])
+         .collect(Collectors.toList());
+     return values;
+   }
+ 
+   private void verifyPartition(Partition partition, Table table, List<String> expectedPartValues,
+       String partitionName) throws Exception {
+     Assert.assertEquals(table.getTableName(), partition.getTableName());
+     Assert.assertEquals(table.getDbName(), partition.getDbName());
+     Assert.assertEquals(expectedPartValues, partition.getValues());
+     Assert.assertNotEquals(0, partition.getCreateTime());
+     Assert.assertEquals(0, partition.getLastAccessTime());
+     Assert.assertEquals(1, partition.getParameters().size());
+     Assert.assertTrue(partition.getParameters().containsKey("transient_lastDdlTime"));
+     StorageDescriptor partitionSD = partition.getSd();
+     Assert.assertEquals(table.getSd().getLocation() + "/" + partitionName,
+         partitionSD.getLocation());
+     partition.getSd().setLocation(table.getSd().getLocation());
+     Assert.assertEquals(table.getSd(), partitionSD);
+     Assert.assertTrue(metaStore.isPathExists(new Path(partitionSD.getLocation())));
+   }
+ 
+   private void verifyPartitionNames(Table table, List<String> expectedPartNames) throws Exception {
+     List<String> partitionNames =
+         client.listPartitionNames(table.getDbName(), table.getTableName(), (short) -1);
+     Assert.assertEquals(expectedPartNames.size(), partitionNames.size());
+     Assert.assertTrue(partitionNames.containsAll(expectedPartNames));
+   }
+ }
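
In the ++ lines above, the write id of the partition returned by appendPartition is overwritten
with the one fetched via getPartition before the equality assertion, because the write id is
assigned on the server side and is not reflected in the object handed back to the caller. A short
sketch of that normalization as a helper; the method name is an assumption for illustration only:

    // Align server-assigned fields (here only the write id) before a full struct comparison.
    private static void assertEqualsIgnoringWriteId(Partition expected, Partition actual) {
      Partition normalized = new Partition(actual); // Thrift structs provide a deep-copy constructor
      normalized.setWriteId(expected.getWriteId());
      Assert.assertEquals(expected, normalized);
    }
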


[46/54] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index 0000000,73a518d..95e8445
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@@ -1,0 -1,1682 +1,1719 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import org.apache.hadoop.hive.common.TableName;
 -import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 -import org.apache.hadoop.hive.metastore.api.ISchemaName;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
 -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
++import org.apache.hadoop.hive.metastore.api.*;
+ 
+ import java.lang.annotation.ElementType;
+ import java.lang.annotation.Retention;
+ import java.lang.annotation.RetentionPolicy;
+ import java.lang.annotation.Target;
+ import java.nio.ByteBuffer;
+ import java.util.List;
+ import java.util.Map;
+ 
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configurable;
 -import org.apache.hadoop.hive.metastore.api.AggrStats;
 -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 -import org.apache.hadoop.hive.metastore.api.Catalog;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 -import org.apache.hadoop.hive.metastore.api.Database;
 -import org.apache.hadoop.hive.metastore.api.FieldSchema;
 -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 -import org.apache.hadoop.hive.metastore.api.Function;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 -import org.apache.hadoop.hive.metastore.api.ISchema;
 -import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 -import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 -import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
 -import org.apache.hadoop.hive.metastore.api.MetaException;
 -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 -import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
 -import org.apache.hadoop.hive.metastore.api.Partition;
 -import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 -import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 -import org.apache.hadoop.hive.metastore.api.PrincipalType;
 -import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 -import org.apache.hadoop.hive.metastore.api.Role;
 -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 -import org.apache.hadoop.hive.metastore.api.RuntimeStat;
 -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersion;
 -import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 -import org.apache.hadoop.hive.metastore.api.Table;
 -import org.apache.hadoop.hive.metastore.api.TableMeta;
 -import org.apache.hadoop.hive.metastore.api.Type;
 -import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 -import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 -import org.apache.hadoop.hive.metastore.api.WMMapping;
 -import org.apache.hadoop.hive.metastore.api.WMNullablePool;
 -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMPool;
 -import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMTrigger;
 -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
+ import org.apache.thrift.TException;
+ 
+ public interface RawStore extends Configurable {
+ 
+   /***
+    * Annotation to skip retries
+    */
+   @Target(value = ElementType.METHOD)
+   @Retention(value = RetentionPolicy.RUNTIME)
+   @interface CanNotRetry {
+   }
+ 
+   void shutdown();
+ 
+   /**
+    * Opens a new transaction, or reuses the one already open. Every call of this function must
+    * have a corresponding commit or rollback call.
+    *
+    * @return true if there is an active transaction after this call
+    */
+ 
+   boolean openTransaction();
+ 
+   /**
+    * If this is the commit matching the first (outermost) open call, an actual commit is
+    * performed.
+    *
+    * @return true if the commit succeeded, false otherwise
+    */
+   @CanNotRetry
+   boolean commitTransaction();
+ 
+   boolean isActiveTransaction();
+ 
+   /**
+    * Rolls back the current transaction if it is active
+    */
+   @CanNotRetry
+   void rollbackTransaction();
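
The three methods above define the nesting contract of the store: every openTransaction() must be
paired with exactly one commitTransaction() or rollbackTransaction(). A minimal sketch of how a
caller is expected to drive it, assuming a RawStore implementation held in a variable named store:

    boolean committed = false;
    try {
      store.openTransaction();
      // ... one or more metadata operations against the store ...
      committed = store.commitTransaction();
    } finally {
      if (!committed) {
        store.rollbackTransaction();
      }
    }
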
+ 
+   /**
+    * Create a new catalog.
+    * @param cat Catalog to create.
+    * @throws MetaException if something goes wrong, usually in storing it to the database.
+    */
+   void createCatalog(Catalog cat) throws MetaException;
+ 
+   /**
+    * Alter an existing catalog.  Only description and location can be changed, and the change of
+    * location is for internal use only.
+    * @param catName name of the catalog to alter.
+    * @param cat new version of the catalog.
+    * @throws MetaException something went wrong, usually in the database.
+    * @throws InvalidOperationException attempt to change something about the catalog that is not
+    * changeable, like the name.
+    */
+   void alterCatalog(String catName, Catalog cat) throws MetaException, InvalidOperationException;
+ 
+   /**
+    * Get a catalog.
+    * @param catalogName name of the catalog.
+    * @return The catalog.
+    * @throws NoSuchObjectException no catalog of this name exists.
+    * @throws MetaException if something goes wrong, usually in reading it from the database.
+    */
+   Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Get all the catalogs.
+    * @return list of names of all catalogs in the system
+    * @throws MetaException if something goes wrong, usually in reading from the database.
+    */
+   List<String> getCatalogs() throws MetaException;
+ 
+   /**
+    * Drop a catalog.  The catalog must be empty.
+    * @param catalogName name of the catalog to drop.
+    * @throws NoSuchObjectException no catalog of this name exists.
+    * @throws MetaException could mean the catalog isn't empty, could mean general database error.
+    */
+   void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException;
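
A minimal sketch of the catalog lifecycle described above, assuming a RawStore instance named
store; the catalog name and location are placeholders:

    Catalog cat = new Catalog();
    cat.setName("example_cat");
    cat.setLocationUri("/warehouse/example_cat");
    cat.setDescription("catalog used for illustration only");
    store.createCatalog(cat);

    Catalog fetched = store.getCatalog("example_cat"); // NoSuchObjectException if absent
    List<String> catalogs = store.getCatalogs();
    store.dropCatalog("example_cat");                  // the catalog must be empty
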
+ 
+   /**
+    * Create a database.
+    * @param db database to create.
+    * @throws InvalidObjectException not sure it actually ever throws this.
+    * @throws MetaException if something goes wrong, usually in writing it to the database.
+    */
+   void createDatabase(Database db)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get a database.
+    * @param catalogName catalog the database is in.
+    * @param name name of the database.
+    * @return the database.
+    * @throws NoSuchObjectException if no such database exists.
+    */
+   Database getDatabase(String catalogName, String name)
+       throws NoSuchObjectException;
+ 
+   /**
+    * Drop a database.
+    * @param catalogName catalog the database is in.
+    * @param dbname name of the database.
+    * @return true if the database was dropped, pretty much always returns this if it returns.
+    * @throws NoSuchObjectException no database in this catalog of this name to drop
+    * @throws MetaException something went wrong, usually with the database.
+    */
+   boolean dropDatabase(String catalogName, String dbname)
+       throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Alter a database.
+    * @param catalogName name of the catalog the database is in.
+    * @param dbname name of the database to alter
+    * @param db new version of the database.  This should be complete as it will fully replace the
+    *          existing db object.
+    * @return true if the change succeeds, could fail due to db constraint violations.
+    * @throws NoSuchObjectException no database of this name exists to alter.
+    * @throws MetaException something went wrong, usually with the database.
+    */
+   boolean alterDatabase(String catalogName, String dbname, Database db)
+       throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Get all database in a catalog having names that match a pattern.
+    * @param catalogName name of the catalog to search for databases in
+    * @param pattern pattern names should match
+    * @return list of matching database names.
+    * @throws MetaException something went wrong, usually with the database.
+    */
+   List<String> getDatabases(String catalogName, String pattern) throws MetaException;
+ 
+   /**
+    * Get names of all the databases in a catalog.
+    * @param catalogName name of the catalog to search for databases in
+    * @return list of names of all databases in the catalog
+    * @throws MetaException something went wrong, usually with the database.
+    */
+   List<String> getAllDatabases(String catalogName) throws MetaException;
+ 
+   boolean createType(Type type);
+ 
+   Type getType(String typeName);
+ 
+   boolean dropType(String typeName);
+ 
+   void createTable(Table tbl) throws InvalidObjectException,
+       MetaException;
+ 
+   /**
+    * Drop a table.
+    * @param catalogName catalog the table is in
+    * @param dbName database the table is in
+    * @param tableName table name
+    * @return true if the table was dropped
+    * @throws MetaException something went wrong, usually in the RDBMS or storage
+    * @throws NoSuchObjectException No table of this name
+    * @throws InvalidObjectException Don't think this is ever actually thrown
+    * @throws InvalidInputException Don't think this is ever actually thrown
+    */
+   boolean dropTable(String catalogName, String dbName, String tableName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException;
+ 
+   /**
+    * Get a table object.
+    * @param catalogName catalog the table is in.
+    * @param dbName database the table is in.
+    * @param tableName table name.
+    * @return table object, or null if no such table exists (wow it would be nice if we either
+    * consistently returned null or consistently threw NoSuchObjectException).
+    * @throws MetaException something went wrong in the RDBMS
+    */
+   Table getTable(String catalogName, String dbName, String tableName) throws MetaException;
+ 
+   /**
++   * Get a table object.
++   * @param catalogName catalog the table is in.
++   * @param dbName database the table is in.
++   * @param tableName table name.
++   * @param txnId transaction id of the calling transaction
++   * @param writeIdList string format of valid writeId transaction list
++   * @return table object, or null if no such table exists (wow it would be nice if we either
++   * consistently returned null or consistently threw NoSuchObjectException).
++   * @throws MetaException something went wrong in the RDBMS
++   */
++  Table getTable(String catalogName, String dbName, String tableName,
++                 long txnId, String writeIdList) throws MetaException;
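
The second getTable overload above is part of what this patch adds: the caller passes its
transaction id and a serialized valid-write-id list so the store can decide whether the stored
metadata and statistics are usable for that snapshot. A minimal sketch; the txnId value and the
serialized writeIdList string are placeholder assumptions:

    long txnId = 42L;
    String validWriteIds = "default.example_tbl:5:9223372036854775807::"; // assumed serialized form
    Table tbl = store.getTable("hive", "default", "example_tbl", txnId, validWriteIds);
    if (tbl == null) {
      // getTable returns null rather than throwing when the table does not exist
    }
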
++
++  /**
+    * Add a partition.
+    * @param part partition to add
+    * @return true if the partition was successfully added.
+    * @throws InvalidObjectException the provided partition object is not valid.
+    * @throws MetaException error writing to the RDBMS.
+    */
+   boolean addPartition(Partition part)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add a list of partitions to a table.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param parts list of partitions to be added.
+    * @return true if the operation succeeded.
+    * @throws InvalidObjectException never throws this AFAICT
+    * @throws MetaException the partitions don't belong to the indicated table or error writing to
+    * the RDBMS.
+    */
+   boolean addPartitions(String catName, String dbName, String tblName, List<Partition> parts)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add a list of partitions to a table.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param partitionSpec specification for the partition
+    * @param ifNotExists whether it is in an error if the partition already exists.  If true, then
+    *                   it is not an error if the partition exists, if false, it is.
+    * @return whether the partition was created.
+    * @throws InvalidObjectException The passed in partition spec or table specification is invalid.
+    * @throws MetaException error writing to RDBMS.
+    */
+   boolean addPartitions(String catName, String dbName, String tblName,
+                         PartitionSpecProxy partitionSpec, boolean ifNotExists)
+       throws InvalidObjectException, MetaException;
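
A short sketch of the list-based addPartitions variant above, assuming the parent table object is
already loaded into a variable named table and is partitioned by a single string column ds:

    Partition p = new Partition();
    p.setCatName("hive");
    p.setDbName("default");
    p.setTableName("example_tbl");
    p.setValues(Collections.singletonList("2018-07-19"));
    p.setSd(new StorageDescriptor(table.getSd()));   // copy the table's storage descriptor
    p.getSd().setLocation(table.getSd().getLocation() + "/ds=2018-07-19");

    boolean added = store.addPartitions("hive", "default", "example_tbl",
        Collections.singletonList(p));
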
+ 
+   /**
+    * Get a partition.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param part_vals partition values for this table.
+    * @return the partition.
+    * @throws MetaException error reading from RDBMS.
+    * @throws NoSuchObjectException no partition matching this specification exists.
+    */
+   Partition getPartition(String catName, String dbName, String tableName,
+       List<String> part_vals) throws MetaException, NoSuchObjectException;
++  /**
++   * Get a partition.
++   * @param catName catalog name.
++   * @param dbName database name.
++   * @param tableName table name.
++   * @param part_vals partition values for this table.
++   * @param txnId transaction id of the calling transaction
++   * @param writeIdList string format of valid writeId transaction list
++   * @return the partition.
++   * @throws MetaException error reading from RDBMS.
++   * @throws NoSuchObjectException no partition matching this specification exists.
++   */
++  Partition getPartition(String catName, String dbName, String tableName,
++                         List<String> part_vals,
++                         long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException;
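
The snapshot-aware getPartition overload above follows the same pattern as the getTable overload
earlier; a one-line sketch reusing the placeholder txnId and validWriteIds values:

    Partition part = store.getPartition("hive", "default", "example_tbl",
        Collections.singletonList("2018-07-19"), txnId, validWriteIds);
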
+ 
+   /**
+    * Check whether a partition exists.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param partKeys list of partition keys used to generate the partition name.
+    * @param part_vals list of partition values.
+    * @return true if the partition exists, false otherwise.
+    * @throws MetaException failure reading RDBMS
+    * @throws NoSuchObjectException this is never thrown.
+    */
+   boolean doesPartitionExist(String catName, String dbName, String tableName,
+       List<FieldSchema> partKeys, List<String> part_vals)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Drop a partition.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param part_vals list of partition values.
+    * @return true if the partition was dropped.
+    * @throws MetaException Error accessing the RDBMS.
+    * @throws NoSuchObjectException no partition matching this description exists
+    * @throws InvalidObjectException error dropping the statistics for the partition
+    * @throws InvalidInputException error dropping the statistics for the partition
+    */
+   boolean dropPartition(String catName, String dbName, String tableName,
+       List<String> part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException,
+       InvalidInputException;
+ 
+   /**
+    * Get some or all partitions for a table.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name
+    * @param max maximum number of partitions, or -1 to get all partitions.
+    * @return list of partitions
+    * @throws MetaException error access the RDBMS.
+    * @throws NoSuchObjectException no such table exists
+    */
+   List<Partition> getPartitions(String catName, String dbName,
+       String tableName, int max) throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Get the location for every partition of a given table. If a partition location is a child of
+    * baseLocationToNotShow, the partition name is still returned, but null is returned as its
+    * location.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param baseLocationToNotShow Partition locations which are child of this path are omitted, and
+    *     null value returned instead.
+    * @param max The maximum number of partition locations returned, or -1 for all
+    * @return The map of the partitionName, location pairs
+    */
+   Map<String, String> getPartitionLocations(String catName, String dbName, String tblName,
+       String baseLocationToNotShow, int max);
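
A short sketch of the contract described above: partitions stored under baseLocationToNotShow come
back with a null location, anything stored elsewhere keeps its real path. Names, paths and the
store variable are placeholders:

    Map<String, String> locations = store.getPartitionLocations(
        "hive", "default", "example_tbl",
        "/warehouse/example_tbl",   // baseLocationToNotShow: the table's own directory
        -1);                        // -1 = no limit on the number of entries
    // e.g. "ds=2018-07-19" -> null                                  (under the table directory)
    //      "ds=2018-07-20" -> "/external/elsewhere/ds=2018-07-20"   (stored elsewhere)
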
+ 
+   /**
+    * Alter a table.
+    * @param catName catalog the table is in.
+    * @param dbname database the table is in.
+    * @param name name of the table.
+    * @param newTable New table object.  Which parts of the table can be altered are
+    *                 implementation specific.
+    * @throws InvalidObjectException The new table object is invalid.
+    * @throws MetaException something went wrong, usually in the RDBMS or storage.
+    */
 -  void alterTable(String catName, String dbname, String name, Table newTable)
++  void alterTable(String catName, String dbname, String name, Table newTable,
++      long queryTxnId, String queryValidWriteIds)
+       throws InvalidObjectException, MetaException;
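
The alterTable signature above is the other half of the change in this patch: alterations now carry
the caller's transaction id and valid write-id list so that column statistics can be kept or
invalidated per snapshot. A minimal sketch, reusing the placeholder snapshot values from the
getTable example:

    Table newTable = new Table(store.getTable("hive", "default", "example_tbl")); // deep copy
    newTable.putToParameters("comment", "altered for illustration");
    store.alterTable("hive", "default", "example_tbl", newTable, txnId, validWriteIds);
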
+ 
+   /**
+    * Update creation metadata for a materialized view.
+    * @param catName catalog name.
+    * @param dbname database name.
+    * @param tablename table name.
+    * @param cm new creation metadata
+    * @throws MetaException error accessing the RDBMS.
+    */
+   void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm)
+       throws MetaException;
+ 
+   /**
+    * Get table names that match a pattern.
+    * @param catName catalog to search in
+    * @param dbName database to search in
+    * @param pattern pattern to match
+    * @return list of table names, if any
+    * @throws MetaException failure in querying the RDBMS
+    */
+   List<String> getTables(String catName, String dbName, String pattern)
+       throws MetaException;
+ 
+   /**
+    * Get table names that match a pattern.
+    * @param catName catalog to search in
+    * @param dbName database to search in
+    * @param pattern pattern to match
+    * @param tableType type of table to look for
+    * @return list of table names, if any
+    * @throws MetaException failure in querying the RDBMS
+    */
+   List<String> getTables(String catName, String dbName, String pattern, TableType tableType)
+       throws MetaException;
+ 
+   /**
+    * Get list of materialized views in a database.
+    * @param catName catalog name
+    * @param dbName database name
+    * @return names of all materialized views in the database
+    * @throws MetaException error querying the RDBMS
+    * @throws NoSuchObjectException no such database
+    */
+   List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Get summary metadata for tables that match the given patterns and table types.
+    * @param catName catalog name to search in. Search must be confined to one catalog.
+    * @param dbNames databases to search in.
+    * @param tableNames names of tables to select.
+    * @param tableTypes types of tables to look for.
+    * @return list of matching table meta information.
+    * @throws MetaException failure in querying the RDBMS.
+    */
+   List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames,
+                                List<String> tableTypes) throws MetaException;
+ 
+   /**
+    * @param catName catalog name
+    * @param dbname
+    *        The name of the database from which to retrieve the tables
+    * @param tableNames
+    *        The names of the tables to retrieve.
+    * @return A list of the tables retrievable from the database
+    *          whose names are in the list tableNames.
+    *         If there are duplicate names, only one instance of the table will be returned
+    * @throws MetaException failure in querying the RDBMS.
+    */
+   List<Table> getTableObjectsByName(String catName, String dbname, List<String> tableNames)
+       throws MetaException, UnknownDBException;
+ 
+   /**
+    * Get all tables in a database.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @return list of table names
+    * @throws MetaException failure in querying the RDBMS.
+    */
+   List<String> getAllTables(String catName, String dbName) throws MetaException;
+ 
+   /**
+    * Gets a list of tables based on a filter string and filter type.
+    * @param catName catalog name
+    * @param dbName
+    *          The name of the database from which you will retrieve the table names
+    * @param filter
+    *          The filter string
+    * @param max_tables
+    *          The maximum number of tables returned
+    * @return  A list of table names that match the desired filter
+    * @throws MetaException failure in querying the RDBMS
+    * @throws UnknownDBException no such database exists
+    */
+   List<String> listTableNamesByFilter(String catName, String dbName, String filter,
+                                       short max_tables) throws MetaException, UnknownDBException;
+ 
+   /**
+    * Get a partial or complete list of names for partitions of a table.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param max_parts maximum number of partitions to retrieve, -1 for all.
+    * @return list of partition names.
+    * @throws MetaException there was an error accessing the RDBMS
+    */
+   List<String> listPartitionNames(String catName, String db_name,
+       String tbl_name, short max_parts) throws MetaException;
+ 
+   /**
+    * Get a list of partition values as one big struct.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param cols partition key columns
+    * @param applyDistinct whether to apply distinct to the list
+    * @param filter filter to apply to the partition names
+    * @param ascending whether to put in ascending order
+    * @param order columns to order the results by
+    * @param maxParts maximum number of parts to return, or -1 for all
+    * @return struct with all of the partition value information
+    * @throws MetaException error accessing the RDBMS
+    */
+   PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name,
+                                               List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending,
+                                               List<FieldSchema> order, long maxParts) throws MetaException;
+ 
+   /**
+    * Alter a partition.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals partition values that describe the partition.
+    * @param new_part new partition object.  This should be a complete copy of the old with
+    *                 changed values, not just the parts to update.
++   * @param queryTxnId transaction id of the query that called this method.
++   * @param queryValidWriteIds valid write id list of that transaction for this table.
+    * @throws InvalidObjectException No such partition.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   void alterPartition(String catName, String db_name, String tbl_name, List<String> part_vals,
 -      Partition new_part) throws InvalidObjectException, MetaException;
++      Partition new_part, long queryTxnId, String queryValidWriteIds)
++          throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Alter a set of partitions.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals_list list of list of partition values.  Each outer list describes one
+    *                       partition (with its list of partition values).
+    * @param new_parts list of new partitions.  The order must match the old partitions described in
+    *                  part_vals_list.  Each of these should be a complete copy of the new
+    *                  partition, not just the pieces to update.
++   * @param writeId write id of the transaction for the table
++   * @param queryTxnId transaction id of the query that called this method.
++   * @param queryValidWriteIds valid write id list of that transaction for the current table
+    * @throws InvalidObjectException One of the indicated partitions does not exist.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   void alterPartitions(String catName, String db_name, String tbl_name,
 -      List<List<String>> part_vals_list, List<Partition> new_parts)
++      List<List<String>> part_vals_list, List<Partition> new_parts, long writeId,
++      long queryTxnId, String queryValidWriteIds)
+       throws InvalidObjectException, MetaException;
+ 
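
   A sketch of the new alterPartitions call, assuming the Partition objects have already been
   fetched and modified; the outer list of partition values must line up with new_parts one to
   one. Variable names and ids below are placeholders.

       List<List<String>> partValsList = new ArrayList<>();
       for (Partition p : newParts) {
         partValsList.add(p.getValues());   // one entry of key values per partition, same order
       }
       store.alterPartitions("hive", "default", "web_logs",
           partValsList, newParts, writeId, queryTxnId, queryValidWriteIds);
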
+   /**
+    * Get partitions with a filter.  This is a portion of the SQL where clause.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tblName table name
+    * @param filter SQL where clause filter
+    * @param maxParts maximum number of partitions to return, or -1 for all.
+    * @return list of partition objects matching the criteria
+    * @throws MetaException Error accessing the RDBMS or processing the filter.
+    * @throws NoSuchObjectException no such table.
+    */
+   List<Partition> getPartitionsByFilter(
+       String catName, String dbName, String tblName, String filter, short maxParts)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Get partitions using an already parsed expression.
+    * @param catName catalog name.
+    * @param dbName database name
+    * @param tblName table name
+    * @param expr an already parsed Hive expression
+    * @param defaultPartitionName default name of a partition
+    * @param maxParts maximum number of partitions to return, or -1 for all
+    * @param result list to place resulting partitions in
+    * @return true if the result contains unknown partitions.
+    * @throws TException error executing the expression
+    */
+   boolean getPartitionsByExpr(String catName, String dbName, String tblName,
+       byte[] expr, String defaultPartitionName, short maxParts, List<Partition> result)
+       throws TException;
+ 
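
   For getPartitionsByExpr the expression bytes are produced elsewhere by Hive's expression
   serialization; the sketch below only shows the call shape, with the byte[] and all names
   assumed for illustration.

       List<Partition> matched = new ArrayList<>();
       boolean hasUnknown = store.getPartitionsByExpr(
           "hive", "default", "web_logs",
           serializedExpr,                  // assumed: serialized filter expression
           "__HIVE_DEFAULT_PARTITION__",    // default partition name
           (short) -1,                      // no limit
           matched);                        // filled with the matching partitions
       // hasUnknown == true means some partitions could not be evaluated against the expression.
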
+   /**
+    * Get the number of partitions that match a provided SQL filter.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param filter filter from Hive's SQL where clause
+    * @return number of matching partitions.
+    * @throws MetaException error accessing the RDBMS or executing the filter
+    * @throws NoSuchObjectException no such table
+    */
+   int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter)
+     throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Get the number of partitions that match an already parsed expression.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param expr an already parsed Hive expression
+    * @return number of matching partitions.
+    * @throws MetaException error accessing the RDBMS or working with the expression.
+    * @throws NoSuchObjectException no such table.
+    */
+   int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Get partitions by name.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param partNames list of partition names.  These are names not values, so they will include
+    *                  both the key and the value.
+    * @return list of matching partitions
+    * @throws MetaException error accessing the RDBMS.
+    * @throws NoSuchObjectException No such table.
+    */
+   List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
+                                        List<String> partNames)
+       throws MetaException, NoSuchObjectException;
+ 
+   Table markPartitionForEvent(String catName, String dbName, String tblName, Map<String,String> partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException;
+ 
+   boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName, Map<String, String> partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException;
+ 
+   boolean addRole(String rowName, String ownerName)
+       throws InvalidObjectException, MetaException, NoSuchObjectException;
+ 
+   boolean removeRole(String roleName) throws MetaException, NoSuchObjectException;
+ 
+   boolean grantRole(Role role, String userName, PrincipalType principalType,
+       String grantor, PrincipalType grantorType, boolean grantOption)
+       throws MetaException, NoSuchObjectException, InvalidObjectException;
+ 
+   boolean revokeRole(Role role, String userName, PrincipalType principalType,
+       boolean grantOption) throws MetaException, NoSuchObjectException;
+ 
+   PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get privileges for a database for a user.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param userName user name
+    * @param groupNames list of groups the user is in
+    * @return privileges for that user on indicated database
+    * @throws InvalidObjectException no such database
+    * @throws MetaException error accessing the RDBMS
+    */
+   PrincipalPrivilegeSet getDBPrivilegeSet (String catName, String dbName, String userName,
+       List<String> groupNames)  throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get privileges for a table for a user.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param userName user name
+    * @param groupNames list of groups the user is in
+    * @return privileges for that user on indicated table
+    * @throws InvalidObjectException no such table
+    * @throws MetaException error accessing the RDBMS
+    */
+   PrincipalPrivilegeSet getTablePrivilegeSet (String catName, String dbName, String tableName,
+       String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get privileges for a partition for a user.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partition partition name
+    * @param userName user name
+    * @param groupNames list of groups the user is in
+    * @return privileges for that user on indicated partition
+    * @throws InvalidObjectException no such partition
+    * @throws MetaException error accessing the RDBMS
+    */
+   PrincipalPrivilegeSet getPartitionPrivilegeSet (String catName, String dbName, String tableName,
+       String partition, String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get privileges for a column in a table or partition for a user.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partitionName partition name, or null for table level column permissions
+    * @param columnName column name
+    * @param userName user name
+    * @param groupNames list of groups the user is in
+    * @return privileges for that user on indicated column in the table or partition
+    * @throws InvalidObjectException no such table, partition, or column
+    * @throws MetaException error accessing the RDBMS
+    */
+   PrincipalPrivilegeSet getColumnPrivilegeSet (String catName, String dbName, String tableName, String partitionName,
+       String columnName, String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
+ 
+   List<HiveObjectPrivilege> listPrincipalGlobalGrants(String principalName,
+       PrincipalType principalType);
+ 
+   /**
+    * For a given principal name and type, list the DB Grants
+    * @param principalName principal name
+    * @param principalType type
+    * @param catName catalog name
+    * @param dbName database name
+    * @return list of privileges for that principal on the specified database.
+    */
+   List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName);
+ 
+   /**
+    * For a given principal name and type, list the Table Grants
+    * @param principalName principal name
+    * @param principalType type
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @return list of privileges for that principal on the specified table.
+    */
+   List<HiveObjectPrivilege> listAllTableGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName);
+ 
+   /**
+    * For a given principal name and type, list the Partition Grants
+    * @param principalName principal name
+    * @param principalType type
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partName partition name (not value)
+    * @return list of privileges for that principal on the specified partition.
+    */
+   List<HiveObjectPrivilege> listPrincipalPartitionGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, List<String> partValues, String partName);
+ 
+   /**
+    * For a given principal name and type, list the Table Column Grants
+    * @param principalName principal name
+    * @param principalType type
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param columnName column name
+    * @return list of privileges for that principal on the specified table column.
+    */
+   List<HiveObjectPrivilege> listPrincipalTableColumnGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, String columnName);
+ 
+   /**
+    * For a given principal name and type, list the Partition Column Grants
+    * @param principalName principal name
+    * @param principalType type
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partName partition name (not value)
+    * @param columnName column name
+    * @return list of privileges for that principal on the specified partition column.
+    */
+   List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, List<String> partValues, String partName, String columnName);
+ 
+   boolean grantPrivileges (PrivilegeBag privileges)
+       throws InvalidObjectException, MetaException, NoSuchObjectException;
+ 
+   boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
+   throws InvalidObjectException, MetaException, NoSuchObjectException;
+ 
+   boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges)
+   throws InvalidObjectException, MetaException, NoSuchObjectException;
+ 
+   org.apache.hadoop.hive.metastore.api.Role getRole(
+       String roleName) throws NoSuchObjectException;
+ 
+   List<String> listRoleNames();
+ 
+   List<Role> listRoles(String principalName,
+       PrincipalType principalType);
+ 
+   List<RolePrincipalGrant> listRolesWithGrants(String principalName,
+                                                       PrincipalType principalType);
+ 
+ 
+   /**
+    * Get the role to principal grant mapping for given role
+    * @param roleName name of the role
+    * @return list of principal to role grants for that role
+    */
+   List<RolePrincipalGrant> listRoleMembers(String roleName);
+ 
+ 
+   /**
+    * Fetch a partition along with privilege information for a particular user.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param partVals partition values
+    * @param user_name user to get privilege information for.
+    * @param group_names groups to get privilege information for.
+    * @return a partition
+    * @throws MetaException error accessing the RDBMS.
+    * @throws NoSuchObjectException no such partition exists
+    * @throws InvalidObjectException error fetching privilege information
+    */
+   Partition getPartitionWithAuth(String catName, String dbName, String tblName,
+       List<String> partVals, String user_name, List<String> group_names)
+       throws MetaException, NoSuchObjectException, InvalidObjectException;
+ 
+   /**
+    * Fetch some or all partitions for a table, along with privilege information for a particular
+    * user.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param maxParts maximum number of partitions to fetch, -1 for all partitions.
+    * @param userName user to get privilege information for.
+    * @param groupNames groups to get privilege information for.
+    * @return list of partitions.
+    * @throws MetaException error accessing the RDBMS.
+    * @throws NoSuchObjectException no such table exists
+    * @throws InvalidObjectException error fetching privilege information.
+    */
+   List<Partition> getPartitionsWithAuth(String catName, String dbName,
+       String tblName, short maxParts, String userName, List<String> groupNames)
+       throws MetaException, NoSuchObjectException, InvalidObjectException;
+ 
+   /**
+    * Lists partition names that match a given partial specification
+    * @param catName catalog name.
+    * @param db_name
+    *          The name of the database which has the partitions
+    * @param tbl_name
+    *          The name of the table which has the partitions
+    * @param part_vals
+    *          A partial list of values for partitions in order of the table's partition keys.
+    *          Entries can be empty if you only want to specify latter partitions.
+    * @param max_parts
+    *          The maximum number of partitions to return
+    * @return A list of partition names that match the partial spec.
+    * @throws MetaException error accessing RDBMS
+    * @throws NoSuchObjectException No such table exists
+    */
+   List<String> listPartitionNamesPs(String catName, String db_name, String tbl_name,
+       List<String> part_vals, short max_parts)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Lists partitions that match a given partial specification and sets their auth privileges.
+    *   If userName and groupNames null, then no auth privileges are set.
+    * @param catName catalog name.
+    * @param db_name
+    *          The name of the database which has the partitions
+    * @param tbl_name
+    *          The name of the table which has the partitions
+    * @param part_vals
+    *          A partial list of values for partitions in order of the table's partition keys
+    *          Entries can be empty if you need to specify latter partitions.
+    * @param max_parts
+    *          The maximum number of partitions to return
+    * @param userName
+    *          The user name for the partition for authentication privileges
+    * @param groupNames
+    *          The groupNames for the partition for authentication privileges
+    * @return A list of partitions that match the partial spec.
+    * @throws MetaException error accessing RDBMS
+    * @throws NoSuchObjectException No such table exists
+    * @throws InvalidObjectException error accessing privilege information
+    */
+   List<Partition> listPartitionsPsWithAuth(String catName, String db_name, String tbl_name,
+       List<String> part_vals, short max_parts, String userName, List<String> groupNames)
+       throws MetaException, InvalidObjectException, NoSuchObjectException;
+ 
+   /** Persists the given column statistics object to the metastore
+    * @param colStats object to persist
++   * @param txnId transaction id of the calling transaction
++   * @param validWriteIds valid write id list of the calling transaction, in string form
++   * @param writeId write id of the calling transaction for the table
+    * @return Boolean indicating the outcome of the operation
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException error accessing the RDBMS.
+    * @throws InvalidObjectException the stats object is invalid
+    * @throws InvalidInputException unable to record the stats for the table
+    */
 -  boolean updateTableColumnStatistics(ColumnStatistics colStats)
++  boolean updateTableColumnStatistics(ColumnStatistics colStats, long txnId, String validWriteIds, long writeId)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
+ 
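
   Persisting table-level statistics under the new signature might look like the sketch below;
   the ColumnStatistics object would normally be assembled from an ANALYZE run and the id
   values come from the writing transaction (all assumed here).

       boolean recorded = store.updateTableColumnStatistics(colStats, txnId, validWriteIds, writeId);
       if (!recorded) {
         // Stats were not persisted; the caller decides whether to retry or skip.
       }
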
+   /** Persists the given column statistics object to the metastore
+    * @param statsObj object to persist
+    * @param partVals partition values to persist the stats for
++   * @param txnId transaction id of the calling transaction
++   * @param validWriteIds valid write id list of the calling transaction, in string form
++   * @param writeId write id of the calling transaction for the table
+    * @return Boolean indicating the outcome of the operation
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException error accessing the RDBMS.
+    * @throws InvalidObjectException the stats object is invalid
+    * @throws InvalidInputException unable to record the stats for the table
+    */
+   boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
 -     List<String> partVals)
++     List<String> partVals, long txnId, String validWriteIds, long writeId)
+      throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
+ 
+   /**
+    * Returns the relevant column statistics for a given column in a given table in a given database
+    * if such statistics exist.
+    * @param catName catalog name.
+    * @param dbName name of the database, defaults to current database
+    * @param tableName name of the table
+    * @param colName names of the columns for which statistics is requested
+    * @return Relevant column statistics for the column for the given table
+    * @throws NoSuchObjectException No such table
+    * @throws MetaException error accessing the RDBMS
+    *
+    */
+   ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
+     List<String> colName) throws MetaException, NoSuchObjectException;
+ 
+   /**
++   * Returns the relevant column statistics for a given column in a given table in a given database
++   * if such statistics exist.
++   * @param catName catalog name.
++   * @param dbName name of the database, defaults to current database
++   * @param tableName name of the table
++   * @param colName names of the columns for which statistics is requested
++   * @param txnId transaction id of the calling transaction
++   * @param writeIdList valid write id list of the calling transaction, in string form
++   * @return Relevant column statistics for the column for the given table
++   * @throws NoSuchObjectException No such table
++   * @throws MetaException error accessing the RDBMS
++   *
++   */
++  ColumnStatistics getTableColumnStatistics(
++    String catName, String dbName, String tableName,
++    List<String> colName, long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException;
++
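
   Reading statistics consistently with a transaction's snapshot might look like the following
   sketch, where txnId and writeIdList describe the calling transaction; table and column
   names are assumptions.

       ColumnStatistics stats = store.getTableColumnStatistics(
           "hive", "default", "web_logs",
           Arrays.asList("ip", "bytes_sent"),   // columns of interest
           txnId, writeIdList);                 // snapshot of the calling transaction
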
++  /**
+    * Get statistics for a partition for a set of columns.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param partNames list of partition names.  These are names so must be key1=val1[/key2=val2...]
+    * @param colNames list of columns to get stats for
+    * @return list of statistics objects
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException no such partition.
+    */
+   List<ColumnStatistics> getPartitionColumnStatistics(
+      String catName, String dbName, String tblName, List<String> partNames, List<String> colNames)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
++   * Get statistics for a partition for a set of columns.
++   * @param catName catalog name.
++   * @param dbName database name.
++   * @param tblName table name.
++   * @param partNames list of partition names.  These are names so must be key1=val1[/key2=val2...]
++   * @param colNames list of columns to get stats for
++   * @param txnId transaction id of the calling transaction
++   * @param writeIdList valid write id list of the calling transaction, in string form
++   * @return list of statistics objects
++   * @throws MetaException error accessing the RDBMS
++   * @throws NoSuchObjectException no such partition.
++   */
++  List<ColumnStatistics> getPartitionColumnStatistics(
++      String catName, String dbName, String tblName,
++      List<String> partNames, List<String> colNames,
++      long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException;
++
++  /**
+    * Deletes column statistics if present associated with a given db, table, partition and col. If
+    * null is passed instead of a colName, stats when present for all columns associated
+    * with a given db, table and partition are deleted.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param partName partition name.
+    * @param partVals partition values.
+    * @param colName column name.
+    * @return Boolean indicating the outcome of the operation
+    * @throws NoSuchObjectException no such partition
+    * @throws MetaException error accessing the RDBMS
+    * @throws InvalidObjectException error dropping the stats
+    * @throws InvalidInputException bad input, such as null table or database name.
+    */
+   boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName,
+       String partName, List<String> partVals, String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
+ 
+   /**
+    * Delete statistics for a single column or all columns in a table.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param colName column name.  Null to delete stats for all columns in the table.
+    * @return true if the statistics were deleted.
+    * @throws NoSuchObjectException no such table or column.
+    * @throws MetaException error accessing the RDBMS.
+    * @throws InvalidObjectException error dropping the stats
+    * @throws InvalidInputException bad inputs, such as null table name.
+    */
+   boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
+                                       String colName)
+     throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
+ 
+   long cleanupEvents();
+ 
+   boolean addToken(String tokenIdentifier, String delegationToken);
+ 
+   boolean removeToken(String tokenIdentifier);
+ 
+   String getToken(String tokenIdentifier);
+ 
+   List<String> getAllTokenIdentifiers();
+ 
+   int addMasterKey(String key) throws MetaException;
+ 
+   void updateMasterKey(Integer seqNo, String key)
+      throws NoSuchObjectException, MetaException;
+ 
+   boolean removeMasterKey(Integer keySeq);
+ 
+   String[] getMasterKeys();
+ 
+   void verifySchema() throws MetaException;
+ 
+   String getMetaStoreSchemaVersion() throws  MetaException;
+ 
+   abstract void setMetaStoreSchemaVersion(String version, String comment) throws MetaException;
+ 
+   /**
+    * Drop a list of partitions.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name
+    * @param partNames list of partition names.
+    * @throws MetaException error accessing RDBMS or storage.
+    * @throws NoSuchObjectException One or more of the partitions does not exist.
+    */
+   void dropPartitions(String catName, String dbName, String tblName, List<String> partNames)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * List all DB grants for a given principal.
+    * @param principalName principal name
+    * @param principalType type
+    * @return all DB grants for this principal
+    */
+   List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
+       String principalName, PrincipalType principalType);
+ 
+   /**
+    * List all Table grants for a given principal
+    * @param principalName principal name
+    * @param principalType type
+    * @return all Table grants for this principal
+    */
+   List<HiveObjectPrivilege> listPrincipalTableGrantsAll(
+       String principalName, PrincipalType principalType);
+ 
+   /**
+    * List all Partition grants for a given principal
+    * @param principalName principal name
+    * @param principalType type
+    * @return all Partition grants for this principal
+    */
+   List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(
+       String principalName, PrincipalType principalType);
+ 
+   /**
+    * List all Table column grants for a given principal
+    * @param principalName principal name
+    * @param principalType type
+    * @return all Table column grants for this principal
+    */
+   List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(
+       String principalName, PrincipalType principalType);
+ 
+   /**
+    * List all Partition column grants for a given principal
+    * @param principalName principal name
+    * @param principalType type
+    * @return all Partition column grants for this principal
+    */
+   List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(
+       String principalName, PrincipalType principalType);
+ 
+   List<HiveObjectPrivilege> listGlobalGrantsAll();
+ 
+   /**
+    * Find all the privileges for a given database.
+    * @param catName catalog name
+    * @param dbName database name
+    * @return list of all privileges.
+    */
+   List<HiveObjectPrivilege> listDBGrantsAll(String catName, String dbName);
+ 
+   /**
+    * Find all of the privileges for a given column in a given partition.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partitionName partition name (not value)
+    * @param columnName column name
+    * @return all privileges on this column in this partition
+    */
+   List<HiveObjectPrivilege> listPartitionColumnGrantsAll(
+       String catName, String dbName, String tableName, String partitionName, String columnName);
+ 
+   /**
+    * Find all of the privileges for a given table
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @return all privileges on this table
+    */
+   List<HiveObjectPrivilege> listTableGrantsAll(String catName, String dbName, String tableName);
+ 
+   /**
+    * Find all of the privileges for a given partition.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partitionName partition name (not value)
+    * @return all privileges on this partition
+    */
+   List<HiveObjectPrivilege> listPartitionGrantsAll(
+       String catName, String dbName, String tableName, String partitionName);
+ 
+   /**
+    * Find all of the privileges for a given column in a given table.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param columnName column name
+    * @return all privileges on this column in this table
+    */
+   List<HiveObjectPrivilege> listTableColumnGrantsAll(
+       String catName, String dbName, String tableName, String columnName);
+ 
+   /**
+    * Register a user-defined function based on the function specification passed in.
+    * @param func function to create
+    * @throws InvalidObjectException incorrectly specified function
+    * @throws MetaException error accessing the RDBMS
+    */
+   void createFunction(Function func)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Alter function based on new function specs.
+    * @param dbName database name
+    * @param funcName function name
+    * @param newFunction new function specification
+    * @throws InvalidObjectException no such function, or incorrectly specified new function
+    * @throws MetaException incorrectly specified function
+    */
+   void alterFunction(String catName, String dbName, String funcName, Function newFunction)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Drop a function definition.
+    * @param dbName database name
+    * @param funcName function name
+    * @throws MetaException incorrectly specified function
+    * @throws NoSuchObjectException no such function
+    * @throws InvalidObjectException not sure when this is thrown
+    * @throws InvalidInputException not sure when this is thrown
+    */
+   void dropFunction(String catName, String dbName, String funcName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException;
+ 
+   /**
+    * Retrieve function by name.
+    * @param dbName database name
+    * @param funcName function name
+    * @return the function
+    * @throws MetaException incorrectly specified function
+    */
+   Function getFunction(String catName, String dbName, String funcName) throws MetaException;
+ 
+   /**
+    * Retrieve all functions.
+    * @return all functions in a catalog
+    * @throws MetaException incorrectly specified function
+    */
+   List<Function> getAllFunctions(String catName) throws MetaException;
+ 
+   /**
+    * Retrieve list of function names based on name pattern.
+    * @param dbName database name
+    * @param pattern pattern to match
+    * @return functions that match the pattern
+    * @throws MetaException incorrectly specified function
+    */
+   List<String> getFunctions(String catName, String dbName, String pattern) throws MetaException;
+ 
+   /**
+    * Get aggregated stats for a table or partition(s).
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param partNames list of partition names.  These are the names of the partitions, not
+    *                  values.
+    * @param colNames list of column names
+    * @return aggregated stats
+    * @throws MetaException error accessing RDBMS
+    * @throws NoSuchObjectException no such table or partition
+    */
+   AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
+     List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException;
+ 
+   /**
++   * Get aggregated stats for a table or partition(s).
++   * @param catName catalog name.
++   * @param dbName database name.
++   * @param tblName table name.
++   * @param partNames list of partition names.  These are the names of the partitions, not
++   *                  values.
++   * @param colNames list of column names
++   * @param txnId transaction id of the calling transaction
++   * @param writeIdList valid write id list of the calling transaction, in string form
++   * @return aggregated stats
++   * @throws MetaException error accessing RDBMS
++   * @throws NoSuchObjectException no such table or partition
++   */
++  AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
++    List<String> partNames, List<String> colNames,
++    long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException;
++
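
   And a corresponding sketch for aggregated stats over a set of partitions, again validated
   against the caller's write-id snapshot; partition names use the key=value form and the
   concrete values are assumptions.

       AggrStats aggr = store.get_aggr_stats_for(
           "hive", "default", "web_logs",
           Arrays.asList("ds=2018-07-01", "ds=2018-07-02"),  // partition names, not values
           Arrays.asList("bytes_sent"),
           txnId, writeIdList);
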
++  /**
+    * Get column stats for all partitions of all tables in the database
+    * @param catName catalog name
+    * @param dbName database name
+    * @return List of column stats objects for all partitions of all tables in the database
+    * @throws MetaException error accessing RDBMS
+    * @throws NoSuchObjectException no such database
+    */
+   List<ColStatsObjWithSourceInfo> getPartitionColStatsForDatabase(String catName, String dbName)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Get the next notification event.
+    * @param rqst Request containing information on the last processed notification.
+    * @return list of notifications, sorted by eventId
+    */
+   NotificationEventResponse getNextNotification(NotificationEventRequest rqst);
+ 
+ 
+   /**
+    * Add a notification entry.  This should only be called from inside the metastore
+    * @param event the notification to add
+    * @throws MetaException error accessing RDBMS
+    */
+   void addNotificationEvent(NotificationEvent event) throws MetaException;
+ 
+   /**
+    * Remove older notification events.
+    * @param olderThan Remove any events older than a given number of seconds
+    */
+   void cleanNotificationEvents(int olderThan);
+ 
+   /**
+    * Get the last issued notification event id.  This is intended for use by the export command
+    * so that users can determine the state of the system at the point of the export,
+    * and determine which notification events happened before or after the export.
+    * @return the id of the last notification event added to the metastore
+    */
+   CurrentNotificationEventId getCurrentNotificationEventId();
+ 
+   /**
+    * Get the number of events for the given database that occurred after fromEventId.
+    * This is intended for use by the repl commands to track the progress of incremental dump.
+    * @return number of notification events matching the request
+    */
+   NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst);
+ 
+   /*
+    * Flush any catalog objects held by the metastore implementation.  Note that this does not
+    * flush statistics objects.  This should be called at the beginning of each query.
+    */
+   void flushCache();
+ 
+   /**
+    * @param fileIds List of file IDs from the filesystem.
+    * @return File metadata buffers from file metadata cache. The array is fileIds-sized, and
+    *         the entries (or nulls, if metadata is not in cache) correspond to fileIds in the list
+    */
+   ByteBuffer[] getFileMetadata(List<Long> fileIds) throws MetaException;
+ 
+   /**
+    * @param fileIds List of file IDs from the filesystem.
+    * @param metadata Metadata buffers corresponding to fileIds in the list.
+    * @param type The type; determines the class that can do additional processing for metadata.
+    */
+   void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata,
+       FileMetadataExprType type) throws MetaException;
+ 
+   /**
+    * @return Whether file metadata cache is supported by this implementation.
+    */
+   boolean isFileMetadataSupported();
+ 
+   /**
+    * Gets file metadata from cache after applying a format-specific expression that can
+    * produce additional information based on file metadata and also filter the file list.
+    * @param fileIds List of file IDs from the filesystem.
+    * @param expr Format-specific serialized expression applicable to the files' metadata.
+    * @param type Expression type; used to determine the class that handles the metadata.
+    * @param metadatas Output parameter; fileIds-sized array to receive the metadata
+    *                  for corresponding files, if any.
+    * @param exprResults Output parameter; fileIds-sized array to receive the format-specific
+    *                    expression results for the corresponding files.
+    * @param eliminated Output parameter; fileIds-sized array to receive the indication of whether
+    *                   the corresponding files are entirely eliminated by the expression.
+    */
+   void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[] expr,
+       ByteBuffer[] metadatas, ByteBuffer[] exprResults, boolean[] eliminated)
+           throws MetaException;
+ 
+   /** Gets file metadata handler for the corresponding type. */
+   FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type);
+ 
+   /**
+    * Gets total number of tables.
+    */
+   @InterfaceStability.Evolving
+   int getTableCount() throws MetaException;
+ 
+   /**
+    * Gets total number of partitions.
+    */
+   @InterfaceStability.Evolving
+   int getPartitionCount() throws MetaException;
+ 
+   /**
+    * Gets total number of databases.
+    */
+   @InterfaceStability.Evolving
+   int getDatabaseCount() throws MetaException;
+ 
+   /**
+    * Get the primary key associated with a table.  Strangely enough each SQLPrimaryKey is actually a
+    * column in the key, not the key itself.  Thus the list.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @return list of primary key columns or an empty list if the table does not have a primary key
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<SQLPrimaryKey> getPrimaryKeys(String catName, String db_name, String tbl_name)
+       throws MetaException;
+ 
+   /**
+    * Get the foreign keys for a table.  All foreign keys for a particular table can be fetched by
+    * passing null for the last two arguments.
+    * @param catName catalog name.
+    * @param parent_db_name Database the table referred to is in.  This can be null to match all
+    *                       databases.
+    * @param parent_tbl_name Table that is referred to.  This can be null to match all tables.
+    * @param foreign_db_name Database the table with the foreign key is in.
+    * @param foreign_tbl_name Table with the foreign key.
+    * @return List of all matching foreign key columns.  Note that if more than one foreign key
+    * matches the arguments the results here will be all mixed together into a single list.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<SQLForeignKey> getForeignKeys(String catName, String parent_db_name,
+     String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
+     throws MetaException;
+ 
+   /**
+    * Get unique constraints associated with a table.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @return list of unique constraints
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<SQLUniqueConstraint> getUniqueConstraints(String catName, String db_name,
+     String tbl_name) throws MetaException;
+ 
+   /**
+    * Get not null constraints on a table.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @return list of not null constraints
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<SQLNotNullConstraint> getNotNullConstraints(String catName, String db_name,
+     String tbl_name) throws MetaException;
+ 
+   /**
+    * Get default values for columns in a table.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @return list of default values defined on the table.
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<SQLDefaultConstraint> getDefaultConstraints(String catName, String db_name,
+                                                    String tbl_name) throws MetaException;
+ 
+   /**
+    * Get check constraints for columns in a table.
+    * @param catName catalog name.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @return check constraints for this table
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<SQLCheckConstraint> getCheckConstraints(String catName, String db_name,
+                                                    String tbl_name) throws MetaException;
+ 
+   /**
+    * Create a table with constraints
+    * @param tbl table definition
+    * @param primaryKeys primary key definition, or null
+    * @param foreignKeys foreign key definition, or null
+    * @param uniqueConstraints unique constraints definition, or null
+    * @param notNullConstraints not null constraints definition, or null
+    * @param defaultConstraints default values definition, or null
+    * @param checkConstraints check constraints definition, or null
+    * @return list of constraint names
+    * @throws InvalidObjectException one of the provided objects is malformed.
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<String> createTableWithConstraints(Table tbl, List<SQLPrimaryKey> primaryKeys,
+     List<SQLForeignKey> foreignKeys, List<SQLUniqueConstraint> uniqueConstraints,
+     List<SQLNotNullConstraint> notNullConstraints,
+     List<SQLDefaultConstraint> defaultConstraints,
+     List<SQLCheckConstraint> checkConstraints) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Drop a constraint, any constraint.  I have no idea why add and get each have separate
+    * methods for each constraint type but drop has only one.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param constraintName name of the constraint
+    * @throws NoSuchObjectException no constraint of this name exists
+    */
+   default void dropConstraint(String catName, String dbName, String tableName,
+                               String constraintName) throws NoSuchObjectException {
+     dropConstraint(catName, dbName, tableName, constraintName, false);
+   }
+ 
+   /**
+    * Drop a constraint, any constraint.  I have no idea why add and get each have separate
+    * methods for each constraint type but drop has only one.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param constraintName name of the constraint
+    * @param missingOk if true, it is not an error if there is no constraint of this name.  If
+    *                  false and there is no constraint of this name an exception will be thrown.
+    * @throws NoSuchObjectException no constraint of this name exists and missingOk = false
+    */
+   void dropConstraint(String catName, String dbName, String tableName, String constraintName,
+                       boolean missingOk) throws NoSuchObjectException;
+ 
+   /**
+    * Add a primary key to a table.
+    * @param pks Columns in the primary key.
+    * @return the name of the constraint, as a list of strings.
+    * @throws InvalidObjectException The SQLPrimaryKeys list is malformed
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<String> addPrimaryKeys(List<SQLPrimaryKey> pks) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add a foreign key to a table.
+    * @param fks foreign key specification
+    * @return foreign key name.
+    * @throws InvalidObjectException the specification is malformed.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<String> addForeignKeys(List<SQLForeignKey> fks) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add unique constraints to a table.
+    * @param uks unique constraints specification
+    * @return unique constraint names.
+    * @throws InvalidObjectException the specification is malformed.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<String> addUniqueConstraints(List<SQLUniqueConstraint> uks) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add not null constraints to a table.
+    * @param nns not null constraint specifications
+    * @return constraint names.
+    * @throws InvalidObjectException the specification is malformed.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<String> addNotNullConstraints(List<SQLNotNullConstraint> nns) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add default values to a table definition
+    * @param dv list of default values
+    * @return constraint names
+    * @throws InvalidObjectException the specification is malformed.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<String> addDefaultConstraints(List<SQLDefaultConstraint> dv)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add check constraints to a table
+    * @param cc check constraints to add
+    * @return list of constraint names
+    * @throws InvalidObjectException the specification is malformed
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<String> addCheckConstraints(List<SQLCheckConstraint> cc) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Gets the unique id of the backing datastore for the metadata
+    * @return unique id (UUID) of the backing datastore
+    * @throws MetaException error accessing the RDBMS
+    */
+   String getMetastoreDbUuid() throws MetaException;
+ 
+   void createResourcePlan(WMResourcePlan resourcePlan, String copyFrom, int defaultPoolSize)
+       throws AlreadyExistsException, MetaException, InvalidObjectException, NoSuchObjectException;
+ 
+   WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException, MetaException;
+ 
+   List<WMResourcePlan> getAllResourcePlans() throws MetaException;
+ 
+   WMFullResourcePlan alterResourcePlan(String name, WMNullableResourcePlan resourcePlan,
+       boolean canActivateDisabled, boolean canDeactivate, boolean isReplace)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+           MetaException;
+ 
+   WMFullResourcePlan getActiveResourcePlan() throws MetaException;
+ 
+   WMValidateResourcePlanResponse validateResourcePlan(String name)
+       throws NoSuchObjectException, InvalidObjectException, MetaException;
+ 
+   void dropResourcePlan(String name) throws NoSuchObjectException, MetaException;
+ 
+   void createWMTrigger(WMTrigger trigger)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+           MetaException;
+ 
+   void alterWMTrigger(WMTrigger trigger)
+       throws NoSuchObjectException, InvalidOperationException, MetaException;
+ 
+   void dropWMTrigger(String resourcePlanName, String triggerName)
+       throws NoSuchObjectException, InvalidOperationException, MetaException;
+ 
+   List<WMTrigger> getTriggersForResourcePlan(String resourcePlanName)
+       throws NoSuchObjectException, MetaException;
+ 
+   void createPool(WMPool pool) throws AlreadyExistsException, NoSuchObjectException,
+       InvalidOperationException, MetaException;
+ 
+   void alterPool(WMNullablePool pool, String poolPath) throws AlreadyExistsException,
+       NoSuchObjectException, InvalidOperationException, MetaException;
+ 
+   void dropWMPool(String resourcePlanName, String poolPath)
+       throws NoSuchObjectException, InvalidOperationException, MetaException;
+ 
+   void createOrUpdateWMMapping(WMMapping mapping, boolean update)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+           MetaException;
+ 
+   void dropWMMapping(WMMapping mapping)
+       throws NoSuchObjectException, InvalidOperationException, MetaException;
+ 
+   void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+           MetaException;
+ 
+   void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath)
+       throws NoSuchObjectException, InvalidOperationException, MetaException;
+ 
+   /**
+    * Create a new ISchema.
+    * @param schema schema to create
+    * @throws AlreadyExistsException there's already a schema with this name
+    * @throws MetaException general database exception
+    */
+   void createISchema(ISchema schema) throws AlreadyExistsException, MetaException,
+       NoSuchObjectException;
+ 
+   /**
+    * Alter an existing ISchema.  This assumes the caller has already checked that such a schema
+    * exists.
+    * @param schemaName name of the schema
+    * @param newSchema new schema object
+    * @throws NoSuchObjectException no schema with this name exists
+    * @throws MetaException general database exception
+    */
+   void alterISchema(ISchemaName schemaName, ISchema newSchema) throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Get an ISchema by name.
+    * @param schemaName schema descriptor
+    * @return ISchema
+    * @throws MetaException general database exception
+    */
+   ISchema getISchema(ISchemaName schemaName) throws MetaException;
+ 
+   /**
+    * Drop an ISchema.  This does not check whether there are valid versions of the schema in
+    * existence, it assumes the caller has already done that.
+    * @param schemaName schema descriptor
+    * @throws NoSuchObjectException no schema of this name exists
+    * @throws MetaException general database exception
+    */
+   void dropISchema(ISchemaName schemaName) throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Create a new version of an existing schema.
+    * @param schemaVersion the new schema version object to add
+    * @throws AlreadyExistsException a version of the schema with the same version number already
+    * exists.
+    * @throws InvalidObjectException the passed in SchemaVersion object has problems.
+    * @throws NoSuchObjectException no schema with the passed in name exists.
+    * @throws MetaException general database exception
+    */
+   void addSchemaVersion(SchemaVersion schemaVersion)
+       throws AlreadyExistsException, InvalidObjectException, NoSuchObjectException, MetaException;
+ 
+   /**
+    * Alter a schema version.  Note that the Thrift interface only supports changing the serde
+    * mapping and states.  This method does not guarantee it will check any more than that.  This
+    * method does not understand the state transitions and just assumes that the new state it is
+    * passed is reasonable.
+    * @param version version descriptor for the schema
+    * @param newVersion altered SchemaVersion
+    * @throws NoSuchObjectException no such version of the named schema exists
+    * @throws MetaException general database exception
+    */
+   void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion newVersion)
+       throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Get a specific schema version.
+    * @param version version descriptor for the schema
+    * @return the SchemaVersion
+    * @throws MetaException general database exception
+    */
+   SchemaVersion getSchemaVersion(SchemaVersionDescriptor version) throws MetaException;
+ 
+   /**
+    * Get the latest version of a schema.
+    * @param schemaName name of the schema
+    * @return latest version of the schema
+    * @throws MetaException general database exception
+    */
+   SchemaVersion getLatestSchemaVersion(ISchemaName schemaName) throws MetaException;
+ 
+   /**
+    * Get all of the versions of a schema
+    * @param schemaName name of the schema
+    * @return all versions of the schema
+    * @throws MetaException general database exception
+    */
+   List<SchemaVersion> getAllSchemaVersion(ISchemaName schemaName) throws MetaException;
+ 
+   /**
+    * Find all SchemaVersion objects that match a query.  The query will select all SchemaVersions
+    * that are equal to all of the non-null passed in arguments.  That is, if arguments
+    * colName='name', colNamespace=null, type='string' are passed in, then all schemas that have
+    * a column with colName 'name' and type 'string' will be returned.
+    * @param colName column name.  Null is ok, which will cause this field to not be used in the
+    *                query.
+    * @param colNamespace column namespace.   Null is ok, which will cause this field to not be
+    *                     used in the query.
+    * @param type column type.   Null is ok, which will cause this field to not be used in the
+    *             query.
+    * @return List of all SchemaVersions that match.  Note that there is no expectation that these
+    * SchemaVersions derive from the same ISchema.  The list will be empty if there are no
+    * matching SchemaVersions.
+    * @throws MetaException general database exception
+    */
+   List<SchemaVersion> getSchemaVersionsByColumns(String colName, String colNamespace, String type)
+       throws MetaException;
+ 
+   /**
+    * Drop a version of the schema.
+    * @param version version descriptor for the schema
+    * @throws NoSuchObjectException no such version of the named schema exists
+    * @throws MetaException general database exception
+    */
+   void dropSchemaVersion(SchemaVersionDescriptor version) throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Get serde information.
+    * @param serDeName name of the SerDe
+    * @return the SerDe, or null if there is no such serde
+    * @throws NoSuchObjectException no serde with this name exists
+    * @throws MetaException general database exception
+    */
+   SerDeInfo getSerDeInfo(String serDeName) throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Add a serde.
+    * @param serde serde to add
+    * @throws AlreadyExistsException a serde of this name already exists
+    * @throws MetaException general database exception
+    */
+   void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException;
+ 
+   /** Adds a RuntimeStat for persistence. */
+   void addRuntimeStat(RuntimeStat stat) throws MetaException;
+ 
+   /** Reads runtime statistic entries. */
+   List<RuntimeStat> getRuntimeStats(int maxEntries, int maxCreateTime) throws MetaException;
+ 
+   /** Removes outdated statistics. */
+   int deleteRuntimeStats(int maxRetainSecs) throws MetaException;
+ 
+   List<TableName> getTableNamesWithStats() throws MetaException, NoSuchObjectException;
+ 
+   List<TableName> getAllTableNamesForStats() throws MetaException, NoSuchObjectException;
+ 
+   Map<String, List<String>> getPartitionColsWithStats(String catName, String dbName,
+       String tableName) throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Remove write notification events older than a given age.
+    * @param olderThan remove any events older than this many seconds
+    */
+   void cleanWriteNotificationEvents(int olderThan);
+ 
+   /**
+    * Get all write events for a specific transaction.
+    * @param txnId id of the transaction whose write events are fetched
+    * @param dbName the name of the db for which the dump is being taken
+    * @param tableName the name of the table for which the dump is being taken
+    */
+   List<WriteEventInfo> getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException;
+ }
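
As a minimal, hypothetical sketch of how a caller could use the column-based lookup documented
above (getSchemaVersionsByColumns), the snippet below is not part of the patch; the class name
SchemaVersionLookupExample is invented for illustration, and the imports assume the standalone
metastore package layout.

  import java.util.List;

  import org.apache.hadoop.hive.metastore.RawStore;
  import org.apache.hadoop.hive.metastore.api.MetaException;
  import org.apache.hadoop.hive.metastore.api.SchemaVersion;

  // Illustrative caller of the column-based lookup; not part of the patch.
  public class SchemaVersionLookupExample {
    /**
     * Finds every stored schema version that declares a string column named "name".
     * colNamespace is passed as null, so that field is simply not used in the query,
     * as the Javadoc above describes.  An empty list is returned when nothing matches,
     * and the returned versions may come from different ISchemas.
     */
    public static List<SchemaVersion> findStringNameColumns(RawStore store) throws MetaException {
      return store.getSchemaVersionsByColumns("name", null, "string");
    }
  }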


[11/54] [abbrv] hive git commit: HIVE-17896: TopNKey: Create a standalone vectorizable TopNKey operator (Teddy Choi, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query29.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query29.q.out b/ql/src/test/results/clientpositive/perf/tez/query29.q.out
index 791ddb6..9bfcdfa 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query29.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query29.q.out
@@ -117,200 +117,202 @@ Stage-0
     limit:100
     Stage-1
       Reducer 5 vectorized
-      File Output Operator [FS_259]
-        Limit [LIM_258] (rows=100 width=88)
+      File Output Operator [FS_260]
+        Limit [LIM_259] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_257] (rows=463823414 width=88)
+          Select Operator [SEL_258] (rows=463823414 width=88)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
           <-Reducer 4 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_256]
-              Group By Operator [GBY_255] (rows=463823414 width=88)
+            SHUFFLE [RS_257]
+              Group By Operator [GBY_256] (rows=463823414 width=88)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3
               <-Reducer 3 [SIMPLE_EDGE]
                 SHUFFLE [RS_49]
                   PartitionCols:_col0, _col1, _col2, _col3
                   Group By Operator [GBY_48] (rows=927646829 width=88)
                     Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col14)","sum(_col22)","sum(_col3)"],keys:_col7, _col8, _col27, _col28
-                    Merge Join Operator [MERGEJOIN_204] (rows=927646829 width=88)
-                      Conds:RS_44._col1, _col2=RS_45._col14, _col13(Inner),Output:["_col3","_col7","_col8","_col14","_col22","_col27","_col28"]
-                    <-Reducer 12 [SIMPLE_EDGE]
-                      SHUFFLE [RS_45]
-                        PartitionCols:_col14, _col13
-                        Select Operator [SEL_40] (rows=843315281 width=88)
-                          Output:["_col1","_col2","_col8","_col13","_col14","_col16","_col21","_col22"]
-                          Merge Join Operator [MERGEJOIN_203] (rows=843315281 width=88)
-                            Conds:RS_37._col3=RS_246._col0(Inner),Output:["_col5","_col10","_col11","_col13","_col18","_col19","_col21","_col22"]
-                          <-Map 22 [SIMPLE_EDGE] vectorized
-                            PARTITION_ONLY_SHUFFLE [RS_246]
+                    Top N Key Operator [TNK_93] (rows=927646829 width=88)
+                      keys:_col7, _col8, _col27, _col28,sort order:++++,top n:100
+                      Merge Join Operator [MERGEJOIN_205] (rows=927646829 width=88)
+                        Conds:RS_44._col1, _col2=RS_45._col14, _col13(Inner),Output:["_col3","_col7","_col8","_col14","_col22","_col27","_col28"]
+                      <-Reducer 12 [SIMPLE_EDGE]
+                        SHUFFLE [RS_45]
+                          PartitionCols:_col14, _col13
+                          Select Operator [SEL_40] (rows=843315281 width=88)
+                            Output:["_col1","_col2","_col8","_col13","_col14","_col16","_col21","_col22"]
+                            Merge Join Operator [MERGEJOIN_204] (rows=843315281 width=88)
+                              Conds:RS_37._col3=RS_247._col0(Inner),Output:["_col5","_col10","_col11","_col13","_col18","_col19","_col21","_col22"]
+                            <-Map 22 [SIMPLE_EDGE] vectorized
+                              PARTITION_ONLY_SHUFFLE [RS_247]
+                                PartitionCols:_col0
+                                Select Operator [SEL_246] (rows=1704 width=1910)
+                                  Output:["_col0","_col1","_col2"]
+                                  Filter Operator [FIL_245] (rows=1704 width=1910)
+                                    predicate:s_store_sk is not null
+                                    TableScan [TS_25] (rows=1704 width=1910)
+                                      default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_id","s_store_name"]
+                            <-Reducer 11 [SIMPLE_EDGE]
+                              SHUFFLE [RS_37]
+                                PartitionCols:_col3
+                                Merge Join Operator [MERGEJOIN_203] (rows=766650239 width=88)
+                                  Conds:RS_34._col1=RS_239._col0(Inner),Output:["_col3","_col5","_col10","_col11","_col13","_col18","_col19"]
+                                <-Map 20 [SIMPLE_EDGE] vectorized
+                                  PARTITION_ONLY_SHUFFLE [RS_239]
+                                    PartitionCols:_col0
+                                    Select Operator [SEL_238] (rows=462000 width=1436)
+                                      Output:["_col0","_col1","_col2"]
+                                      Filter Operator [FIL_237] (rows=462000 width=1436)
+                                        predicate:i_item_sk is not null
+                                        TableScan [TS_22] (rows=462000 width=1436)
+                                          default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_item_desc"]
+                                <-Reducer 10 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_34]
+                                    PartitionCols:_col1
+                                    Merge Join Operator [MERGEJOIN_202] (rows=696954748 width=88)
+                                      Conds:RS_31._col1, _col2, _col4=RS_32._col1, _col2, _col3(Inner),Output:["_col1","_col3","_col5","_col10","_col11","_col13"]
+                                    <-Reducer 15 [SIMPLE_EDGE]
+                                      PARTITION_ONLY_SHUFFLE [RS_32]
+                                        PartitionCols:_col1, _col2, _col3
+                                        Merge Join Operator [MERGEJOIN_201] (rows=63350266 width=77)
+                                          Conds:RS_230._col0=RS_223._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
+                                        <-Map 13 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_223]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_220] (rows=4058 width=1119)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_218] (rows=4058 width=1119)
+                                                predicate:((d_year = 1999) and d_date_sk is not null and d_moy BETWEEN 4 AND 7)
+                                                TableScan [TS_9] (rows=73049 width=1119)
+                                                  default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
+                                        <-Map 19 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_230]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_229] (rows=57591150 width=77)
+                                              Output:["_col0","_col1","_col2","_col3","_col4"]
+                                              Filter Operator [FIL_228] (rows=57591150 width=77)
+                                                predicate:(sr_customer_sk is not null and sr_item_sk is not null and sr_returned_date_sk is not null and sr_ticket_number is not null)
+                                                TableScan [TS_12] (rows=57591150 width=77)
+                                                  default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_returned_date_sk","sr_item_sk","sr_customer_sk","sr_ticket_number","sr_return_quantity"]
+                                    <-Reducer 9 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_31]
+                                        PartitionCols:_col1, _col2, _col4
+                                        Merge Join Operator [MERGEJOIN_200] (rows=633595212 width=88)
+                                          Conds:RS_255._col0=RS_221._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5"]
+                                        <-Map 13 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_221]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_219] (rows=18262 width=1119)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_217] (rows=18262 width=1119)
+                                                predicate:((d_moy = 4) and (d_year = 1999) and d_date_sk is not null)
+                                                 Please refer to the previous TableScan [TS_9]
+                                        <-Map 8 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_255]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_254] (rows=575995635 width=88)
+                                              Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                              Filter Operator [FIL_253] (rows=575995635 width=88)
+                                                predicate:((ss_customer_sk BETWEEN DynamicValue(RS_32_store_returns_sr_customer_sk_min) AND DynamicValue(RS_32_store_returns_sr_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_32_store_returns_sr_customer_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_32_store_returns_sr_item_sk_min) AND DynamicValue(RS_32_store_returns_sr_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_32_store_returns_sr_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_35_item_i_item_sk_min) AND DynamicValue(RS_35_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_35_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_29_d1_d_date_sk_min) AND DynamicValue(RS_29_d1_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_29_d1_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_38_store_s_store_sk_min) AND DynamicValue(RS_38_store_s_store_sk_
 max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_38_store_s_store_sk_bloom_filter))) and (ss_ticket_number BETWEEN DynamicValue(RS_32_store_returns_sr_ticket_number_min) AND DynamicValue(RS_32_store_returns_sr_ticket_number_max) and in_bloom_filter(ss_ticket_number, DynamicValue(RS_32_store_returns_sr_ticket_number_bloom_filter))) and ss_customer_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_ticket_number is not null)
+                                                TableScan [TS_6] (rows=575995635 width=88)
+                                                  default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_store_sk","ss_ticket_number","ss_quantity"]
+                                                <-Reducer 14 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_227]
+                                                    Group By Operator [GBY_226] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      SHUFFLE [RS_225]
+                                                        Group By Operator [GBY_224] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_222] (rows=18262 width=1119)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_219]
+                                                <-Reducer 16 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_232]
+                                                    Group By Operator [GBY_231] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
+                                                    <-Reducer 15 [CUSTOM_SIMPLE_EDGE]
+                                                      PARTITION_ONLY_SHUFFLE [RS_121]
+                                                        Group By Operator [GBY_120] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
+                                                          Select Operator [SEL_119] (rows=63350266 width=77)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Merge Join Operator [MERGEJOIN_201]
+                                                <-Reducer 17 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_234]
+                                                    Group By Operator [GBY_233] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
+                                                    <-Reducer 15 [CUSTOM_SIMPLE_EDGE]
+                                                      PARTITION_ONLY_SHUFFLE [RS_126]
+                                                        Group By Operator [GBY_125] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
+                                                          Select Operator [SEL_124] (rows=63350266 width=77)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Merge Join Operator [MERGEJOIN_201]
+                                                <-Reducer 18 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_236]
+                                                    Group By Operator [GBY_235] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
+                                                    <-Reducer 15 [CUSTOM_SIMPLE_EDGE]
+                                                      PARTITION_ONLY_SHUFFLE [RS_131]
+                                                        Group By Operator [GBY_130] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
+                                                          Select Operator [SEL_129] (rows=63350266 width=77)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Merge Join Operator [MERGEJOIN_201]
+                                                <-Reducer 21 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_244]
+                                                    Group By Operator [GBY_243] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      PARTITION_ONLY_SHUFFLE [RS_242]
+                                                        Group By Operator [GBY_241] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_240] (rows=462000 width=1436)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_238]
+                                                <-Reducer 23 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_252]
+                                                    Group By Operator [GBY_251] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 22 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      PARTITION_ONLY_SHUFFLE [RS_250]
+                                                        Group By Operator [GBY_249] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_248] (rows=1704 width=1910)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_246]
+                      <-Reducer 2 [SIMPLE_EDGE]
+                        SHUFFLE [RS_44]
+                          PartitionCols:_col1, _col2
+                          Merge Join Operator [MERGEJOIN_199] (rows=316788826 width=135)
+                            Conds:RS_216._col0=RS_208._col0(Inner),Output:["_col1","_col2","_col3"]
+                          <-Map 6 [SIMPLE_EDGE] vectorized
+                            PARTITION_ONLY_SHUFFLE [RS_208]
                               PartitionCols:_col0
-                              Select Operator [SEL_245] (rows=1704 width=1910)
-                                Output:["_col0","_col1","_col2"]
-                                Filter Operator [FIL_244] (rows=1704 width=1910)
-                                  predicate:s_store_sk is not null
-                                  TableScan [TS_25] (rows=1704 width=1910)
-                                    default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_id","s_store_name"]
-                          <-Reducer 11 [SIMPLE_EDGE]
-                            SHUFFLE [RS_37]
-                              PartitionCols:_col3
-                              Merge Join Operator [MERGEJOIN_202] (rows=766650239 width=88)
-                                Conds:RS_34._col1=RS_238._col0(Inner),Output:["_col3","_col5","_col10","_col11","_col13","_col18","_col19"]
-                              <-Map 20 [SIMPLE_EDGE] vectorized
-                                PARTITION_ONLY_SHUFFLE [RS_238]
-                                  PartitionCols:_col0
-                                  Select Operator [SEL_237] (rows=462000 width=1436)
-                                    Output:["_col0","_col1","_col2"]
-                                    Filter Operator [FIL_236] (rows=462000 width=1436)
-                                      predicate:i_item_sk is not null
-                                      TableScan [TS_22] (rows=462000 width=1436)
-                                        default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_item_desc"]
-                              <-Reducer 10 [SIMPLE_EDGE]
-                                SHUFFLE [RS_34]
-                                  PartitionCols:_col1
-                                  Merge Join Operator [MERGEJOIN_201] (rows=696954748 width=88)
-                                    Conds:RS_31._col1, _col2, _col4=RS_32._col1, _col2, _col3(Inner),Output:["_col1","_col3","_col5","_col10","_col11","_col13"]
-                                  <-Reducer 15 [SIMPLE_EDGE]
-                                    PARTITION_ONLY_SHUFFLE [RS_32]
-                                      PartitionCols:_col1, _col2, _col3
-                                      Merge Join Operator [MERGEJOIN_200] (rows=63350266 width=77)
-                                        Conds:RS_229._col0=RS_222._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
-                                      <-Map 13 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_222]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_219] (rows=4058 width=1119)
-                                            Output:["_col0"]
-                                            Filter Operator [FIL_217] (rows=4058 width=1119)
-                                              predicate:((d_year = 1999) and d_date_sk is not null and d_moy BETWEEN 4 AND 7)
-                                              TableScan [TS_9] (rows=73049 width=1119)
-                                                default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
-                                      <-Map 19 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_229]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_228] (rows=57591150 width=77)
-                                            Output:["_col0","_col1","_col2","_col3","_col4"]
-                                            Filter Operator [FIL_227] (rows=57591150 width=77)
-                                              predicate:(sr_customer_sk is not null and sr_item_sk is not null and sr_returned_date_sk is not null and sr_ticket_number is not null)
-                                              TableScan [TS_12] (rows=57591150 width=77)
-                                                default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_returned_date_sk","sr_item_sk","sr_customer_sk","sr_ticket_number","sr_return_quantity"]
-                                  <-Reducer 9 [SIMPLE_EDGE]
-                                    SHUFFLE [RS_31]
-                                      PartitionCols:_col1, _col2, _col4
-                                      Merge Join Operator [MERGEJOIN_199] (rows=633595212 width=88)
-                                        Conds:RS_254._col0=RS_220._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5"]
-                                      <-Map 13 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_220]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_218] (rows=18262 width=1119)
-                                            Output:["_col0"]
-                                            Filter Operator [FIL_216] (rows=18262 width=1119)
-                                              predicate:((d_moy = 4) and (d_year = 1999) and d_date_sk is not null)
-                                               Please refer to the previous TableScan [TS_9]
-                                      <-Map 8 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_254]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_253] (rows=575995635 width=88)
-                                            Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                            Filter Operator [FIL_252] (rows=575995635 width=88)
-                                              predicate:((ss_customer_sk BETWEEN DynamicValue(RS_32_store_returns_sr_customer_sk_min) AND DynamicValue(RS_32_store_returns_sr_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_32_store_returns_sr_customer_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_32_store_returns_sr_item_sk_min) AND DynamicValue(RS_32_store_returns_sr_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_32_store_returns_sr_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_35_item_i_item_sk_min) AND DynamicValue(RS_35_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_35_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_29_d1_d_date_sk_min) AND DynamicValue(RS_29_d1_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_29_d1_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_38_store_s_store_sk_min) AND DynamicValue(RS_38_store_s_store_sk_ma
 x) and in_bloom_filter(ss_store_sk, DynamicValue(RS_38_store_s_store_sk_bloom_filter))) and (ss_ticket_number BETWEEN DynamicValue(RS_32_store_returns_sr_ticket_number_min) AND DynamicValue(RS_32_store_returns_sr_ticket_number_max) and in_bloom_filter(ss_ticket_number, DynamicValue(RS_32_store_returns_sr_ticket_number_bloom_filter))) and ss_customer_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_ticket_number is not null)
-                                              TableScan [TS_6] (rows=575995635 width=88)
-                                                default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_store_sk","ss_ticket_number","ss_quantity"]
-                                              <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_226]
-                                                  Group By Operator [GBY_225] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_224]
-                                                      Group By Operator [GBY_223] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_221] (rows=18262 width=1119)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_218]
-                                              <-Reducer 16 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_231]
-                                                  Group By Operator [GBY_230] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
-                                                  <-Reducer 15 [CUSTOM_SIMPLE_EDGE]
-                                                    PARTITION_ONLY_SHUFFLE [RS_120]
-                                                      Group By Operator [GBY_119] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
-                                                        Select Operator [SEL_118] (rows=63350266 width=77)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_200]
-                                              <-Reducer 17 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_233]
-                                                  Group By Operator [GBY_232] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
-                                                  <-Reducer 15 [CUSTOM_SIMPLE_EDGE]
-                                                    PARTITION_ONLY_SHUFFLE [RS_125]
-                                                      Group By Operator [GBY_124] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
-                                                        Select Operator [SEL_123] (rows=63350266 width=77)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_200]
-                                              <-Reducer 18 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_235]
-                                                  Group By Operator [GBY_234] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
-                                                  <-Reducer 15 [CUSTOM_SIMPLE_EDGE]
-                                                    PARTITION_ONLY_SHUFFLE [RS_130]
-                                                      Group By Operator [GBY_129] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
-                                                        Select Operator [SEL_128] (rows=63350266 width=77)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_200]
-                                              <-Reducer 21 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_243]
-                                                  Group By Operator [GBY_242] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_241]
-                                                      Group By Operator [GBY_240] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_239] (rows=462000 width=1436)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_237]
-                                              <-Reducer 23 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_251]
-                                                  Group By Operator [GBY_250] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 22 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_249]
-                                                      Group By Operator [GBY_248] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_247] (rows=1704 width=1910)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_245]
-                    <-Reducer 2 [SIMPLE_EDGE]
-                      SHUFFLE [RS_44]
-                        PartitionCols:_col1, _col2
-                        Merge Join Operator [MERGEJOIN_198] (rows=316788826 width=135)
-                          Conds:RS_215._col0=RS_207._col0(Inner),Output:["_col1","_col2","_col3"]
-                        <-Map 6 [SIMPLE_EDGE] vectorized
-                          PARTITION_ONLY_SHUFFLE [RS_207]
-                            PartitionCols:_col0
-                            Select Operator [SEL_206] (rows=36525 width=1119)
-                              Output:["_col0"]
-                              Filter Operator [FIL_205] (rows=36525 width=1119)
-                                predicate:((d_year) IN (1999, 2000, 2001) and d_date_sk is not null)
-                                TableScan [TS_3] (rows=73049 width=1119)
-                                  default@date_dim,d3,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
-                        <-Map 1 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_215]
-                            PartitionCols:_col0
-                            Select Operator [SEL_214] (rows=287989836 width=135)
-                              Output:["_col0","_col1","_col2","_col3"]
-                              Filter Operator [FIL_213] (rows=287989836 width=135)
-                                predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_42_d3_d_date_sk_min) AND DynamicValue(RS_42_d3_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_42_d3_d_date_sk_bloom_filter))) and cs_bill_customer_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
-                                TableScan [TS_0] (rows=287989836 width=135)
-                                  default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_item_sk","cs_quantity"]
-                                <-Reducer 7 [BROADCAST_EDGE] vectorized
-                                  BROADCAST [RS_212]
-                                    Group By Operator [GBY_211] (rows=1 width=12)
-                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                    <-Map 6 [CUSTOM_SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_210]
-                                        Group By Operator [GBY_209] (rows=1 width=12)
-                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                          Select Operator [SEL_208] (rows=36525 width=1119)
-                                            Output:["_col0"]
-                                             Please refer to the previous Select Operator [SEL_206]
+                              Select Operator [SEL_207] (rows=36525 width=1119)
+                                Output:["_col0"]
+                                Filter Operator [FIL_206] (rows=36525 width=1119)
+                                  predicate:((d_year) IN (1999, 2000, 2001) and d_date_sk is not null)
+                                  TableScan [TS_3] (rows=73049 width=1119)
+                                    default@date_dim,d3,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+                          <-Map 1 [SIMPLE_EDGE] vectorized
+                            SHUFFLE [RS_216]
+                              PartitionCols:_col0
+                              Select Operator [SEL_215] (rows=287989836 width=135)
+                                Output:["_col0","_col1","_col2","_col3"]
+                                Filter Operator [FIL_214] (rows=287989836 width=135)
+                                  predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_42_d3_d_date_sk_min) AND DynamicValue(RS_42_d3_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_42_d3_d_date_sk_bloom_filter))) and cs_bill_customer_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
+                                  TableScan [TS_0] (rows=287989836 width=135)
+                                    default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_item_sk","cs_quantity"]
+                                  <-Reducer 7 [BROADCAST_EDGE] vectorized
+                                    BROADCAST [RS_213]
+                                      Group By Operator [GBY_212] (rows=1 width=12)
+                                        Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                      <-Map 6 [CUSTOM_SIMPLE_EDGE] vectorized
+                                        PARTITION_ONLY_SHUFFLE [RS_211]
+                                          Group By Operator [GBY_210] (rows=1 width=12)
+                                            Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                            Select Operator [SEL_209] (rows=36525 width=1119)
+                                              Output:["_col0"]
+                                               Please refer to the previous Select Operator [SEL_207]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query35.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query35.q.out b/ql/src/test/results/clientpositive/perf/tez/query35.q.out
index 4fe3856..4ad92c2 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query35.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query35.q.out
@@ -136,188 +136,190 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 6 vectorized
-      File Output Operator [FS_225]
-        Limit [LIM_224] (rows=100 width=88)
+      File Output Operator [FS_226]
+        Limit [LIM_225] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_223] (rows=1045432122 width=88)
+          Select Operator [SEL_224] (rows=1045432122 width=88)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16"]
           <-Reducer 5 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_222]
-              Select Operator [SEL_221] (rows=1045432122 width=88)
+            SHUFFLE [RS_223]
+              Select Operator [SEL_222] (rows=1045432122 width=88)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col9","_col10","_col11","_col12","_col14","_col15","_col16","_col17"]
-                Group By Operator [GBY_220] (rows=1045432122 width=88)
+                Group By Operator [GBY_221] (rows=1045432122 width=88)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15"],aggregations:["count(VALUE._col0)","sum(VALUE._col1)","count(VALUE._col2)","max(VALUE._col3)","sum(VALUE._col4)","count(VALUE._col5)","max(VALUE._col6)","sum(VALUE._col7)","count(VALUE._col8)","max(VALUE._col9)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5
                 <-Reducer 4 [SIMPLE_EDGE]
                   SHUFFLE [RS_63]
                     PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5
                     Group By Operator [GBY_62] (rows=2090864244 width=88)
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15"],aggregations:["count()","sum(_col8)","count(_col8)","max(_col8)","sum(_col9)","count(_col9)","max(_col9)","sum(_col10)","count(_col10)","max(_col10)"],keys:_col4, _col6, _col7, _col8, _col9, _col10
-                      Select Operator [SEL_61] (rows=2090864244 width=88)
-                        Output:["_col4","_col6","_col7","_col8","_col9","_col10"]
-                        Filter Operator [FIL_60] (rows=2090864244 width=88)
-                          predicate:(_col12 is not null or _col14 is not null)
-                          Merge Join Operator [MERGEJOIN_173] (rows=2090864244 width=88)
-                            Conds:RS_55._col0=RS_56._col0(Left Semi),RS_55._col0=RS_211._col0(Left Outer),RS_55._col0=RS_219._col0(Left Outer),Output:["_col4","_col6","_col7","_col8","_col9","_col10","_col12","_col14"]
-                          <-Reducer 3 [SIMPLE_EDGE]
-                            PARTITION_ONLY_SHUFFLE [RS_55]
-                              PartitionCols:_col0
-                              Merge Join Operator [MERGEJOIN_169] (rows=96800003 width=860)
-                                Conds:RS_50._col1=RS_182._col0(Inner),Output:["_col0","_col4","_col6","_col7","_col8","_col9","_col10"]
-                              <-Map 9 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_182]
-                                  PartitionCols:_col0
-                                  Select Operator [SEL_181] (rows=1861800 width=385)
-                                    Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                    Filter Operator [FIL_180] (rows=1861800 width=385)
-                                      predicate:cd_demo_sk is not null
-                                      TableScan [TS_6] (rows=1861800 width=385)
-                                        default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_dep_count","cd_dep_employed_count","cd_dep_college_count"]
-                              <-Reducer 2 [SIMPLE_EDGE]
-                                SHUFFLE [RS_50]
-                                  PartitionCols:_col1
-                                  Merge Join Operator [MERGEJOIN_168] (rows=88000001 width=860)
-                                    Conds:RS_176._col2=RS_179._col0(Inner),Output:["_col0","_col1","_col4"]
-                                  <-Map 1 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_176]
-                                      PartitionCols:_col2
-                                      Select Operator [SEL_175] (rows=80000000 width=860)
-                                        Output:["_col0","_col1","_col2"]
-                                        Filter Operator [FIL_174] (rows=80000000 width=860)
-                                          predicate:(c_current_addr_sk is not null and c_current_cdemo_sk is not null and c_customer_sk is not null)
-                                          TableScan [TS_0] (rows=80000000 width=860)
-                                            default@customer,c,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_cdemo_sk","c_current_addr_sk"]
-                                  <-Map 8 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_179]
-                                      PartitionCols:_col0
-                                      Select Operator [SEL_178] (rows=40000000 width=1014)
-                                        Output:["_col0","_col1"]
-                                        Filter Operator [FIL_177] (rows=40000000 width=1014)
-                                          predicate:ca_address_sk is not null
-                                          TableScan [TS_3] (rows=40000000 width=1014)
-                                            default@customer_address,ca,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state"]
-                          <-Reducer 11 [SIMPLE_EDGE]
-                            SHUFFLE [RS_56]
-                              PartitionCols:_col0
-                              Group By Operator [GBY_54] (rows=633595212 width=88)
-                                Output:["_col0"],keys:_col0
-                                Select Operator [SEL_18] (rows=633595212 width=88)
-                                  Output:["_col0"]
-                                  Merge Join Operator [MERGEJOIN_170] (rows=633595212 width=88)
-                                    Conds:RS_203._col0=RS_185._col0(Inner),Output:["_col1"]
-                                  <-Map 12 [SIMPLE_EDGE] vectorized
-                                    PARTITION_ONLY_SHUFFLE [RS_185]
+                      Top N Key Operator [TNK_104] (rows=2090864244 width=88)
+                        keys:_col4, _col6, _col7, _col8, _col9, _col10,sort order:++++++,top n:100
+                        Select Operator [SEL_61] (rows=2090864244 width=88)
+                          Output:["_col4","_col6","_col7","_col8","_col9","_col10"]
+                          Filter Operator [FIL_60] (rows=2090864244 width=88)
+                            predicate:(_col12 is not null or _col14 is not null)
+                            Merge Join Operator [MERGEJOIN_174] (rows=2090864244 width=88)
+                              Conds:RS_55._col0=RS_56._col0(Left Semi),RS_55._col0=RS_212._col0(Left Outer),RS_55._col0=RS_220._col0(Left Outer),Output:["_col4","_col6","_col7","_col8","_col9","_col10","_col12","_col14"]
+                            <-Reducer 3 [SIMPLE_EDGE]
+                              PARTITION_ONLY_SHUFFLE [RS_55]
+                                PartitionCols:_col0
+                                Merge Join Operator [MERGEJOIN_170] (rows=96800003 width=860)
+                                  Conds:RS_50._col1=RS_183._col0(Inner),Output:["_col0","_col4","_col6","_col7","_col8","_col9","_col10"]
+                                <-Map 9 [SIMPLE_EDGE] vectorized
+                                  SHUFFLE [RS_183]
+                                    PartitionCols:_col0
+                                    Select Operator [SEL_182] (rows=1861800 width=385)
+                                      Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                      Filter Operator [FIL_181] (rows=1861800 width=385)
+                                        predicate:cd_demo_sk is not null
+                                        TableScan [TS_6] (rows=1861800 width=385)
+                                          default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_dep_count","cd_dep_employed_count","cd_dep_college_count"]
+                                <-Reducer 2 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_50]
+                                    PartitionCols:_col1
+                                    Merge Join Operator [MERGEJOIN_169] (rows=88000001 width=860)
+                                      Conds:RS_177._col2=RS_180._col0(Inner),Output:["_col0","_col1","_col4"]
+                                    <-Map 1 [SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_177]
+                                        PartitionCols:_col2
+                                        Select Operator [SEL_176] (rows=80000000 width=860)
+                                          Output:["_col0","_col1","_col2"]
+                                          Filter Operator [FIL_175] (rows=80000000 width=860)
+                                            predicate:(c_current_addr_sk is not null and c_current_cdemo_sk is not null and c_customer_sk is not null)
+                                            TableScan [TS_0] (rows=80000000 width=860)
+                                              default@customer,c,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_cdemo_sk","c_current_addr_sk"]
+                                    <-Map 8 [SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_180]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_179] (rows=40000000 width=1014)
+                                          Output:["_col0","_col1"]
+                                          Filter Operator [FIL_178] (rows=40000000 width=1014)
+                                            predicate:ca_address_sk is not null
+                                            TableScan [TS_3] (rows=40000000 width=1014)
+                                              default@customer_address,ca,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state"]
+                            <-Reducer 11 [SIMPLE_EDGE]
+                              SHUFFLE [RS_56]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_54] (rows=633595212 width=88)
+                                  Output:["_col0"],keys:_col0
+                                  Select Operator [SEL_18] (rows=633595212 width=88)
+                                    Output:["_col0"]
+                                    Merge Join Operator [MERGEJOIN_171] (rows=633595212 width=88)
+                                      Conds:RS_204._col0=RS_186._col0(Inner),Output:["_col1"]
+                                    <-Map 12 [SIMPLE_EDGE] vectorized
+                                      PARTITION_ONLY_SHUFFLE [RS_186]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_185] (rows=12174 width=1119)
+                                          Output:["_col0"]
+                                          Filter Operator [FIL_184] (rows=12174 width=1119)
+                                            predicate:((d_qoy < 4) and (d_year = 1999) and d_date_sk is not null)
+                                            TableScan [TS_12] (rows=73049 width=1119)
+                                              default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
+                                    <-Map 10 [SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_204]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_203] (rows=575995635 width=88)
+                                          Output:["_col0","_col1"]
+                                          Filter Operator [FIL_202] (rows=575995635 width=88)
+                                            predicate:((ss_customer_sk BETWEEN DynamicValue(RS_55_c_c_customer_sk_min) AND DynamicValue(RS_55_c_c_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_55_c_c_customer_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_16_date_dim_d_date_sk_min) AND DynamicValue(RS_16_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_16_date_dim_d_date_sk_bloom_filter))) and ss_customer_sk is not null and ss_sold_date_sk is not null)
+                                            TableScan [TS_9] (rows=575995635 width=88)
+                                              default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_customer_sk"]
+                                            <-Reducer 13 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_199]
+                                                Group By Operator [GBY_198] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                  PARTITION_ONLY_SHUFFLE [RS_195]
+                                                    Group By Operator [GBY_192] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                      Select Operator [SEL_187] (rows=12174 width=1119)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Select Operator [SEL_185]
+                                            <-Reducer 7 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_201]
+                                                Group By Operator [GBY_200] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=96800000)"]
+                                                <-Reducer 3 [CUSTOM_SIMPLE_EDGE]
+                                                  PARTITION_ONLY_SHUFFLE [RS_137]
+                                                    Group By Operator [GBY_136] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=96800000)"]
+                                                      Select Operator [SEL_135] (rows=96800003 width=860)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Merge Join Operator [MERGEJOIN_170]
+                            <-Reducer 15 [ONE_TO_ONE_EDGE] vectorized
+                              FORWARD [RS_212]
+                                PartitionCols:_col0
+                                Select Operator [SEL_211] (rows=79201469 width=135)
+                                  Output:["_col0","_col1"]
+                                  Group By Operator [GBY_210] (rows=79201469 width=135)
+                                    Output:["_col0"],keys:KEY._col0
+                                  <-Reducer 14 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_30]
                                       PartitionCols:_col0
-                                      Select Operator [SEL_184] (rows=12174 width=1119)
-                                        Output:["_col0"]
-                                        Filter Operator [FIL_183] (rows=12174 width=1119)
-                                          predicate:((d_qoy < 4) and (d_year = 1999) and d_date_sk is not null)
-                                          TableScan [TS_12] (rows=73049 width=1119)
-                                            default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
-                                  <-Map 10 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_203]
+                                      Group By Operator [GBY_29] (rows=158402938 width=135)
+                                        Output:["_col0"],keys:_col1
+                                        Merge Join Operator [MERGEJOIN_172] (rows=158402938 width=135)
+                                          Conds:RS_209._col0=RS_188._col0(Inner),Output:["_col1"]
+                                        <-Map 12 [SIMPLE_EDGE] vectorized
+                                          PARTITION_ONLY_SHUFFLE [RS_188]
+                                            PartitionCols:_col0
+                                             Please refer to the previous Select Operator [SEL_185]
+                                        <-Map 20 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_209]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_208] (rows=144002668 width=135)
+                                              Output:["_col0","_col1"]
+                                              Filter Operator [FIL_207] (rows=144002668 width=135)
+                                                predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_26_date_dim_d_date_sk_min) AND DynamicValue(RS_26_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_26_date_dim_d_date_sk_bloom_filter))) and ws_bill_customer_sk is not null and ws_sold_date_sk is not null)
+                                                TableScan [TS_19] (rows=144002668 width=135)
+                                                  default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_bill_customer_sk"]
+                                                <-Reducer 16 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_206]
+                                                    Group By Operator [GBY_205] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      PARTITION_ONLY_SHUFFLE [RS_196]
+                                                        Group By Operator [GBY_193] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_189] (rows=12174 width=1119)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_185]
+                            <-Reducer 18 [ONE_TO_ONE_EDGE] vectorized
+                              FORWARD [RS_220]
+                                PartitionCols:_col0
+                                Select Operator [SEL_219] (rows=158394413 width=135)
+                                  Output:["_col0","_col1"]
+                                  Group By Operator [GBY_218] (rows=158394413 width=135)
+                                    Output:["_col0"],keys:KEY._col0
+                                  <-Reducer 17 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_44]
                                       PartitionCols:_col0
-                                      Select Operator [SEL_202] (rows=575995635 width=88)
-                                        Output:["_col0","_col1"]
-                                        Filter Operator [FIL_201] (rows=575995635 width=88)
-                                          predicate:((ss_customer_sk BETWEEN DynamicValue(RS_55_c_c_customer_sk_min) AND DynamicValue(RS_55_c_c_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_55_c_c_customer_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_16_date_dim_d_date_sk_min) AND DynamicValue(RS_16_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_16_date_dim_d_date_sk_bloom_filter))) and ss_customer_sk is not null and ss_sold_date_sk is not null)
-                                          TableScan [TS_9] (rows=575995635 width=88)
-                                            default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_customer_sk"]
-                                          <-Reducer 13 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_198]
-                                              Group By Operator [GBY_197] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                              <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                PARTITION_ONLY_SHUFFLE [RS_194]
-                                                  Group By Operator [GBY_191] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                    Select Operator [SEL_186] (rows=12174 width=1119)
-                                                      Output:["_col0"]
-                                                       Please refer to the previous Select Operator [SEL_184]
-                                          <-Reducer 7 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_200]
-                                              Group By Operator [GBY_199] (rows=1 width=12)
-                                                Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=96800000)"]
-                                              <-Reducer 3 [CUSTOM_SIMPLE_EDGE]
-                                                PARTITION_ONLY_SHUFFLE [RS_136]
-                                                  Group By Operator [GBY_135] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=96800000)"]
-                                                    Select Operator [SEL_134] (rows=96800003 width=860)
-                                                      Output:["_col0"]
-                                                       Please refer to the previous Merge Join Operator [MERGEJOIN_169]
-                          <-Reducer 15 [ONE_TO_ONE_EDGE] vectorized
-                            FORWARD [RS_211]
-                              PartitionCols:_col0
-                              Select Operator [SEL_210] (rows=79201469 width=135)
-                                Output:["_col0","_col1"]
-                                Group By Operator [GBY_209] (rows=79201469 width=135)
-                                  Output:["_col0"],keys:KEY._col0
-                                <-Reducer 14 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_30]
-                                    PartitionCols:_col0
-                                    Group By Operator [GBY_29] (rows=158402938 width=135)
-                                      Output:["_col0"],keys:_col1
-                                      Merge Join Operator [MERGEJOIN_171] (rows=158402938 width=135)
-                                        Conds:RS_208._col0=RS_187._col0(Inner),Output:["_col1"]
-                                      <-Map 12 [SIMPLE_EDGE] vectorized
-                                        PARTITION_ONLY_SHUFFLE [RS_187]
-                                          PartitionCols:_col0
-                                           Please refer to the previous Select Operator [SEL_184]
-                                      <-Map 20 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_208]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_207] (rows=144002668 width=135)
-                                            Output:["_col0","_col1"]
-                                            Filter Operator [FIL_206] (rows=144002668 width=135)
-                                              predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_26_date_dim_d_date_sk_min) AND DynamicValue(RS_26_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_26_date_dim_d_date_sk_bloom_filter))) and ws_bill_customer_sk is not null and ws_sold_date_sk is not null)
-                                              TableScan [TS_19] (rows=144002668 width=135)
-                                                default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_bill_customer_sk"]
-                                              <-Reducer 16 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_205]
-                                                  Group By Operator [GBY_204] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_195]
-                                                      Group By Operator [GBY_192] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_188] (rows=12174 width=1119)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_184]
-                          <-Reducer 18 [ONE_TO_ONE_EDGE] vectorized
-                            FORWARD [RS_219]
-                              PartitionCols:_col0
-                              Select Operator [SEL_218] (rows=158394413 width=135)
-                                Output:["_col0","_col1"]
-                                Group By Operator [GBY_217] (rows=158394413 width=135)
-                                  Output:["_col0"],keys:KEY._col0
-                                <-Reducer 17 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_44]
-                                    PartitionCols:_col0
-                                    Group By Operator [GBY_43] (rows=316788826 width=135)
-                                      Output:["_col0"],keys:_col1
-                                      Merge Join Operator [MERGEJOIN_172] (rows=316788826 width=135)
-                                        Conds:RS_216._col0=RS_189._col0(Inner),Output:["_col1"]
-                                      <-Map 12 [SIMPLE_EDGE] vectorized
-                                        PARTITION_ONLY_SHUFFLE [RS_189]
-                                          PartitionCols:_col0
-                                           Please refer to the previous Select Operator [SEL_184]
-                                      <-Map 21 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_216]
-                                          PartitionCols:_col0
-                                          Select Operator [SEL_215] (rows=287989836 width=135)
-                                            Output:["_col0","_col1"]
-                                            Filter Operator [FIL_214] (rows=287989836 width=135)
-                                              predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_40_date_dim_d_date_sk_min) AND DynamicValue(RS_40_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_40_date_dim_d_date_sk_bloom_filter))) and cs_ship_customer_sk is not null and cs_sold_date_sk is not null)
-                                              TableScan [TS_33] (rows=287989836 width=135)
-                                                default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_ship_customer_sk"]
-                                              <-Reducer 19 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_213]
-                                                  Group By Operator [GBY_212] (rows=1 width=12)
-                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                  <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_196]
-                                                      Group By Operator [GBY_193] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_190] (rows=12174 width=1119)
-                                                          Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_184]
+                                      Group By Operator [GBY_43] (rows=316788826 width=135)
+                                        Output:["_col0"],keys:_col1
+                                        Merge Join Operator [MERGEJOIN_173] (rows=316788826 width=135)
+                                          Conds:RS_217._col0=RS_190._col0(Inner),Output:["_col1"]
+                                        <-Map 12 [SIMPLE_EDGE] vectorized
+                                          PARTITION_ONLY_SHUFFLE [RS_190]
+                                            PartitionCols:_col0
+                                             Please refer to the previous Select Operator [SEL_185]
+                                        <-Map 21 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_217]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_216] (rows=287989836 width=135)
+                                              Output:["_col0","_col1"]
+                                              Filter Operator [FIL_215] (rows=287989836 width=135)
+                                                predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_40_date_dim_d_date_sk_min) AND DynamicValue(RS_40_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_40_date_dim_d_date_sk_bloom_filter))) and cs_ship_customer_sk is not null and cs_sold_date_sk is not null)
+                                                TableScan [TS_33] (rows=287989836 width=135)
+                                                  default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_ship_customer_sk"]
+                                                <-Reducer 19 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_214]
+                                                    Group By Operator [GBY_213] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                    <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                      PARTITION_ONLY_SHUFFLE [RS_197]
+                                                        Group By Operator [GBY_194] (rows=1 width=12)
+                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                          Select Operator [SEL_191] (rows=12174 width=1119)
+                                                            Output:["_col0"]
+                                                             Please refer to the previous Select Operator [SEL_185]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/851c8aba/ql/src/test/results/clientpositive/perf/tez/query37.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query37.q.out b/ql/src/test/results/clientpositive/perf/tez/query37.q.out
index 9db8538..2b3ae52 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query37.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query37.q.out
@@ -46,83 +46,85 @@ Stage-0
     limit:100
     Stage-1
       Reducer 4 vectorized
-      File Output Operator [FS_96]
-        Limit [LIM_95] (rows=100 width=135)
+      File Output Operator [FS_97]
+        Limit [LIM_96] (rows=100 width=135)
           Number of rows:100
-          Select Operator [SEL_94] (rows=316788826 width=135)
+          Select Operator [SEL_95] (rows=316788826 width=135)
             Output:["_col0","_col1","_col2"]
           <-Reducer 3 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_93]
-              Group By Operator [GBY_92] (rows=316788826 width=135)
+            SHUFFLE [RS_94]
+              Group By Operator [GBY_93] (rows=316788826 width=135)
                 Output:["_col0","_col1","_col2"],keys:KEY._col0, KEY._col1, KEY._col2
               <-Reducer 2 [SIMPLE_EDGE]
                 SHUFFLE [RS_22]
                   PartitionCols:_col0, _col1, _col2
                   Group By Operator [GBY_21] (rows=633577652 width=135)
                     Output:["_col0","_col1","_col2"],keys:_col2, _col3, _col4
-                    Merge Join Operator [MERGEJOIN_72] (rows=633577652 width=135)
-                      Conds:RS_91._col0=RS_75._col0(Inner),RS_75._col0=RS_18._col1(Inner),Output:["_col2","_col3","_col4"]
-                    <-Map 5 [SIMPLE_EDGE] vectorized
-                      PARTITION_ONLY_SHUFFLE [RS_75]
-                        PartitionCols:_col0
-                        Select Operator [SEL_74] (rows=25666 width=1436)
-                          Output:["_col0","_col1","_col2","_col3"]
-                          Filter Operator [FIL_73] (rows=25666 width=1436)
-                            predicate:((i_manufact_id) IN (678, 964, 918, 849) and i_current_price BETWEEN 22 AND 52 and i_item_sk is not null)
-                            TableScan [TS_3] (rows=462000 width=1436)
-                              default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_item_desc","i_current_price","i_manufact_id"]
-                    <-Reducer 8 [SIMPLE_EDGE]
-                      PARTITION_ONLY_SHUFFLE [RS_18]
-                        PartitionCols:_col1
-                        Merge Join Operator [MERGEJOIN_71] (rows=4593600 width=15)
-                          Conds:RS_83._col0=RS_86._col0(Inner),Output:["_col1"]
-                        <-Map 10 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_86]
-                            PartitionCols:_col0
-                            Select Operator [SEL_85] (rows=8116 width=1119)
-                              Output:["_col0"]
-                              Filter Operator [FIL_84] (rows=8116 width=1119)
-                                predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'2001-06-02 00:00:00' AND TIMESTAMP'2001-08-01 00:00:00' and d_date_sk is not null)
-                                TableScan [TS_9] (rows=73049 width=1119)
-                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
-                        <-Map 7 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_83]
-                            PartitionCols:_col0
-                            Select Operator [SEL_82] (rows=4176000 width=15)
-                              Output:["_col0","_col1"]
-                              Filter Operator [FIL_81] (rows=4176000 width=15)
-                                predicate:(inv_date_sk is not null and inv_item_sk is not null and inv_quantity_on_hand BETWEEN 100 AND 500)
-                                TableScan [TS_6] (rows=37584000 width=15)
-                                  default@inventory,inventory,Tbl:COMPLETE,Col:NONE,Output:["inv_date_sk","inv_item_sk","inv_quantity_on_hand"]
-                    <-Map 1 [SIMPLE_EDGE] vectorized
-                      SHUFFLE [RS_91]
-                        PartitionCols:_col0
-                        Select Operator [SEL_90] (rows=287989836 width=135)
-                          Output:["_col0"]
-                          Filter Operator [FIL_89] (rows=287989836 width=135)
-                            predicate:((cs_item_sk BETWEEN DynamicValue(RS_17_item_i_item_sk_min) AND DynamicValue(RS_17_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_17_item_i_item_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_18_inventory_inv_item_sk_min) AND DynamicValue(RS_18_inventory_inv_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_18_inventory_inv_item_sk_bloom_filter))) and cs_item_sk is not null)
-                            TableScan [TS_0] (rows=287989836 width=135)
-                              default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_item_sk"]
-                            <-Reducer 6 [BROADCAST_EDGE] vectorized
-                              BROADCAST [RS_80]
-                                Group By Operator [GBY_79] (rows=1 width=12)
-                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                <-Map 5 [CUSTOM_SIMPLE_EDGE] vectorized
-                                  PARTITION_ONLY_SHUFFLE [RS_78]
-                                    Group By Operator [GBY_77] (rows=1 width=12)
-                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                      Select Operator [SEL_76] (rows=25666 width=1436)
-                                        Output:["_col0"]
-                                         Please refer to the previous Select Operator [SEL_74]
-                            <-Reducer 9 [BROADCAST_EDGE] vectorized
-                              BROADCAST [RS_88]
-                                Group By Operator [GBY_87] (rows=1 width=12)
-                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=4593600)"]
-                                <-Reducer 8 [CUSTOM_SIMPLE_EDGE]
-                                  PARTITION_ONLY_SHUFFLE [RS_50]
-                                    Group By Operator [GBY_49] (rows=1 width=12)
-                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=4593600)"]
-                                      Select Operator [SEL_48] (rows=4593600 width=15)
-                                        Output:["_col0"]
-                                         Please refer to the previous Merge Join Operator [MERGEJOIN_71]
+                    Top N Key Operator [TNK_43] (rows=633577652 width=135)
+                      keys:_col2, _col3, _col4,sort order:+++,top n:100
+                      Merge Join Operator [MERGEJOIN_73] (rows=633577652 width=135)
+                        Conds:RS_92._col0=RS_76._col0(Inner),RS_76._col0=RS_18._col1(Inner),Output:["_col2","_col3","_col4"]
+                      <-Map 5 [SIMPLE_EDGE] vectorized
+                        PARTITION_ONLY_SHUFFLE [RS_76]
+                          PartitionCols:_col0
+                          Select Operator [SEL_75] (rows=25666 width=1436)
+                            Output:["_col0","_col1","_col2","_col3"]
+                            Filter Operator [FIL_74] (rows=25666 width=1436)
+                              predicate:((i_manufact_id) IN (678, 964, 918, 849) and i_current_price BETWEEN 22 AND 52 and i_item_sk is not null)
+                              TableScan [TS_3] (rows=462000 width=1436)
+                                default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_item_desc","i_current_price","i_manufact_id"]
+                      <-Reducer 8 [SIMPLE_EDGE]
+                        PARTITION_ONLY_SHUFFLE [RS_18]
+                          PartitionCols:_col1
+                          Merge Join Operator [MERGEJOIN_72] (rows=4593600 width=15)
+                            Conds:RS_84._col0=RS_87._col0(Inner),Output:["_col1"]
+                          <-Map 10 [SIMPLE_EDGE] vectorized
+                            SHUFFLE [RS_87]
+                              PartitionCols:_col0
+                              Select Operator [SEL_86] (rows=8116 width=1119)
+                                Output:["_col0"]
+                                Filter Operator [FIL_85] (rows=8116 width=1119)
+                                  predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'2001-06-02 00:00:00' AND TIMESTAMP'2001-08-01 00:00:00' and d_date_sk is not null)
+                                  TableScan [TS_9] (rows=73049 width=1119)
+                                    default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
+                          <-Map 7 [SIMPLE_EDGE] vectorized
+                            SHUFFLE [RS_84]
+                              PartitionCols:_col0
+                              Select Operator [SEL_83] (rows=4176000 width=15)
+                                Output:["_col0","_col1"]
+                                Filter Operator [FIL_82] (rows=4176000 width=15)
+                                  predicate:(inv_date_sk is not null and inv_item_sk is not null and inv_quantity_on_hand BETWEEN 100 AND 500)
+                                  TableScan [TS_6] (rows=37584000 width=15)
+                                    default@inventory,inventory,Tbl:COMPLETE,Col:NONE,Output:["inv_date_sk","inv_item_sk","inv_quantity_on_hand"]
+                      <-Map 1 [SIMPLE_EDGE] vectorized
+                        SHUFFLE [RS_92]
+                          PartitionCols:_col0
+                          Select Operator [SEL_91] (rows=287989836 width=135)
+                            Output:["_col0"]
+                            Filter Operator [FIL_90] (rows=287989836 width=135)
+                              predicate:((cs_item_sk BETWEEN DynamicValue(RS_17_item_i_item_sk_min) AND DynamicValue(RS_17_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_17_item_i_item_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_18_inventory_inv_item_sk_min) AND DynamicValue(RS_18_inventory_inv_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_18_inventory_inv_item_sk_bloom_filter))) and cs_item_sk is not null)
+                              TableScan [TS_0] (rows=287989836 width=135)
+                                default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_item_sk"]
+                              <-Reducer 6 [BROADCAST_EDGE] vectorized
+                                BROADCAST [RS_81]
+                                  Group By Operator [GBY_80] (rows=1 width=12)
+                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                  <-Map 5 [CUSTOM_SIMPLE_EDGE] vectorized
+                                    PARTITION_ONLY_SHUFFLE [RS_79]
+                                      Group By Operator [GBY_78] (rows=1 width=12)
+                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                        Select Operator [SEL_77] (rows=25666 width=1436)
+                                          Output:["_col0"]
+                                           Please refer to the previous Select Operator [SEL_75]
+                              <-Reducer 9 [BROADCAST_EDGE] vectorized
+                                BROADCAST [RS_89]
+                                  Group By Operator [GBY_88] (rows=1 width=12)
+                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=4593600)"]
+                                  <-Reducer 8 [CUSTOM_SIMPLE_EDGE]
+                                    PARTITION_ONLY_SHUFFLE [RS_51]
+                                      Group By Operator [GBY_50] (rows=1 width=12)
+                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=4593600)"]
+                                        Select Operator [SEL_49] (rows=4593600 width=15)
+                                          Output:["_col0"]
+                                           Please refer to the previous Merge Join Operator [MERGEJOIN_72]