Posted to commits@hive.apache.org by we...@apache.org on 2017/05/08 20:42:51 UTC

[01/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Repository: hive
Updated Branches:
  refs/heads/hive-14535 187eb760d -> ed64a74e8


http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java
index c81131e..a694cf8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java
@@ -26,6 +26,7 @@ import static org.apache.hadoop.hive.ql.optimizer.physical.LlapDecider.LlapMode.
 
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Deque;
 import java.util.EnumSet;
@@ -50,6 +51,7 @@ import org.apache.hadoop.hive.ql.exec.SelectOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.tez.TezTask;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
 import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
@@ -104,20 +106,12 @@ public class LlapDecider implements PhysicalPlanResolver {
   }
 
   private LlapMode mode;
-  private final LlapClusterStateForCompile clusterState;
-
-  public LlapDecider(LlapClusterStateForCompile clusterState) {
-    this.clusterState = clusterState;
-  }
-
 
   class LlapDecisionDispatcher implements Dispatcher {
     private final HiveConf conf;
     private final boolean doSkipUdfCheck;
     private final boolean arePermanentFnsAllowed;
     private final boolean shouldUber;
-    private final float minReducersPerExec;
-    private final int executorsPerNode;
     private List<MapJoinOperator> mapJoinOpList;
     private final Map<Rule, NodeProcessor> rules;
 
@@ -127,9 +121,6 @@ public class LlapDecider implements PhysicalPlanResolver {
       arePermanentFnsAllowed = HiveConf.getBoolVar(conf, ConfVars.LLAP_ALLOW_PERMANENT_FNS);
       // Don't use uber in "all" mode - everything can go into LLAP, which is better than uber.
       shouldUber = HiveConf.getBoolVar(conf, ConfVars.LLAP_AUTO_ALLOW_UBER) && (mode != all);
-      minReducersPerExec = HiveConf.getFloatVar(
-          conf, ConfVars.TEZ_LLAP_MIN_REDUCER_PER_EXECUTOR);
-      executorsPerNode = HiveConf.getIntVar(conf, ConfVars.LLAP_DAEMON_NUM_EXECUTORS); // TODO# hmm
       mapJoinOpList = new ArrayList<MapJoinOperator>();
       rules = getRules();
     }
@@ -148,57 +139,22 @@ public class LlapDecider implements PhysicalPlanResolver {
       return null;
     }
 
-    private void handleWork(TezWork tezWork, BaseWork work) throws SemanticException {
+    private void handleWork(TezWork tezWork, BaseWork work)
+      throws SemanticException {
       boolean workCanBeDoneInLlap = evaluateWork(tezWork, work);
       LOG.debug(
           "Work " + work + " " + (workCanBeDoneInLlap ? "can" : "cannot") + " be done in LLAP");
       if (workCanBeDoneInLlap) {
         for (MapJoinOperator graceMapJoinOp : mapJoinOpList) {
-          LOG.debug("Disabling hybrid grace hash join in case of LLAP "
-              + "and non-dynamic partition hash join.");
+          LOG.debug(
+              "Disabling hybrid grace hash join in case of LLAP and non-dynamic partition hash join.");
           graceMapJoinOp.getConf().setHybridHashJoin(false);
         }
-        adjustAutoParallelism(work);
-        
         convertWork(tezWork, work);
       }
       mapJoinOpList.clear();
     }
 
-    private void adjustAutoParallelism(BaseWork work) {
-      if (minReducersPerExec <= 0 || !(work instanceof ReduceWork)) return;
-      ReduceWork reduceWork = (ReduceWork)work;
-      if (reduceWork.isAutoReduceParallelism() == false && reduceWork.isUniformDistribution() == false) {
-        return; // Not based on ARP and cannot assume uniform distribution, bail.
-      }
-      clusterState.initClusterInfo();
-      int targetCount = 0;
-      if (!clusterState.hasClusterInfo()) {
-        LOG.warn("Cannot determine LLAP cluster information");
-        targetCount = (int)Math.ceil(minReducersPerExec * 1 * executorsPerNode);
-      } else {
-        targetCount = (int)Math.ceil(minReducersPerExec * (clusterState.getKnownExecutorCount()
-            + clusterState.getNodeCountWithUnknownExecutors() * executorsPerNode));
-      }
-      // We only increase the targets here.
-      if (reduceWork.isAutoReduceParallelism()) {
-        int newMin = Math.max(reduceWork.getMinReduceTasks(), targetCount);
-        if (newMin < reduceWork.getMaxReduceTasks()) {
-          reduceWork.setMinReduceTasks(newMin);
-          reduceWork.getEdgePropRef().setAutoReduce(conf, true, newMin,
-              reduceWork.getMaxReduceTasks(), conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER));
-        } else {
-          reduceWork.setAutoReduceParallelism(false);
-          reduceWork.setNumReduceTasks(newMin);
-          // TODO: is this correct? based on the same logic as HIVE-14200
-          reduceWork.getEdgePropRef().setAutoReduce(null, false, 0, 0, 0);
-        }
-      } else {
-        // UNIFORM || AUTOPARALLEL (maxed out)
-        reduceWork.setNumReduceTasks(Math.max(reduceWork.getNumReduceTasks(), targetCount));
-      }
-    }
-
 
     private void convertWork(TezWork tezWork, BaseWork work)
       throws SemanticException {
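
For context on the hunk above: the revert removes LlapDecider's auto-parallelism adjustment, whose core is a target reducer count of ceil(minReducersPerExec * totalExecutors), where totalExecutors falls back to a single node's worth of executors when LLAP cluster information is unavailable. A minimal, self-contained Java sketch of that computation follows; the ClusterInfo holder and its field names are hypothetical stand-ins for LlapClusterStateForCompile, not the actual Hive classes.

// Minimal sketch of the reverted target-reducer computation (hypothetical
// stand-in types, not the actual LlapDecider/LlapClusterStateForCompile code).
final class ReducerTargetSketch {
  // Hypothetical snapshot of what the compile-time cluster state would report.
  static final class ClusterInfo {
    final boolean available;           // could cluster info be fetched at all?
    final int knownExecutors;          // executors on nodes that reported a count
    final int nodesWithUnknownCount;   // nodes whose executor count is unknown

    ClusterInfo(boolean available, int knownExecutors, int nodesWithUnknownCount) {
      this.available = available;
      this.knownExecutors = knownExecutors;
      this.nodesWithUnknownCount = nodesWithUnknownCount;
    }
  }

  // Mirrors the removed logic: assume one node's worth of executors when the
  // cluster is unknown; otherwise count known executors plus an estimate for
  // nodes that did not report a count.
  static int targetReducers(float minReducersPerExec, int executorsPerNode, ClusterInfo c) {
    int totalExecutors = c.available
        ? c.knownExecutors + c.nodesWithUnknownCount * executorsPerNode
        : executorsPerNode; // single node assumed, matching the removed fallback
    return (int) Math.ceil(minReducersPerExec * totalExecutors);
  }

  public static void main(String[] args) {
    // Example: 0.33 reducers per executor, 12 executors per node,
    // 36 executors known plus one node that did not report.
    ClusterInfo c = new ClusterInfo(true, 36, 1);
    System.out.println(targetReducers(0.33f, 12, c)); // prints 16 (ceil of 15.84)
  }
}

In the reverted method the result is only ever applied upward: either the auto-reduce minimum is raised toward the target, or numReduceTasks is set to max(current, target).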


[28/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql
deleted file mode 100644
index 98682a8..0000000
--- a/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql
+++ /dev/null
@@ -1,1023 +0,0 @@
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements.  See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the "License"); you may not use this file except in compliance with
--- the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-------------------------------------------------------------------
--- DataNucleus SchemaTool (ran at 08/04/2014 15:10:15)
-------------------------------------------------------------------
--- Complete schema required for the following classes:-
---     org.apache.hadoop.hive.metastore.model.MColumnDescriptor
---     org.apache.hadoop.hive.metastore.model.MDBPrivilege
---     org.apache.hadoop.hive.metastore.model.MDatabase
---     org.apache.hadoop.hive.metastore.model.MDelegationToken
---     org.apache.hadoop.hive.metastore.model.MFieldSchema
---     org.apache.hadoop.hive.metastore.model.MFunction
---     org.apache.hadoop.hive.metastore.model.MGlobalPrivilege
---     org.apache.hadoop.hive.metastore.model.MIndex
---     org.apache.hadoop.hive.metastore.model.MMasterKey
---     org.apache.hadoop.hive.metastore.model.MOrder
---     org.apache.hadoop.hive.metastore.model.MPartition
---     org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege
---     org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics
---     org.apache.hadoop.hive.metastore.model.MPartitionEvent
---     org.apache.hadoop.hive.metastore.model.MPartitionPrivilege
---     org.apache.hadoop.hive.metastore.model.MResourceUri
---     org.apache.hadoop.hive.metastore.model.MRole
---     org.apache.hadoop.hive.metastore.model.MRoleMap
---     org.apache.hadoop.hive.metastore.model.MSerDeInfo
---     org.apache.hadoop.hive.metastore.model.MStorageDescriptor
---     org.apache.hadoop.hive.metastore.model.MStringList
---     org.apache.hadoop.hive.metastore.model.MTable
---     org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege
---     org.apache.hadoop.hive.metastore.model.MTableColumnStatistics
---     org.apache.hadoop.hive.metastore.model.MTablePrivilege
---     org.apache.hadoop.hive.metastore.model.MType
---     org.apache.hadoop.hive.metastore.model.MVersionTable
---
--- Table MASTER_KEYS for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
-CREATE TABLE MASTER_KEYS
-(
-    KEY_ID int NOT NULL,
-    MASTER_KEY nvarchar(767) NULL
-);
-
-ALTER TABLE MASTER_KEYS ADD CONSTRAINT MASTER_KEYS_PK PRIMARY KEY (KEY_ID);
-
--- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
-CREATE TABLE IDXS
-(
-    INDEX_ID bigint NOT NULL,
-    CREATE_TIME int NOT NULL,
-    DEFERRED_REBUILD bit NOT NULL,
-    INDEX_HANDLER_CLASS nvarchar(4000) NULL,
-    INDEX_NAME nvarchar(128) NULL,
-    INDEX_TBL_ID bigint NULL,
-    LAST_ACCESS_TIME int NOT NULL,
-    ORIG_TBL_ID bigint NULL,
-    SD_ID bigint NULL
-);
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
-
--- Table PART_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
-CREATE TABLE PART_COL_STATS
-(
-    CS_ID bigint NOT NULL,
-    AVG_COL_LEN float NULL,
-    "COLUMN_NAME" nvarchar(767) NOT NULL,
-    COLUMN_TYPE nvarchar(128) NOT NULL,
-    DB_NAME nvarchar(128) NOT NULL,
-    BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
-    BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
-    DOUBLE_HIGH_VALUE float NULL,
-    DOUBLE_LOW_VALUE float NULL,
-    LAST_ANALYZED bigint NOT NULL,
-    LONG_HIGH_VALUE bigint NULL,
-    LONG_LOW_VALUE bigint NULL,
-    MAX_COL_LEN bigint NULL,
-    NUM_DISTINCTS bigint NULL,
-    NUM_FALSES bigint NULL,
-    NUM_NULLS bigint NOT NULL,
-    NUM_TRUES bigint NULL,
-    PART_ID bigint NULL,
-    PARTITION_NAME nvarchar(767) NOT NULL,
-    "TABLE_NAME" nvarchar(256) NOT NULL
-);
-
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
-
-CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
-
--- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-CREATE TABLE PART_PRIVS
-(
-    PART_GRANT_ID bigint NOT NULL,
-    CREATE_TIME int NOT NULL,
-    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR nvarchar(128) NULL,
-    GRANTOR_TYPE nvarchar(128) NULL,
-    PART_ID bigint NULL,
-    PRINCIPAL_NAME nvarchar(128) NULL,
-    PRINCIPAL_TYPE nvarchar(128) NULL,
-    PART_PRIV nvarchar(128) NULL
-);
-
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
-
--- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
-CREATE TABLE SKEWED_STRING_LIST
-(
-    STRING_LIST_ID bigint NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
-
--- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE TABLE ROLES
-(
-    ROLE_ID bigint NOT NULL,
-    CREATE_TIME int NOT NULL,
-    OWNER_NAME nvarchar(128) NULL,
-    ROLE_NAME nvarchar(128) NULL
-);
-
-ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
-
--- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
-CREATE TABLE PARTITIONS
-(
-    PART_ID bigint NOT NULL,
-    CREATE_TIME int NOT NULL,
-    LAST_ACCESS_TIME int NOT NULL,
-    PART_NAME nvarchar(767) NULL,
-    SD_ID bigint NULL,
-    TBL_ID bigint NULL
-);
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
-
--- Table CDS for classes [org.apache.hadoop.hive.metastore.model.MColumnDescriptor]
-CREATE TABLE CDS
-(
-    CD_ID bigint NOT NULL
-);
-
-ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
-
--- Table VERSION for classes [org.apache.hadoop.hive.metastore.model.MVersionTable]
-CREATE TABLE VERSION
-(
-    VER_ID bigint NOT NULL,
-    SCHEMA_VERSION nvarchar(127) NOT NULL,
-    VERSION_COMMENT nvarchar(255) NOT NULL
-);
-
-ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
-
--- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE TABLE GLOBAL_PRIVS
-(
-    USER_GRANT_ID bigint NOT NULL,
-    CREATE_TIME int NOT NULL,
-    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR nvarchar(128) NULL,
-    GRANTOR_TYPE nvarchar(128) NULL,
-    PRINCIPAL_NAME nvarchar(128) NULL,
-    PRINCIPAL_TYPE nvarchar(128) NULL,
-    USER_PRIV nvarchar(128) NULL
-);
-
-ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
-
--- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-CREATE TABLE PART_COL_PRIVS
-(
-    PART_COLUMN_GRANT_ID bigint NOT NULL,
-    "COLUMN_NAME" nvarchar(767) NULL,
-    CREATE_TIME int NOT NULL,
-    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR nvarchar(128) NULL,
-    GRANTOR_TYPE nvarchar(128) NULL,
-    PART_ID bigint NULL,
-    PRINCIPAL_NAME nvarchar(128) NULL,
-    PRINCIPAL_TYPE nvarchar(128) NULL,
-    PART_COL_PRIV nvarchar(128) NULL
-);
-
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
-
--- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-CREATE TABLE DB_PRIVS
-(
-    DB_GRANT_ID bigint NOT NULL,
-    CREATE_TIME int NOT NULL,
-    DB_ID bigint NULL,
-    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR nvarchar(128) NULL,
-    GRANTOR_TYPE nvarchar(128) NULL,
-    PRINCIPAL_NAME nvarchar(128) NULL,
-    PRINCIPAL_TYPE nvarchar(128) NULL,
-    DB_PRIV nvarchar(128) NULL
-);
-
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
-
--- Table TAB_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics]
-CREATE TABLE TAB_COL_STATS
-(
-    CS_ID bigint NOT NULL,
-    AVG_COL_LEN float NULL,
-    "COLUMN_NAME" nvarchar(767) NOT NULL,
-    COLUMN_TYPE nvarchar(128) NOT NULL,
-    DB_NAME nvarchar(128) NOT NULL,
-    BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
-    BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
-    DOUBLE_HIGH_VALUE float NULL,
-    DOUBLE_LOW_VALUE float NULL,
-    LAST_ANALYZED bigint NOT NULL,
-    LONG_HIGH_VALUE bigint NULL,
-    LONG_LOW_VALUE bigint NULL,
-    MAX_COL_LEN bigint NULL,
-    NUM_DISTINCTS bigint NULL,
-    NUM_FALSES bigint NULL,
-    NUM_NULLS bigint NOT NULL,
-    NUM_TRUES bigint NULL,
-    TBL_ID bigint NULL,
-    "TABLE_NAME" nvarchar(256) NOT NULL
-);
-
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
-
--- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
-CREATE TABLE TYPES
-(
-    TYPES_ID bigint NOT NULL,
-    TYPE_NAME nvarchar(128) NULL,
-    TYPE1 nvarchar(767) NULL,
-    TYPE2 nvarchar(767) NULL
-);
-
-ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
-
--- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-CREATE TABLE TBL_PRIVS
-(
-    TBL_GRANT_ID bigint NOT NULL,
-    CREATE_TIME int NOT NULL,
-    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR nvarchar(128) NULL,
-    GRANTOR_TYPE nvarchar(128) NULL,
-    PRINCIPAL_NAME nvarchar(128) NULL,
-    PRINCIPAL_TYPE nvarchar(128) NULL,
-    TBL_PRIV nvarchar(128) NULL,
-    TBL_ID bigint NULL
-);
-
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
-
--- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE TABLE DBS
-(
-    DB_ID bigint NOT NULL,
-    "DESC" nvarchar(4000) NULL,
-    DB_LOCATION_URI nvarchar(4000) NOT NULL,
-    "NAME" nvarchar(128) NULL,
-    OWNER_NAME nvarchar(128) NULL,
-    OWNER_TYPE nvarchar(10) NULL
-);
-
-ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
-
--- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-CREATE TABLE TBL_COL_PRIVS
-(
-    TBL_COLUMN_GRANT_ID bigint NOT NULL,
-    "COLUMN_NAME" nvarchar(767) NULL,
-    CREATE_TIME int NOT NULL,
-    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR nvarchar(128) NULL,
-    GRANTOR_TYPE nvarchar(128) NULL,
-    PRINCIPAL_NAME nvarchar(128) NULL,
-    PRINCIPAL_TYPE nvarchar(128) NULL,
-    TBL_COL_PRIV nvarchar(128) NULL,
-    TBL_ID bigint NULL
-);
-
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
-
--- Table DELEGATION_TOKENS for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
-CREATE TABLE DELEGATION_TOKENS
-(
-    TOKEN_IDENT nvarchar(767) NOT NULL,
-    TOKEN nvarchar(767) NULL
-);
-
-ALTER TABLE DELEGATION_TOKENS ADD CONSTRAINT DELEGATION_TOKENS_PK PRIMARY KEY (TOKEN_IDENT);
-
--- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-CREATE TABLE SERDES
-(
-    SERDE_ID bigint NOT NULL,
-    "NAME" nvarchar(128) NULL,
-    SLIB nvarchar(4000) NULL
-);
-
-ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
-
--- Table FUNCS for classes [org.apache.hadoop.hive.metastore.model.MFunction]
-CREATE TABLE FUNCS
-(
-    FUNC_ID bigint NOT NULL,
-    CLASS_NAME nvarchar(4000) NULL,
-    CREATE_TIME int NOT NULL,
-    DB_ID bigint NULL,
-    FUNC_NAME nvarchar(128) NULL,
-    FUNC_TYPE int NOT NULL,
-    OWNER_NAME nvarchar(128) NULL,
-    OWNER_TYPE nvarchar(10) NULL
-);
-
-ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
-
--- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
-CREATE TABLE ROLE_MAP
-(
-    ROLE_GRANT_ID bigint NOT NULL,
-    ADD_TIME int NOT NULL,
-    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR nvarchar(128) NULL,
-    GRANTOR_TYPE nvarchar(128) NULL,
-    PRINCIPAL_NAME nvarchar(128) NULL,
-    PRINCIPAL_TYPE nvarchar(128) NULL,
-    ROLE_ID bigint NULL
-);
-
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
-
--- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
-CREATE TABLE TBLS
-(
-    TBL_ID bigint NOT NULL,
-    CREATE_TIME int NOT NULL,
-    DB_ID bigint NULL,
-    LAST_ACCESS_TIME int NOT NULL,
-    OWNER nvarchar(767) NULL,
-    RETENTION int NOT NULL,
-    SD_ID bigint NULL,
-    TBL_NAME nvarchar(256) NULL,
-    TBL_TYPE nvarchar(128) NULL,
-    VIEW_EXPANDED_TEXT text NULL,
-    VIEW_ORIGINAL_TEXT text NULL,
-    IS_REWRITE_ENABLED bit NOT NULL
-);
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
-
--- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-CREATE TABLE SDS
-(
-    SD_ID bigint NOT NULL,
-    CD_ID bigint NULL,
-    INPUT_FORMAT nvarchar(4000) NULL,
-    IS_COMPRESSED bit NOT NULL,
-    IS_STOREDASSUBDIRECTORIES bit NOT NULL,
-    LOCATION nvarchar(4000) NULL,
-    NUM_BUCKETS int NOT NULL,
-    OUTPUT_FORMAT nvarchar(4000) NULL,
-    SERDE_ID bigint NULL
-);
-
-ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
-
--- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE TABLE PARTITION_EVENTS
-(
-    PART_NAME_ID bigint NOT NULL,
-    DB_NAME nvarchar(128) NULL,
-    EVENT_TIME bigint NOT NULL,
-    EVENT_TYPE int NOT NULL,
-    PARTITION_NAME nvarchar(767) NULL,
-    TBL_NAME nvarchar(256) NULL
-);
-
-ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
-
--- Table SORT_COLS for join relationship
-CREATE TABLE SORT_COLS
-(
-    SD_ID bigint NOT NULL,
-    "COLUMN_NAME" nvarchar(767) NULL,
-    "ORDER" int NOT NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table SKEWED_COL_NAMES for join relationship
-CREATE TABLE SKEWED_COL_NAMES
-(
-    SD_ID bigint NOT NULL,
-    SKEWED_COL_NAME nvarchar(255) NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table SKEWED_COL_VALUE_LOC_MAP for join relationship
-CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
-(
-    SD_ID bigint NOT NULL,
-    STRING_LIST_ID_KID bigint NOT NULL,
-    LOCATION nvarchar(4000) NULL
-);
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
-
--- Table SKEWED_STRING_LIST_VALUES for join relationship
-CREATE TABLE SKEWED_STRING_LIST_VALUES
-(
-    STRING_LIST_ID bigint NOT NULL,
-    STRING_LIST_VALUE nvarchar(255) NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
-
--- Table PARTITION_KEY_VALS for join relationship
-CREATE TABLE PARTITION_KEY_VALS
-(
-    PART_ID bigint NOT NULL,
-    PART_KEY_VAL nvarchar(255) NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
-
--- Table PARTITION_KEYS for join relationship
-CREATE TABLE PARTITION_KEYS
-(
-    TBL_ID bigint NOT NULL,
-    PKEY_COMMENT nvarchar(4000) NULL,
-    PKEY_NAME nvarchar(128) NOT NULL,
-    PKEY_TYPE nvarchar(767) NOT NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
-
--- Table SKEWED_VALUES for join relationship
-CREATE TABLE SKEWED_VALUES
-(
-    SD_ID_OID bigint NOT NULL,
-    STRING_LIST_ID_EID bigint NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
-
--- Table SD_PARAMS for join relationship
-CREATE TABLE SD_PARAMS
-(
-    SD_ID bigint NOT NULL,
-    PARAM_KEY nvarchar(256) NOT NULL,
-    PARAM_VALUE varchar(max) NULL
-);
-
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
-
--- Table FUNC_RU for join relationship
-CREATE TABLE FUNC_RU
-(
-    FUNC_ID bigint NOT NULL,
-    RESOURCE_TYPE int NOT NULL,
-    RESOURCE_URI nvarchar(4000) NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID,INTEGER_IDX);
-
--- Table TYPE_FIELDS for join relationship
-CREATE TABLE TYPE_FIELDS
-(
-    TYPE_NAME bigint NOT NULL,
-    COMMENT nvarchar(256) NULL,
-    FIELD_NAME nvarchar(128) NOT NULL,
-    FIELD_TYPE nvarchar(767) NOT NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
-
--- Table BUCKETING_COLS for join relationship
-CREATE TABLE BUCKETING_COLS
-(
-    SD_ID bigint NOT NULL,
-    BUCKET_COL_NAME nvarchar(255) NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table DATABASE_PARAMS for join relationship
-CREATE TABLE DATABASE_PARAMS
-(
-    DB_ID bigint NOT NULL,
-    PARAM_KEY nvarchar(180) NOT NULL,
-    PARAM_VALUE nvarchar(4000) NULL
-);
-
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
-
--- Table INDEX_PARAMS for join relationship
-CREATE TABLE INDEX_PARAMS
-(
-    INDEX_ID bigint NOT NULL,
-    PARAM_KEY nvarchar(256) NOT NULL,
-    PARAM_VALUE nvarchar(4000) NULL
-);
-
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
-
--- Table COLUMNS_V2 for join relationship
-CREATE TABLE COLUMNS_V2
-(
-    CD_ID bigint NOT NULL,
-    COMMENT nvarchar(256) NULL,
-    "COLUMN_NAME" nvarchar(767) NOT NULL,
-    TYPE_NAME varchar(max) NOT NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
-
--- Table SERDE_PARAMS for join relationship
-CREATE TABLE SERDE_PARAMS
-(
-    SERDE_ID bigint NOT NULL,
-    PARAM_KEY nvarchar(256) NOT NULL,
-    PARAM_VALUE varchar(max) NULL
-);
-
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
-
--- Table PARTITION_PARAMS for join relationship
-CREATE TABLE PARTITION_PARAMS
-(
-    PART_ID bigint NOT NULL,
-    PARAM_KEY nvarchar(256) NOT NULL,
-    PARAM_VALUE nvarchar(4000) NULL
-);
-
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
-
--- Table TABLE_PARAMS for join relationship
-CREATE TABLE TABLE_PARAMS
-(
-    TBL_ID bigint NOT NULL,
-    PARAM_KEY nvarchar(256) NOT NULL,
-    PARAM_VALUE varchar(max) NULL
-);
-
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
-
-CREATE TABLE NOTIFICATION_LOG
-(
-    NL_ID bigint NOT NULL,
-    EVENT_ID bigint NOT NULL,
-    EVENT_TIME int NOT NULL,
-    EVENT_TYPE nvarchar(32) NOT NULL,
-    DB_NAME nvarchar(128) NULL,
-    TBL_NAME nvarchar(256) NULL,
-    MESSAGE_FORMAT nvarchar(16),
-    MESSAGE text NULL
-);
-
-ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID);
-
-CREATE TABLE NOTIFICATION_SEQUENCE
-(
-    NNI_ID bigint NOT NULL,
-    NEXT_EVENT_ID bigint NOT NULL
-);
-
-ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);
-
--- Constraints for table MASTER_KEYS for class(es) [org.apache.hadoop.hive.metastore.model.MMasterKey]
-
--- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) ;
-
-CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
-
-CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
-
-CREATE INDEX IDXS_N50 ON IDXS (ORIG_TBL_ID);
-
-CREATE INDEX IDXS_N49 ON IDXS (INDEX_TBL_ID);
-
-
--- Constraints for table PART_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
-
-CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
-
-
--- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
-
-CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
-
-
--- Constraints for table SKEWED_STRING_LIST for class(es) [org.apache.hadoop.hive.metastore.model.MStringList]
-
--- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
-
-
--- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
-
-CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
-
-CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
-
-CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
-
-
--- Constraints for table CDS for class(es) [org.apache.hadoop.hive.metastore.model.MColumnDescriptor]
-
--- Constraints for table VERSION for class(es) [org.apache.hadoop.hive.metastore.model.MVersionTable]
-
--- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
-
-CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
-
-CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
-
-CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
-
-
--- Constraints for table TAB_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics]
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
-
-CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS (TBL_ID);
-
-
--- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
-CREATE UNIQUE INDEX UNIQUETYPE ON TYPES (TYPE_NAME);
-
-
--- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
-
-CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
-
-CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE UNIQUE INDEX UNIQUEDATABASE ON DBS ("NAME");
-
-
--- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
-
-CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
-
-
--- Constraints for table DELEGATION_TOKENS for class(es) [org.apache.hadoop.hive.metastore.model.MDelegationToken]
-
--- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-
--- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunction]
-ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
-
-CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME,DB_ID);
-
-CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
-
-
--- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) ;
-
-CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
-
-CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
-
-CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
-
-CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
-
-CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
-
-
--- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ;
-
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ;
-
-CREATE INDEX SDS_N50 ON SDS (CD_ID);
-
-CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
-
-
--- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
-
-
--- Constraints for table SORT_COLS
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
-
-CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
-
-
--- Constraints for table SKEWED_COL_NAMES
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
-
-CREATE INDEX SKEWED_COL_NAMES_N49 ON SKEWED_COL_NAMES (SD_ID);
-
-
--- Constraints for table SKEWED_COL_VALUE_LOC_MAP
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
-
-CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N50 ON SKEWED_COL_VALUE_LOC_MAP (STRING_LIST_ID_KID);
-
-CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N49 ON SKEWED_COL_VALUE_LOC_MAP (SD_ID);
-
-
--- Constraints for table SKEWED_STRING_LIST_VALUES
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
-
-CREATE INDEX SKEWED_STRING_LIST_VALUES_N49 ON SKEWED_STRING_LIST_VALUES (STRING_LIST_ID);
-
-
--- Constraints for table PARTITION_KEY_VALS
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
-
-CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
-
-
--- Constraints for table PARTITION_KEYS
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
-
-CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
-
-
--- Constraints for table SKEWED_VALUES
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) ;
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
-
-CREATE INDEX SKEWED_VALUES_N50 ON SKEWED_VALUES (SD_ID_OID);
-
-CREATE INDEX SKEWED_VALUES_N49 ON SKEWED_VALUES (STRING_LIST_ID_EID);
-
-
--- Constraints for table SD_PARAMS
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
-
-CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
-
-
--- Constraints for table FUNC_RU
-ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) ;
-
-CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
-
-
--- Constraints for table TYPE_FIELDS
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) ;
-
-CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
-
-
--- Constraints for table BUCKETING_COLS
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
-
-CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
-
-
--- Constraints for table DATABASE_PARAMS
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
-
-CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
-
-
--- Constraints for table INDEX_PARAMS
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) ;
-
-CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
-
-
--- Constraints for table COLUMNS_V2
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ;
-
-CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
-
-
--- Constraints for table SERDE_PARAMS
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ;
-
-CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
-
-
--- Constraints for table PARTITION_PARAMS
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
-
-CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
-
-
--- Constraints for table TABLE_PARAMS
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
-
-CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
-
-
-
--- -----------------------------------------------------------------------------------------------------------------------------------------------
--- Transaction and Lock Tables
--- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file.
--- -----------------------------------------------------------------------------------------------------------------------------------------------
-CREATE TABLE COMPACTION_QUEUE(
-	CQ_ID bigint NOT NULL,
-	CQ_DATABASE nvarchar(128) NOT NULL,
-	CQ_TABLE nvarchar(128) NOT NULL,
-	CQ_PARTITION nvarchar(767) NULL,
-	CQ_STATE char(1) NOT NULL,
-	CQ_TYPE char(1) NOT NULL,
-	CQ_TBLPROPERTIES nvarchar(2048) NULL,
-	CQ_WORKER_ID nvarchar(128) NULL,
-	CQ_START bigint NULL,
-	CQ_RUN_AS nvarchar(128) NULL,
-	CQ_HIGHEST_TXN_ID bigint NULL,
-    CQ_META_INFO varbinary(2048) NULL,
-	CQ_HADOOP_JOB_ID nvarchar(128) NULL,
-PRIMARY KEY CLUSTERED 
-(
-	CQ_ID ASC
-)
-);
-
-CREATE TABLE COMPLETED_COMPACTIONS (
-	CC_ID bigint NOT NULL,
-	CC_DATABASE nvarchar(128) NOT NULL,
-	CC_TABLE nvarchar(128) NOT NULL,
-	CC_PARTITION nvarchar(767) NULL,
-	CC_STATE char(1) NOT NULL,
-	CC_TYPE char(1) NOT NULL,
-	CC_TBLPROPERTIES nvarchar(2048) NULL,
-	CC_WORKER_ID nvarchar(128) NULL,
-	CC_START bigint NULL,
-	CC_END bigint NULL,
-	CC_RUN_AS nvarchar(128) NULL,
-	CC_HIGHEST_TXN_ID bigint NULL,
-    CC_META_INFO varbinary(2048) NULL,
-	CC_HADOOP_JOB_ID nvarchar(128) NULL,
-PRIMARY KEY CLUSTERED 
-(
-	CC_ID ASC
-)
-);
-
-CREATE TABLE COMPLETED_TXN_COMPONENTS(
-	CTC_TXNID bigint NULL,
-	CTC_DATABASE nvarchar(128) NOT NULL,
-	CTC_TABLE nvarchar(128) NULL,
-	CTC_PARTITION nvarchar(767) NULL
-);
-
-CREATE TABLE HIVE_LOCKS(
-	HL_LOCK_EXT_ID bigint NOT NULL,
-	HL_LOCK_INT_ID bigint NOT NULL,
-	HL_TXNID bigint NULL,
-	HL_DB nvarchar(128) NOT NULL,
-	HL_TABLE nvarchar(128) NULL,
-	HL_PARTITION nvarchar(767) NULL,
-	HL_LOCK_STATE char(1) NOT NULL,
-	HL_LOCK_TYPE char(1) NOT NULL,
-	HL_LAST_HEARTBEAT bigint NOT NULL,
-	HL_ACQUIRED_AT bigint NULL,
-	HL_USER nvarchar(128) NOT NULL,
-	HL_HOST nvarchar(128) NOT NULL,
-    HL_HEARTBEAT_COUNT int NULL,
-    HL_AGENT_INFO nvarchar(128) NULL,
-    HL_BLOCKEDBY_EXT_ID bigint NULL,
-    HL_BLOCKEDBY_INT_ID bigint NULL,
-PRIMARY KEY CLUSTERED 
-(
-	HL_LOCK_EXT_ID ASC,
-	HL_LOCK_INT_ID ASC
-)
-);
-
-CREATE TABLE NEXT_COMPACTION_QUEUE_ID(
-	NCQ_NEXT bigint NOT NULL
-);
-
-INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
-
-CREATE TABLE NEXT_LOCK_ID(
-	NL_NEXT bigint NOT NULL
-);
-
-INSERT INTO NEXT_LOCK_ID VALUES(1);
-
-CREATE TABLE NEXT_TXN_ID(
-	NTXN_NEXT bigint NOT NULL
-);
-
-INSERT INTO NEXT_TXN_ID VALUES(1);
-
-CREATE TABLE TXNS(
-	TXN_ID bigint NOT NULL,
-	TXN_STATE char(1) NOT NULL,
-	TXN_STARTED bigint NOT NULL,
-	TXN_LAST_HEARTBEAT bigint NOT NULL,
-	TXN_USER nvarchar(128) NOT NULL,
-	TXN_HOST nvarchar(128) NOT NULL,
-    TXN_AGENT_INFO nvarchar(128) NULL,
-    TXN_META_INFO nvarchar(128) NULL,
-    TXN_HEARTBEAT_COUNT int NULL,
-PRIMARY KEY CLUSTERED 
-(
-	TXN_ID ASC
-)
-);
-
-CREATE TABLE TXN_COMPONENTS(
-	TC_TXNID bigint NULL,
-	TC_DATABASE nvarchar(128) NOT NULL,
-	TC_TABLE nvarchar(128) NULL,
-	TC_PARTITION nvarchar(767) NULL,
-	TC_OPERATION_TYPE char(1) NOT NULL
-);
-
-ALTER TABLE TXN_COMPONENTS  WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID);
-
-CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
-
-CREATE TABLE AUX_TABLE (
-  MT_KEY1 nvarchar(128) NOT NULL,
-  MT_KEY2 bigint NOT NULL,
-  MT_COMMENT nvarchar(255) NULL,
-  PRIMARY KEY CLUSTERED
-(
-    MT_KEY1 ASC,
-    MT_KEY2 ASC
-)
-);
-
-CREATE TABLE KEY_CONSTRAINTS
-(
-  CHILD_CD_ID BIGINT,
-  CHILD_INTEGER_IDX INT,
-  CHILD_TBL_ID BIGINT,
-  PARENT_CD_ID BIGINT NOT NULL,
-  PARENT_INTEGER_IDX INT NOT NULL,
-  PARENT_TBL_ID BIGINT NOT NULL,
-  POSITION INT NOT NULL,
-  CONSTRAINT_NAME VARCHAR(400) NOT NULL,
-  CONSTRAINT_TYPE SMALLINT NOT NULL,
-  UPDATE_RULE SMALLINT,
-  DELETE_RULE SMALLINT,
-  ENABLE_VALIDATE_RELY SMALLINT NOT NULL
-) ;
-
-ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
-
-CREATE INDEX CONSTRAINTS_PARENT_TBL_ID__INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID);
-
-CREATE TABLE WRITE_SET (
-  WS_DATABASE nvarchar(128) NOT NULL,
-  WS_TABLE nvarchar(128) NOT NULL,
-  WS_PARTITION nvarchar(767),
-  WS_TXNID bigint NOT NULL,
-  WS_COMMIT_ID bigint NOT NULL,
-  WS_OPERATION_TYPE char(1) NOT NULL
-);
-
-
--- -----------------------------------------------------------------
--- Record schema version. Should be the last step in the init script
--- -----------------------------------------------------------------
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '3.0.0', 'Hive release version 3.0.0');
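
As its final step the deleted init script records the schema version in the VERSION table, the same row the upgrade scripts later update in place. The snippet below is a hedged illustration of reading that record back over plain JDBC; the connection URL and credentials are placeholders, and this is not Hive's actual SchemaTool code.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Hedged sketch: read back the schema version row that the init script inserts.
// The JDBC URL and credentials are placeholders, not real deployment values.
public class ReadMetastoreSchemaVersion {
  public static void main(String[] args) throws Exception {
    String url = "jdbc:sqlserver://localhost:1433;databaseName=hive_metastore"; // placeholder
    try (Connection conn = DriverManager.getConnection(url, "hiveuser", "hivepassword"); // placeholders
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery(
             "SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION WHERE VER_ID = 1")) {
      if (rs.next()) {
        System.out.println(rs.getString("SCHEMA_VERSION") + " / " + rs.getString("VERSION_COMMENT"));
      }
    }
  }
}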

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mssql/hive-txn-schema-0.14.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/hive-txn-schema-0.14.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-txn-schema-0.14.0.mssql.sql
index b7dda59..cba9637 100644
--- a/metastore/scripts/upgrade/mssql/hive-txn-schema-0.14.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/hive-txn-schema-0.14.0.mssql.sql
@@ -36,7 +36,7 @@ PRIMARY KEY CLUSTERED
 CREATE TABLE COMPLETED_TXN_COMPONENTS(
 	CTC_TXNID bigint NULL,
 	CTC_DATABASE varchar(128) NOT NULL,
-	CTC_TABLE varchar(256) NULL,
+	CTC_TABLE varchar(128) NULL,
 	CTC_PARTITION varchar(767) NULL
 );
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql b/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
index b786b16..99024c2 100644
--- a/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
@@ -3,7 +3,6 @@ SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS MESSAGE;
 :r 022-HIVE-14496.mssql.sql
 :r 023-HIVE-14637.mssql.sql
 :r 023-HIVE-10562.mssql.sql
-:r 024-HIVE-12274.mssql.sql
 
 UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS MESSAGE;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mssql/upgrade-2.2.0-to-2.3.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/upgrade-2.2.0-to-2.3.0.mssql.sql b/metastore/scripts/upgrade/mssql/upgrade-2.2.0-to-2.3.0.mssql.sql
deleted file mode 100644
index 04dafba..0000000
--- a/metastore/scripts/upgrade/mssql/upgrade-2.2.0-to-2.3.0.mssql.sql
+++ /dev/null
@@ -1,6 +0,0 @@
-SELECT 'Upgrading MetaStore schema from 2.2.0 to 2.3.0' AS MESSAGE;
-
-:r 025-HIVE-16399.mssql.sql
-
-UPDATE VERSION SET SCHEMA_VERSION='2.3.0', VERSION_COMMENT='Hive release version 2.3.0' where VER_ID=1;
-SELECT 'Finished upgrading MetaStore schema from 2.2.0 to 2.3.0' AS MESSAGE;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql b/metastore/scripts/upgrade/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
deleted file mode 100644
index 94d18a3..0000000
--- a/metastore/scripts/upgrade/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
+++ /dev/null
@@ -1,4 +0,0 @@
-SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0' AS MESSAGE;
-
-UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
-SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS MESSAGE;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mssql/upgrade.order.mssql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/upgrade.order.mssql b/metastore/scripts/upgrade/mssql/upgrade.order.mssql
index 8623683..6162140 100644
--- a/metastore/scripts/upgrade/mssql/upgrade.order.mssql
+++ b/metastore/scripts/upgrade/mssql/upgrade.order.mssql
@@ -6,5 +6,3 @@
 1.2.0-to-2.0.0
 2.0.0-to-2.1.0
 2.1.0-to-2.2.0
-2.2.0-to-2.3.0
-2.3.0-to-3.0.0

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mysql/039-HIVE-12274.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/039-HIVE-12274.mysql.sql b/metastore/scripts/upgrade/mysql/039-HIVE-12274.mysql.sql
deleted file mode 100644
index cdaf286..0000000
--- a/metastore/scripts/upgrade/mysql/039-HIVE-12274.mysql.sql
+++ /dev/null
@@ -1,18 +0,0 @@
-ALTER TABLE COLUMNS_V2 MODIFY TYPE_NAME MEDIUMTEXT;
-ALTER TABLE TABLE_PARAMS MODIFY PARAM_VALUE MEDIUMTEXT;
-ALTER TABLE SERDE_PARAMS MODIFY PARAM_VALUE MEDIUMTEXT;
-ALTER TABLE SD_PARAMS MODIFY PARAM_VALUE MEDIUMTEXT;
-
-ALTER TABLE TBLS MODIFY TBL_NAME varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL;
-ALTER TABLE NOTIFICATION_LOG MODIFY TBL_NAME varchar(256) CHARACTER SET latin1 COLLATE latin1_bin;
-ALTER TABLE PARTITION_EVENTS MODIFY TBL_NAME varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL;
-ALTER TABLE TAB_COL_STATS MODIFY TABLE_NAME varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL;
-ALTER TABLE PART_COL_STATS MODIFY TABLE_NAME varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL;
-ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY CTC_TABLE varchar(256) CHARACTER SET latin1 COLLATE latin1_bin;
-
-ALTER TABLE COLUMNS_V2 MODIFY COLUMN_NAME varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL;
-ALTER TABLE PART_COL_PRIVS MODIFY COLUMN_NAME varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL;
-ALTER TABLE TBL_COL_PRIVS MODIFY COLUMN_NAME varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL;
-ALTER TABLE SORT_COLS MODIFY COLUMN_NAME varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL;
-ALTER TABLE TAB_COL_STATS MODIFY COLUMN_NAME varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL;
-ALTER TABLE PART_COL_STATS MODIFY COLUMN_NAME varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mysql/040-HIVE-16399.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/040-HIVE-16399.mysql.sql b/metastore/scripts/upgrade/mysql/040-HIVE-16399.mysql.sql
deleted file mode 100644
index f6cc31f..0000000
--- a/metastore/scripts/upgrade/mysql/040-HIVE-16399.mysql.sql
+++ /dev/null
@@ -1 +0,0 @@
-CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql
index 20cfbc4..439500d 100644
--- a/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql
@@ -53,7 +53,7 @@ CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
   `CD_ID` bigint(20) NOT NULL,
   `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TYPE_NAME` MEDIUMTEXT DEFAULT NULL,
+  `TYPE_NAME` varchar(4000) DEFAULT NULL,
   `INTEGER_IDX` int(11) NOT NULL,
   PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
   KEY `COLUMNS_V2_N49` (`CD_ID`),
@@ -232,7 +232,7 @@ CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
   `EVENT_TIME` bigint(20) NOT NULL,
   `EVENT_TYPE` int(11) NOT NULL,
   `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   PRIMARY KEY (`PART_NAME_ID`),
   KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
@@ -296,7 +296,7 @@ CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
 /*!40101 SET character_set_client = utf8 */;
 CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
   `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(1000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `CREATE_TIME` int(11) NOT NULL,
   `GRANT_OPTION` smallint(6) NOT NULL,
   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
@@ -406,7 +406,7 @@ CREATE TABLE IF NOT EXISTS `SDS` (
 CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
   `SD_ID` bigint(20) NOT NULL,
   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
   KEY `SD_PARAMS_N49` (`SD_ID`),
   CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
@@ -449,7 +449,7 @@ CREATE TABLE IF NOT EXISTS `SERDES` (
 CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
   `SERDE_ID` bigint(20) NOT NULL,
   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
   KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
   CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
@@ -544,7 +544,7 @@ CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
 /*!40101 SET character_set_client = utf8 */;
 CREATE TABLE IF NOT EXISTS `SORT_COLS` (
   `SD_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(1000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `ORDER` int(11) NOT NULL,
   `INTEGER_IDX` int(11) NOT NULL,
   PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
@@ -562,7 +562,7 @@ CREATE TABLE IF NOT EXISTS `SORT_COLS` (
 CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
   `TBL_ID` bigint(20) NOT NULL,
   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
   KEY `TABLE_PARAMS_N49` (`TBL_ID`),
   CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
@@ -583,7 +583,7 @@ CREATE TABLE IF NOT EXISTS `TBLS` (
   `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `RETENTION` int(11) NOT NULL,
   `SD_ID` bigint(20) DEFAULT NULL,
-  `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `VIEW_EXPANDED_TEXT` mediumtext,
   `VIEW_ORIGINAL_TEXT` mediumtext,
@@ -607,7 +607,7 @@ CREATE TABLE IF NOT EXISTS `TBLS` (
 /*!40101 SET character_set_client = utf8 */;
 CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
   `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(1000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `CREATE_TIME` int(11) NOT NULL,
   `GRANT_OPTION` smallint(6) NOT NULL,
   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
@@ -652,8 +652,8 @@ CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
 CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
  `CS_ID` bigint(20) NOT NULL,
  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(1000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
  `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
  `TBL_ID` bigint(20) NOT NULL,
  `LONG_LOW_VALUE` bigint(20),
@@ -679,9 +679,9 @@ CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
 CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
  `CS_ID` bigint(20) NOT NULL,
  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(1000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
  `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
  `PART_ID` bigint(20) NOT NULL,
  `LONG_LOW_VALUE` bigint(20),
@@ -799,7 +799,7 @@ CREATE TABLE IF NOT EXISTS `NOTIFICATION_LOG`
     `EVENT_TIME` INT(11) NOT NULL,
     `EVENT_TYPE` varchar(32) NOT NULL,
     `DB_NAME` varchar(128),
-    `TBL_NAME` varchar(256),
+    `TBL_NAME` varchar(128),
     `MESSAGE` longtext,
     `MESSAGE_FORMAT` varchar(16),
     PRIMARY KEY (`NL_ID`)

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mysql/hive-schema-2.3.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-schema-2.3.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-2.3.0.mysql.sql
deleted file mode 100644
index 1403e38..0000000
--- a/metastore/scripts/upgrade/mysql/hive-schema-2.3.0.mysql.sql
+++ /dev/null
@@ -1,853 +0,0 @@
--- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
---
--- Host: localhost    Database: test
--- ------------------------------------------------------
--- Server version	5.5.25
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Table structure for table `BUCKETING_COLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `BUCKETING_COLS_N49` (`SD_ID`),
-  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `CDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `CDS` (
-  `CD_ID` bigint(20) NOT NULL,
-  PRIMARY KEY (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `COLUMNS_V2`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
-  `CD_ID` bigint(20) NOT NULL,
-  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TYPE_NAME` MEDIUMTEXT DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
-  KEY `COLUMNS_V2_N49` (`CD_ID`),
-  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DATABASE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
-  `DB_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
-  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
-  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DBS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DBS` (
-  `DB_ID` bigint(20) NOT NULL,
-  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_ID`),
-  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DB_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
-  `DB_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DB_ID` bigint(20) DEFAULT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_GRANT_ID`),
-  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `DB_PRIVS_N49` (`DB_ID`),
-  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `GLOBAL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
-  `USER_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`USER_GRANT_ID`),
-  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `IDXS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `IDXS` (
-  `INDEX_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DEFERRED_REBUILD` bit(1) NOT NULL,
-  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`INDEX_ID`),
-  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
-  KEY `IDXS_N51` (`SD_ID`),
-  KEY `IDXS_N50` (`INDEX_TBL_ID`),
-  KEY `IDXS_N49` (`ORIG_TBL_ID`),
-  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `INDEX_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
-  `INDEX_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
-  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
-  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `NUCLEUS_TABLES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
-  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`CLASS_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITIONS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITIONS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`PART_ID`),
-  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
-  KEY `PARTITIONS_N49` (`TBL_ID`),
-  KEY `PARTITIONS_N50` (`SD_ID`),
-  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_EVENTS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
-  `PART_NAME_ID` bigint(20) NOT NULL,
-  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `EVENT_TIME` bigint(20) NOT NULL,
-  `EVENT_TYPE` int(11) NOT NULL,
-  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_NAME_ID`),
-  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_KEYS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
-  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
-  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_KEY_VALS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
-  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
-  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
-  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
-  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PART_COL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
-  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_ID` bigint(20) DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
-  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
-  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PART_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
-  `PART_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_ID` bigint(20) DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_GRANT_ID`),
-  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `PART_PRIVS_N49` (`PART_ID`),
-  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `ROLES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `ROLES` (
-  `ROLE_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`ROLE_ID`),
-  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `ROLE_MAP`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
-  `ROLE_GRANT_ID` bigint(20) NOT NULL,
-  `ADD_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ROLE_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`ROLE_GRANT_ID`),
-  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `ROLE_MAP_N49` (`ROLE_ID`),
-  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SDS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `CD_ID` bigint(20) DEFAULT NULL,
-  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `IS_COMPRESSED` bit(1) NOT NULL,
-  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
-  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `NUM_BUCKETS` int(11) NOT NULL,
-  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SERDE_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`),
-  KEY `SDS_N49` (`SERDE_ID`),
-  KEY `SDS_N50` (`CD_ID`),
-  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
-  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SD_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
-  KEY `SD_PARAMS_N49` (`SD_ID`),
-  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SEQUENCE_TABLE`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
-  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `NEXT_VAL` bigint(20) NOT NULL,
-  PRIMARY KEY (`SEQUENCE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SERDES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SERDES` (
-  `SERDE_ID` bigint(20) NOT NULL,
-  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SERDE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SERDE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
-  `SERDE_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
-  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
-  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_COL_NAMES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
-  `SD_ID` bigint(20) NOT NULL,
-  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
-  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
-  `SD_ID` bigint(20) NOT NULL,
-  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
-  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
-  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
-  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
-  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_STRING_LIST`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
-  `STRING_LIST_ID` bigint(20) NOT NULL,
-  PRIMARY KEY (`STRING_LIST_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_STRING_LIST_VALUES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
-  `STRING_LIST_ID` bigint(20) NOT NULL,
-  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
-  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_VALUES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
-  `SD_ID_OID` bigint(20) NOT NULL,
-  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
-  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
-  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
-  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SORT_COLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SORT_COLS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ORDER` int(11) NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `SORT_COLS_N49` (`SD_ID`),
-  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TABLE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
-  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
-  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBLS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DB_ID` bigint(20) DEFAULT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `RETENTION` int(11) NOT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `VIEW_EXPANDED_TEXT` mediumtext,
-  `VIEW_ORIGINAL_TEXT` mediumtext,
-  `IS_REWRITE_ENABLED` bit(1) NOT NULL,
-  PRIMARY KEY (`TBL_ID`),
-  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
-  KEY `TBLS_N50` (`SD_ID`),
-  KEY `TBLS_N49` (`DB_ID`),
-  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBL_COL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
-  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
-  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
-  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
-  `TBL_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_GRANT_ID`),
-  KEY `TBL_PRIVS_N49` (`TBL_ID`),
-  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TAB_COL_STATS`
---
-CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
- `CS_ID` bigint(20) NOT NULL,
- `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TBL_ID` bigint(20) NOT NULL,
- `LONG_LOW_VALUE` bigint(20),
- `LONG_HIGH_VALUE` bigint(20),
- `DOUBLE_HIGH_VALUE` double(53,4),
- `DOUBLE_LOW_VALUE` double(53,4),
- `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `NUM_NULLS` bigint(20) NOT NULL,
- `NUM_DISTINCTS` bigint(20),
- `AVG_COL_LEN` double(53,4),
- `MAX_COL_LEN` bigint(20),
- `NUM_TRUES` bigint(20),
- `NUM_FALSES` bigint(20),
- `LAST_ANALYZED` bigint(20) NOT NULL,
-  PRIMARY KEY (`CS_ID`),
-  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table `PART_COL_STATS`
---
-CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
- `CS_ID` bigint(20) NOT NULL,
- `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `PART_ID` bigint(20) NOT NULL,
- `LONG_LOW_VALUE` bigint(20),
- `LONG_HIGH_VALUE` bigint(20),
- `DOUBLE_HIGH_VALUE` double(53,4),
- `DOUBLE_LOW_VALUE` double(53,4),
- `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `NUM_NULLS` bigint(20) NOT NULL,
- `NUM_DISTINCTS` bigint(20),
- `AVG_COL_LEN` double(53,4),
- `MAX_COL_LEN` bigint(20),
- `NUM_TRUES` bigint(20),
- `NUM_FALSES` bigint(20),
- `LAST_ANALYZED` bigint(20) NOT NULL,
-  PRIMARY KEY (`CS_ID`),
-  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE;
-
---
--- Table structure for table `TYPES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TYPES` (
-  `TYPES_ID` bigint(20) NOT NULL,
-  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`TYPES_ID`),
-  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TYPE_FIELDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
-  `TYPE_NAME` bigint(20) NOT NULL,
-  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
-  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
-  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
--- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
-CREATE TABLE IF NOT EXISTS `MASTER_KEYS` 
-(
-    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
-    `MASTER_KEY` VARCHAR(767) BINARY NULL,
-    PRIMARY KEY (`KEY_ID`)
-) ENGINE=INNODB DEFAULT CHARSET=latin1;
-
--- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
-CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
-(
-    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
-    `TOKEN` VARCHAR(767) BINARY NULL,
-    PRIMARY KEY (`TOKEN_IDENT`)
-) ENGINE=INNODB DEFAULT CHARSET=latin1;
-
---
--- Table structure for VERSION
---
-CREATE TABLE IF NOT EXISTS `VERSION` (
-  `VER_ID` BIGINT NOT NULL,
-  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
-  `VERSION_COMMENT` VARCHAR(255),
-  PRIMARY KEY (`VER_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table FUNCS
---
-CREATE TABLE IF NOT EXISTS `FUNCS` (
-  `FUNC_ID` BIGINT(20) NOT NULL,
-  `CLASS_NAME` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
-  `CREATE_TIME` INT(11) NOT NULL,
-  `DB_ID` BIGINT(20),
-  `FUNC_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
-  `FUNC_TYPE` INT(11) NOT NULL,
-  `OWNER_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
-  `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,
-  PRIMARY KEY (`FUNC_ID`),
-  UNIQUE KEY `UNIQUEFUNCTION` (`FUNC_NAME`, `DB_ID`),
-  KEY `FUNCS_N49` (`DB_ID`),
-  CONSTRAINT `FUNCS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table FUNC_RU
---
-CREATE TABLE IF NOT EXISTS `FUNC_RU` (
-  `FUNC_ID` BIGINT(20) NOT NULL,
-  `RESOURCE_TYPE` INT(11) NOT NULL,
-  `RESOURCE_URI` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
-  `INTEGER_IDX` INT(11) NOT NULL,
-  PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`),
-  CONSTRAINT `FUNC_RU_FK1` FOREIGN KEY (`FUNC_ID`) REFERENCES `FUNCS` (`FUNC_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE IF NOT EXISTS `NOTIFICATION_LOG`
-(
-    `NL_ID` BIGINT(20) NOT NULL,
-    `EVENT_ID` BIGINT(20) NOT NULL,
-    `EVENT_TIME` INT(11) NOT NULL,
-    `EVENT_TYPE` varchar(32) NOT NULL,
-    `DB_NAME` varchar(128),
-    `TBL_NAME` varchar(256),
-    `MESSAGE` longtext,
-    `MESSAGE_FORMAT` varchar(16),
-    PRIMARY KEY (`NL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE IF NOT EXISTS `NOTIFICATION_SEQUENCE`
-(
-    `NNI_ID` BIGINT(20) NOT NULL,
-    `NEXT_EVENT_ID` BIGINT(20) NOT NULL,
-    PRIMARY KEY (`NNI_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS`
-(
-  `CHILD_CD_ID` BIGINT,
-  `CHILD_INTEGER_IDX` INT(11),
-  `CHILD_TBL_ID` BIGINT,
-  `PARENT_CD_ID` BIGINT NOT NULL,
-  `PARENT_INTEGER_IDX` INT(11) NOT NULL,
-  `PARENT_TBL_ID` BIGINT NOT NULL,
-  `POSITION` BIGINT NOT NULL,
-  `CONSTRAINT_NAME` VARCHAR(400) NOT NULL,
-  `CONSTRAINT_TYPE` SMALLINT(6)  NOT NULL,
-  `UPDATE_RULE` SMALLINT(6),
-  `DELETE_RULE` SMALLINT(6),
-  `ENABLE_VALIDATE_RELY` SMALLINT(6) NOT NULL,
-  PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE INDEX `CONSTRAINTS_PARENT_TABLE_ID_INDEX` ON KEY_CONSTRAINTS (`PARENT_TBL_ID`) USING BTREE;
-
--- ----------------------------
--- Transaction and Lock Tables
--- ----------------------------
-SOURCE hive-txn-schema-2.3.0.mysql.sql;
-
--- -----------------------------------------------------------------
--- Record schema version. Should be the last step in the init script
--- -----------------------------------------------------------------
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '2.3.0', 'Hive release version 2.3.0');
-
-/*!40101 SET character_set_client = @saved_cs_client */;
-/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
-
-/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
-/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
-/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-
--- Dump completed on 2012-08-23  0:56:31


http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
----------------------------------------------------------------------
diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
index 6f96e1d..f7e3e3a 100644
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
+++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
 import org.apache.hadoop.hive.metastore.RawStore;
 import org.apache.hadoop.hive.metastore.RawStoreProxy;
@@ -57,7 +56,6 @@ import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.DropTableEvent;
 import org.apache.hadoop.hive.metastore.events.InsertEvent;
 import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
-import org.apache.hadoop.hive.metastore.events.ListenerEvent;
 import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
 import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
 import org.apache.hadoop.hive.metastore.messaging.PartitionFiles;
@@ -88,17 +86,23 @@ public class DbNotificationListener extends MetaStoreEventListener {
   // HiveConf rather than a Configuration.
   private HiveConf hiveConf;
   private MessageFactory msgFactory;
-
-  private synchronized void init(HiveConf conf) throws MetaException {
-    if (cleaner == null) {
-      cleaner =
-          new CleanerThread(conf, RawStoreProxy.getProxy(conf, conf,
-              conf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL), 999999));
+  private RawStore rs;
+
+  private synchronized void init(HiveConf conf) {
+    try {
+      rs = RawStoreProxy.getProxy(conf, conf,
+          conf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL), 999999);
+    } catch (MetaException e) {
+      LOG.error("Unable to connect to raw store, notifications will not be tracked", e);
+      rs = null;
+    }
+    if (cleaner == null && rs != null) {
+      cleaner = new CleanerThread(conf, rs);
       cleaner.start();
     }
   }
 
-  public DbNotificationListener(Configuration config) throws MetaException {
+  public DbNotificationListener(Configuration config) {
     super(config);
     // The code in MetastoreUtils.getMetaStoreListeners() that calls this looks for a constructor
     // with a Configuration parameter, so we have to declare config as Configuration.  But it
@@ -138,7 +142,7 @@ public class DbNotificationListener extends MetaStoreEventListener {
             .buildCreateTableMessage(t, new FileIterator(t.getSd().getLocation())).toString());
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
-    process(event, tableEvent);
+    process(event);
   }
 
   /**
@@ -153,7 +157,7 @@ public class DbNotificationListener extends MetaStoreEventListener {
             .buildDropTableMessage(t).toString());
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
-    process(event, tableEvent);
+    process(event);
   }
 
   /**
@@ -166,10 +170,10 @@ public class DbNotificationListener extends MetaStoreEventListener {
     Table after = tableEvent.getNewTable();
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.ALTER_TABLE.toString(), msgFactory
-            .buildAlterTableMessage(before, after, tableEvent.getIsTruncateOp()).toString());
+            .buildAlterTableMessage(before, after).toString());
     event.setDbName(after.getDbName());
     event.setTableName(after.getTableName());
-    process(event, tableEvent);
+    process(event);
   }
 
   class FileIterator implements Iterator<String> {
@@ -277,7 +281,7 @@ public class DbNotificationListener extends MetaStoreEventListener {
         new NotificationEvent(0, now(), EventType.ADD_PARTITION.toString(), msg);
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
-    process(event, partitionEvent);
+    process(event);
   }
 
   /**
@@ -292,7 +296,7 @@ public class DbNotificationListener extends MetaStoreEventListener {
             .buildDropPartitionMessage(t, partitionEvent.getPartitionIterator()).toString());
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
-    process(event, partitionEvent);
+    process(event);
   }
 
   /**
@@ -305,10 +309,10 @@ public class DbNotificationListener extends MetaStoreEventListener {
     Partition after = partitionEvent.getNewPartition();
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.ALTER_PARTITION.toString(), msgFactory
-            .buildAlterPartitionMessage(partitionEvent.getTable(), before, after, partitionEvent.getIsTruncateOp()).toString());
+            .buildAlterPartitionMessage(partitionEvent.getTable(), before, after).toString());
     event.setDbName(before.getDbName());
     event.setTableName(before.getTableName());
-    process(event, partitionEvent);
+    process(event);
   }
 
   /**
@@ -322,7 +326,7 @@ public class DbNotificationListener extends MetaStoreEventListener {
         new NotificationEvent(0, now(), EventType.CREATE_DATABASE.toString(), msgFactory
             .buildCreateDatabaseMessage(db).toString());
     event.setDbName(db.getName());
-    process(event, dbEvent);
+    process(event);
   }
 
   /**
@@ -336,7 +340,7 @@ public class DbNotificationListener extends MetaStoreEventListener {
         new NotificationEvent(0, now(), EventType.DROP_DATABASE.toString(), msgFactory
             .buildDropDatabaseMessage(db).toString());
     event.setDbName(db.getName());
-    process(event, dbEvent);
+    process(event);
   }
 
   /**
@@ -350,7 +354,7 @@ public class DbNotificationListener extends MetaStoreEventListener {
         new NotificationEvent(0, now(), EventType.CREATE_FUNCTION.toString(), msgFactory
             .buildCreateFunctionMessage(fn).toString());
     event.setDbName(fn.getDbName());
-    process(event, fnEvent);
+    process(event);
   }
 
   /**
@@ -364,7 +368,7 @@ public class DbNotificationListener extends MetaStoreEventListener {
         new NotificationEvent(0, now(), EventType.DROP_FUNCTION.toString(), msgFactory
             .buildDropFunctionMessage(fn).toString());
     event.setDbName(fn.getDbName());
-    process(event, fnEvent);
+    process(event);
   }
 
   /**
@@ -378,7 +382,7 @@ public class DbNotificationListener extends MetaStoreEventListener {
         new NotificationEvent(0, now(), EventType.CREATE_INDEX.toString(), msgFactory
             .buildCreateIndexMessage(index).toString());
     event.setDbName(index.getDbName());
-    process(event, indexEvent);
+    process(event);
   }
 
   /**
@@ -392,7 +396,7 @@ public class DbNotificationListener extends MetaStoreEventListener {
         new NotificationEvent(0, now(), EventType.DROP_INDEX.toString(), msgFactory
             .buildDropIndexMessage(index).toString());
     event.setDbName(index.getDbName());
-    process(event, indexEvent);
+    process(event);
   }
 
   /**
@@ -407,7 +411,7 @@ public class DbNotificationListener extends MetaStoreEventListener {
         new NotificationEvent(0, now(), EventType.ALTER_INDEX.toString(), msgFactory
             .buildAlterIndexMessage(before, after).toString());
     event.setDbName(before.getDbName());
-    process(event, indexEvent);
+    process(event);
   }
 
   class FileChksumIterator implements Iterator<String> {
@@ -439,12 +443,12 @@ public class DbNotificationListener extends MetaStoreEventListener {
   public void onInsert(InsertEvent insertEvent) throws MetaException {
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.INSERT.toString(), msgFactory.buildInsertMessage(
-            insertEvent.getDb(), insertEvent.getTable(), insertEvent.getPartitionKeyValues(), insertEvent.isReplace(),
+            insertEvent.getDb(), insertEvent.getTable(), insertEvent.getPartitionKeyValues(),
             new FileChksumIterator(insertEvent.getFiles(), insertEvent.getFileChecksums()))
             .toString());
     event.setDbName(insertEvent.getDb());
     event.setTableName(insertEvent.getTable());
-    process(event, insertEvent);
+    process(event);
   }
 
   /**
@@ -468,27 +472,18 @@ public class DbNotificationListener extends MetaStoreEventListener {
     return (int)millis;
   }
 
-  /**
-   * Process this notification by adding it to metastore DB.
-   *
-   * @param event NotificationEvent is the object written to the metastore DB.
-   * @param listenerEvent ListenerEvent (from which NotificationEvent was based) used only to set the
-   *                      DB_NOTIFICATION_EVENT_ID_KEY_NAME for future reference by other listeners.
-   */
-  private void process(NotificationEvent event, ListenerEvent listenerEvent) throws MetaException {
+  // Process this notification by adding it to metastore DB
+  private void process(NotificationEvent event) {
     event.setMessageFormat(msgFactory.getMessageFormat());
-    synchronized (NOTIFICATION_TBL_LOCK) {
-      LOG.debug("DbNotificationListener: Processing : {}:{}", event.getEventId(),
-          event.getMessage());
-      HMSHandler.getMSForConf(hiveConf).addNotificationEvent(event);
-    }
-
-      // Set the DB_NOTIFICATION_EVENT_ID for future reference by other listeners.
-      if (event.isSetEventId()) {
-        listenerEvent.putParameter(
-            MetaStoreEventListenerConstants.DB_NOTIFICATION_EVENT_ID_KEY_NAME,
-            Long.toString(event.getEventId()));
+    if (rs != null) {
+      synchronized (NOTIFICATION_TBL_LOCK) {
+        LOG.debug("DbNotificationListener: Processing : {}:{}", event.getEventId(),
+            event.getMessage());
+        rs.addNotificationEvent(event);
       }
+    } else {
+      LOG.warn("Dropping event " + event + " since notification is not running.");
+    }
   }
 
   private static class CleanerThread extends Thread {
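
For readers following the listener changes above: the sketch below shows the minimal shape of a
metastore event listener built on the same MetaStoreEventListener hooks that DbNotificationListener
overrides (a constructor taking a Configuration, per-event callbacks that throw MetaException). It is
an illustrative sketch only; the class name and the logging choices are assumptions and are not code
from this commit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
import org.apache.hadoop.hive.metastore.events.InsertEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative listener; follows the same pattern as DbNotificationListener above:
// react to a metastore event, build a description of it, and hand it off (here we only log it).
public class LoggingMetaStoreListener extends MetaStoreEventListener {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingMetaStoreListener.class);

  public LoggingMetaStoreListener(Configuration config) {
    // The metastore instantiates listeners through a constructor taking a Configuration,
    // as the comment in the DbNotificationListener constructor above notes.
    super(config);
  }

  @Override
  public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
    LOG.info("table created: {}.{}",
        tableEvent.getTable().getDbName(), tableEvent.getTable().getTableName());
  }

  @Override
  public void onInsert(InsertEvent insertEvent) throws MetaException {
    LOG.info("insert into: {}.{}", insertEvent.getDb(), insertEvent.getTable());
  }
}

Such a listener is typically registered through the hive.metastore.event.listeners property; whether a
given deployment lists DbNotificationListener there or in the transactional listener list is
configuration-dependent and is not asserted by this diff.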

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/MetaStoreEventListenerConstants.java
----------------------------------------------------------------------
diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/MetaStoreEventListenerConstants.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/MetaStoreEventListenerConstants.java
deleted file mode 100644
index a4f2d59..0000000
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/MetaStoreEventListenerConstants.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hive.hcatalog.listener;
-
-/**
- * Keeps a list of reserved keys used by Hive listeners when updating the ListenerEvent
- * parameters.
- */
-public class MetaStoreEventListenerConstants {
-  /*
-   * DbNotificationListener keys reserved for updating ListenerEvent parameters.
-   *
-   * DB_NOTIFICATION_EVENT_ID_KEY_NAME This key will have the event identifier that DbNotificationListener
-   *                                   processed during an event. This event identifier might be shared
-   *                                   across other MetaStoreEventListener implementations.
-   */
-  public static final String DB_NOTIFICATION_EVENT_ID_KEY_NAME = "DB_NOTIFICATION_EVENT_ID_KEY_NAME";
-}
\ No newline at end of file
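
The constants class deleted above exists so that the event id recorded by DbNotificationListener (the
listenerEvent.putParameter call removed earlier in this commit) can be picked up by listeners that run
later in the same chain. A minimal, hedged sketch of such a consumer follows; the class name is
illustrative, and the getParameters() accessor on the event is assumed from the putParameter usage
shown above rather than taken from this diff.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.events.CreateTableEvent;

// Illustrative only: reads the notification id that DbNotificationListener stored on the
// ListenerEvent under DB_NOTIFICATION_EVENT_ID_KEY_NAME (the constant in the deleted class above).
public class NotificationIdAwareListener extends MetaStoreEventListener {

  public NotificationIdAwareListener(Configuration config) {
    super(config);
  }

  @Override
  public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
    String eventId = tableEvent.getParameters().get("DB_NOTIFICATION_EVENT_ID_KEY_NAME");
    // eventId is null when DbNotificationListener did not run (or ran after this listener);
    // otherwise it can be used to correlate downstream work with the stored notification.
  }
}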

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/streaming/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/pom.xml b/hcatalog/streaming/pom.xml
index 5bea0a6..e765305 100644
--- a/hcatalog/streaming/pom.xml
+++ b/hcatalog/streaming/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <groupId>org.apache.hive.hcatalog</groupId>
     <artifactId>hive-hcatalog</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java
deleted file mode 100644
index 78987ab..0000000
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hive.hcatalog.streaming;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Properties;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.AbstractSerDe;
-import org.apache.hadoop.hive.serde2.RegexSerDe;
-import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.hadoop.hive.serde2.SerDeUtils;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.StructField;
-import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.io.Text;
-
-/**
- * Streaming Writer handles text input data with regex. Uses
- * org.apache.hadoop.hive.serde2.RegexSerDe
- */
-public class StrictRegexWriter extends AbstractRecordWriter {
-  private RegexSerDe serde;
-  private final StructObjectInspector recordObjInspector;
-  private final ObjectInspector[] bucketObjInspectors;
-  private final StructField[] bucketStructFields;
-  
-  /**
-   * @param endPoint the end point to write to
-   * @param conn connection this Writer is to be used with
-   * @throws ConnectionError
-   * @throws SerializationError
-   * @throws StreamingException
-   */
-  public StrictRegexWriter(HiveEndPoint endPoint, StreamingConnection conn)
-          throws ConnectionError, SerializationError, StreamingException {
-    this(null, endPoint, null, conn);
-  }
-  
-  /**
-   * @param endPoint the end point to write to
-   * @param conf a Hive conf object. Should be null if not using advanced Hive settings.
-   * @param conn connection this Writer is to be used with
-   * @throws ConnectionError
-   * @throws SerializationError
-   * @throws StreamingException
-   */
-  public StrictRegexWriter(HiveEndPoint endPoint, HiveConf conf, StreamingConnection conn)
-          throws ConnectionError, SerializationError, StreamingException {
-    this(null, endPoint, conf, conn);
-  }
-  
-  /**
-   * @param regex to parse the data
-   * @param endPoint the end point to write to
-   * @param conf a Hive conf object. Should be null if not using advanced Hive settings.
-   * @param conn connection this Writer is to be used with
-   * @throws ConnectionError
-   * @throws SerializationError
-   * @throws StreamingException
-   */
-  public StrictRegexWriter(String regex, HiveEndPoint endPoint, HiveConf conf, StreamingConnection conn)
-          throws ConnectionError, SerializationError, StreamingException {
-    super(endPoint, conf, conn);
-    this.serde = createSerde(tbl, conf, regex);
-    // get ObjInspectors for entire record and bucketed cols
-    try {
-      recordObjInspector = ( StructObjectInspector ) serde.getObjectInspector();
-      this.bucketObjInspectors = getObjectInspectorsForBucketedCols(bucketIds, recordObjInspector);
-    } catch (SerDeException e) {
-      throw new SerializationError("Unable to get ObjectInspector for bucket columns", e);
-    }
-
-    // get StructFields for bucketed cols
-    bucketStructFields = new StructField[bucketIds.size()];
-    List<? extends StructField> allFields = recordObjInspector.getAllStructFieldRefs();
-    for (int i = 0; i < bucketIds.size(); i++) {
-      bucketStructFields[i] = allFields.get(bucketIds.get(i));
-    }
-  }
-  
-  @Override
-  public AbstractSerDe getSerde() {
-    return serde;
-  }
-
-  @Override
-  protected StructObjectInspector getRecordObjectInspector() {
-    return recordObjInspector;
-  }
-
-  @Override
-  protected StructField[] getBucketStructFields() {
-    return bucketStructFields;
-  }
-
-  @Override
-  protected ObjectInspector[] getBucketObjectInspectors() {
-    return bucketObjInspectors;
-  }
-
-
-  @Override
-  public void write(long transactionId, byte[] record)
-          throws StreamingIOFailure, SerializationError {
-    try {
-      Object encodedRow = encode(record);
-      int bucket = getBucket(encodedRow);
-      getRecordUpdater(bucket).insert(transactionId, encodedRow);
-    } catch (IOException e) {
-      throw new StreamingIOFailure("Error writing record in transaction("
-              + transactionId + ")", e);
-    }
-  }
-
-  /**
-   * Creates RegexSerDe
-   * @param tbl   used to create serde
-   * @param conf  used to create serde
-   * @param regex  used to create serde
-   * @return
-   * @throws SerializationError if serde could not be initialized
-   */
-  private static RegexSerDe createSerde(Table tbl, HiveConf conf, String regex)
-          throws SerializationError {
-    try {
-      Properties tableProps = MetaStoreUtils.getTableMetadata(tbl);
-      tableProps.setProperty(RegexSerDe.INPUT_REGEX, regex);
-      ArrayList<String> tableColumns = getCols(tbl);
-      tableProps.setProperty(serdeConstants.LIST_COLUMNS, StringUtils.join(tableColumns, ","));
-      RegexSerDe serde = new RegexSerDe();
-      SerDeUtils.initializeSerDe(serde, conf, tableProps, null);
-      return serde;
-    } catch (SerDeException e) {
-      throw new SerializationError("Error initializing serde " + RegexSerDe.class.getName(), e);
-    }
-  }
-  
-  private static ArrayList<String> getCols(Table table) {
-    List<FieldSchema> cols = table.getSd().getCols();
-    ArrayList<String> colNames = new ArrayList<String>(cols.size());
-    for (FieldSchema col : cols) {
-      colNames.add(col.getName().toLowerCase());
-    }
-    return colNames;
-  }
-
-  /**
-   * Encode Utf8 encoded string bytes using RegexSerDe
-   * 
-   * @param utf8StrRecord
-   * @return The encoded object
-   * @throws SerializationError
-   */
-  @Override
-  public Object encode(byte[] utf8StrRecord) throws SerializationError {
-    try {
-      Text blob = new Text(utf8StrRecord);
-      return serde.deserialize(blob);
-    } catch (SerDeException e) {
-      throw new SerializationError("Unable to convert byte[] record into Object", e);
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index 097de9b..bf29993 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -64,6 +64,10 @@ import org.apache.hadoop.hive.ql.CommandNeedRetryException;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.IOConstants;
+import org.apache.hadoop.hive.shims.Utils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.orc.impl.OrcAcidUtils;
+import org.apache.orc.tools.FileDump;
 import org.apache.hadoop.hive.ql.io.orc.OrcFile;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.io.orc.OrcStruct;
@@ -78,15 +82,11 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableIntObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableLongObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableStringObjectInspector;
-import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.orc.impl.OrcAcidUtils;
-import org.apache.orc.tools.FileDump;
 import org.apache.thrift.TException;
 import org.junit.After;
 import org.junit.Assert;
@@ -485,9 +485,9 @@ public class TestStreaming {
 
     NullWritable key = rr.createKey();
     OrcStruct value = rr.createValue();
-    for (String record : records) {
+    for (int i = 0; i < records.length; i++) {
       Assert.assertEquals(true, rr.next(key, value));
-      Assert.assertEquals(record, value.toString());
+      Assert.assertEquals(records[i], value.toString());
     }
     Assert.assertEquals(false, rr.next(key, value));
   }
@@ -741,7 +741,7 @@ public class TestStreaming {
     txnBatch.write("1,Hello streaming".getBytes());
     txnBatch.commit();
 
-    checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}");
+    checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}");
 
     Assert.assertEquals(TransactionBatch.TxnState.COMMITTED
       , txnBatch.getCurrentTransactionState());
@@ -753,11 +753,11 @@ public class TestStreaming {
     txnBatch.write("2,Welcome to streaming".getBytes());
 
     // data should not be visible
-    checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}");
+    checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}");
 
     txnBatch.commit();
 
-    checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}",
+    checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}",
       "{2, Welcome to streaming}");
 
     txnBatch.close();
@@ -787,75 +787,6 @@ public class TestStreaming {
   }
 
   @Test
-  public void testTransactionBatchCommit_Regex() throws Exception {
-    testTransactionBatchCommit_Regex(null);
-  }
-  @Test
-  public void testTransactionBatchCommit_RegexUGI() throws Exception {
-    testTransactionBatchCommit_Regex(Utils.getUGI());
-  }
-  private void testTransactionBatchCommit_Regex(UserGroupInformation ugi) throws Exception {
-    HiveEndPoint endPt = new HiveEndPoint(metaStoreURI, dbName, tblName,
-      partitionVals);
-    StreamingConnection connection = endPt.newConnection(true, conf, ugi, "UT_" + Thread.currentThread().getName());
-    String regex = "([^,]*),(.*)";
-    StrictRegexWriter writer = new StrictRegexWriter(regex, endPt, conf, connection);
-
-    // 1st Txn
-    TransactionBatch txnBatch =  connection.fetchTransactionBatch(10, writer);
-    txnBatch.beginNextTransaction();
-    Assert.assertEquals(TransactionBatch.TxnState.OPEN
-      , txnBatch.getCurrentTransactionState());
-    txnBatch.write("1,Hello streaming".getBytes());
-    txnBatch.commit();
-
-    checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}");
-
-    Assert.assertEquals(TransactionBatch.TxnState.COMMITTED
-      , txnBatch.getCurrentTransactionState());
-
-    // 2nd Txn
-    txnBatch.beginNextTransaction();
-    Assert.assertEquals(TransactionBatch.TxnState.OPEN
-      , txnBatch.getCurrentTransactionState());
-    txnBatch.write("2,Welcome to streaming".getBytes());
-
-    // data should not be visible
-    checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}");
-
-    txnBatch.commit();
-
-    checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}",
-      "{2, Welcome to streaming}");
-
-    txnBatch.close();
-    Assert.assertEquals(TransactionBatch.TxnState.INACTIVE
-      , txnBatch.getCurrentTransactionState());
-
-
-    connection.close();
-
-
-    // To Unpartitioned table
-    endPt = new HiveEndPoint(metaStoreURI, dbName2, tblName2, null);
-    connection = endPt.newConnection(true, conf, ugi, "UT_" + Thread.currentThread().getName());
-    regex = "([^:]*):(.*)";
-    writer = new StrictRegexWriter(regex, endPt, conf, connection);
-
-    // 1st Txn
-    txnBatch =  connection.fetchTransactionBatch(10, writer);
-    txnBatch.beginNextTransaction();
-    Assert.assertEquals(TransactionBatch.TxnState.OPEN
-      , txnBatch.getCurrentTransactionState());
-    txnBatch.write("1:Hello streaming".getBytes());
-    txnBatch.commit();
-
-    Assert.assertEquals(TransactionBatch.TxnState.COMMITTED
-      , txnBatch.getCurrentTransactionState());
-    connection.close();
-  }
-  
-  @Test
   public void testTransactionBatchCommit_Json() throws Exception {
     HiveEndPoint endPt = new HiveEndPoint(metaStoreURI, dbName, tblName,
             partitionVals);
@@ -871,7 +802,7 @@ public class TestStreaming {
     txnBatch.write(rec1.getBytes());
     txnBatch.commit();
 
-    checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}");
+    checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}");
 
     Assert.assertEquals(TransactionBatch.TxnState.COMMITTED
             , txnBatch.getCurrentTransactionState());
@@ -998,7 +929,7 @@ public class TestStreaming {
     txnBatch.write("2,Welcome to streaming".getBytes());
     txnBatch.commit();
 
-    checkDataWritten(partLoc, 14, 23, 1, 1, "{1, Hello streaming}",
+    checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}",
             "{2, Welcome to streaming}");
 
     txnBatch.close();
@@ -1017,13 +948,13 @@ public class TestStreaming {
     txnBatch.write("1,Hello streaming".getBytes());
     txnBatch.commit();
 
-    checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}");
+    checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}");
 
     txnBatch.beginNextTransaction();
     txnBatch.write("2,Welcome to streaming".getBytes());
     txnBatch.commit();
 
-    checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}",
+    checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}",
             "{2, Welcome to streaming}");
 
     txnBatch.close();
@@ -1034,14 +965,14 @@ public class TestStreaming {
     txnBatch.write("3,Hello streaming - once again".getBytes());
     txnBatch.commit();
 
-    checkDataWritten(partLoc, 15, 34, 1, 2, "{1, Hello streaming}",
+    checkDataWritten(partLoc, 1, 20, 1, 2, "{1, Hello streaming}",
             "{2, Welcome to streaming}", "{3, Hello streaming - once again}");
 
     txnBatch.beginNextTransaction();
     txnBatch.write("4,Welcome to streaming - once again".getBytes());
     txnBatch.commit();
 
-    checkDataWritten(partLoc, 15, 34, 1, 2, "{1, Hello streaming}",
+    checkDataWritten(partLoc, 1, 20, 1, 2, "{1, Hello streaming}",
             "{2, Welcome to streaming}", "{3, Hello streaming - once again}",
             "{4, Welcome to streaming - once again}");
 
@@ -1078,11 +1009,11 @@ public class TestStreaming {
 
     txnBatch2.commit();
 
-    checkDataWritten(partLoc, 24, 33, 1, 1, "{3, Hello streaming - once again}");
+    checkDataWritten(partLoc, 11, 20, 1, 1, "{3, Hello streaming - once again}");
 
     txnBatch1.commit();
 
-    checkDataWritten(partLoc, 14, 33, 1, 2, "{1, Hello streaming}", "{3, Hello streaming - once again}");
+    checkDataWritten(partLoc, 1, 20, 1, 2, "{1, Hello streaming}", "{3, Hello streaming - once again}");
 
     txnBatch1.beginNextTransaction();
     txnBatch1.write("2,Welcome to streaming".getBytes());
@@ -1090,17 +1021,17 @@ public class TestStreaming {
     txnBatch2.beginNextTransaction();
     txnBatch2.write("4,Welcome to streaming - once again".getBytes());
 
-    checkDataWritten(partLoc, 14, 33, 1, 2, "{1, Hello streaming}", "{3, Hello streaming - once again}");
+    checkDataWritten(partLoc, 1, 20, 1, 2, "{1, Hello streaming}", "{3, Hello streaming - once again}");
 
     txnBatch1.commit();
 
-    checkDataWritten(partLoc, 14, 33, 1, 2, "{1, Hello streaming}",
+    checkDataWritten(partLoc, 1, 20, 1, 2, "{1, Hello streaming}",
         "{2, Welcome to streaming}",
         "{3, Hello streaming - once again}");
 
     txnBatch2.commit();
 
-    checkDataWritten(partLoc, 14, 33, 1, 2, "{1, Hello streaming}",
+    checkDataWritten(partLoc, 1, 20, 1, 2, "{1, Hello streaming}",
         "{2, Welcome to streaming}",
         "{3, Hello streaming - once again}",
         "{4, Welcome to streaming - once again}");
@@ -1769,7 +1700,7 @@ public class TestStreaming {
     txnBatch.heartbeat();//this is no-op on closed batch
     txnBatch.abort();//ditto
     GetOpenTxnsInfoResponse r = msClient.showTxns();
-    Assert.assertEquals("HWM didn't match", 17, r.getTxn_high_water_mark());
+    Assert.assertEquals("HWM didn't match", 2, r.getTxn_high_water_mark());
     List<TxnInfo> ti = r.getOpen_txns();
     Assert.assertEquals("wrong status ti(0)", TxnState.ABORTED, ti.get(0).getState());
     Assert.assertEquals("wrong status ti(1)", TxnState.ABORTED, ti.get(1).getState());
@@ -1833,7 +1764,7 @@ public class TestStreaming {
       expectedEx != null && expectedEx.getMessage().contains("has been closed()"));
 
     r = msClient.showTxns();
-    Assert.assertEquals("HWM didn't match", 19, r.getTxn_high_water_mark());
+    Assert.assertEquals("HWM didn't match", 4, r.getTxn_high_water_mark());
     ti = r.getOpen_txns();
     Assert.assertEquals("wrong status ti(0)", TxnState.ABORTED, ti.get(0).getState());
     Assert.assertEquals("wrong status ti(1)", TxnState.ABORTED, ti.get(1).getState());
@@ -1856,7 +1787,7 @@ public class TestStreaming {
       expectedEx != null && expectedEx.getMessage().contains("Simulated fault occurred"));
     
     r = msClient.showTxns();
-    Assert.assertEquals("HWM didn't match", 21, r.getTxn_high_water_mark());
+    Assert.assertEquals("HWM didn't match", 6, r.getTxn_high_water_mark());
     ti = r.getOpen_txns();
     Assert.assertEquals("wrong status ti(3)", TxnState.ABORTED, ti.get(3).getState());
     Assert.assertEquals("wrong status ti(4)", TxnState.ABORTED, ti.get(4).getState());

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/java-client/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/java-client/pom.xml b/hcatalog/webhcat/java-client/pom.xml
index 3bb9f4d..3b53664 100644
--- a/hcatalog/webhcat/java-client/pom.xml
+++ b/hcatalog/webhcat/java-client/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <groupId>org.apache.hive.hcatalog</groupId>
     <artifactId>hive-hcatalog</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
index 86d3acb..b9cb067 100644
--- a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
+++ b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
@@ -434,7 +434,7 @@ public class TestHCatClient {
     HCatClient client = HCatClient.create(new Configuration(hcatConf));
     boolean isExceptionCaught = false;
     // Table creation with a long table name causes ConnectionFailureException
-    final String tableName = "Temptable" + new BigInteger(260, new Random()).toString(2);
+    final String tableName = "Temptable" + new BigInteger(200, new Random()).toString(2);
 
     ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
     cols.add(new HCatFieldSchema("id", Type.INT, "id columns"));

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/pom.xml b/hcatalog/webhcat/svr/pom.xml
index a55ffe9..c5ad387 100644
--- a/hcatalog/webhcat/svr/pom.xml
+++ b/hcatalog/webhcat/svr/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <groupId>org.apache.hive.hcatalog</groupId>
     <artifactId>hive-hcatalog</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../../pom.xml</relativePath>
   </parent>
 
@@ -45,50 +45,9 @@
       <artifactId>hive-hcatalog-core</artifactId>
       <version>${project.version}</version>
       <scope>provided</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.eclipse.jetty</groupId>
-          <artifactId>jetty-runner</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty-sslengine</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-	  <artifactId>jetty-util</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-	  <artifactId>jsp-2.1</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-	  <artifactId>jsp-api-2.1</artifactId>
-        </exclusion>
-      </exclusions>
     </dependency>
     <!-- inter-project -->
     <dependency>
-      <groupId>org.eclipse.jetty</groupId>
-      <artifactId>jetty-rewrite</artifactId>
-      <version>${jetty.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jetty</groupId>
-      <artifactId>jetty-server</artifactId>
-      <version>${jetty.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jetty</groupId>
-      <artifactId>jetty-servlet</artifactId>
-      <version>${jetty.version}</version>
-    </dependency>
-    <dependency>
       <groupId>com.sun.jersey</groupId>
       <artifactId>jersey-core</artifactId>
       <version>${jersey.version}</version>
@@ -134,6 +93,11 @@
       <version>${jackson.version}</version>
     </dependency>
     <dependency>
+      <groupId>org.eclipse.jetty.aggregate</groupId>
+      <artifactId>jetty-all-server</artifactId>
+      <version>${jetty.version}</version>
+    </dependency>
+    <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>jul-to-slf4j</artifactId>
       <version>${slf4j.version}</version>
@@ -143,7 +107,7 @@
       <artifactId>hadoop-auth</artifactId>
       <version>${hadoop.version}</version>
         <exclusions>
-          <exclusion>
+             <exclusion>
             <groupId>org.slf4j</groupId>
             <artifactId>slf4j-log4j12</artifactId>
           </exclusion>
@@ -157,42 +121,16 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <version>${hadoop.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty-util</artifactId>
-	</exclusion>
-      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
       <version>${hadoop.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty-util</artifactId>
-        </exclusion>
-      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-core</artifactId>
       <version>${hadoop.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty-util</artifactId>
-        </exclusion>
-      </exclusions>
     </dependency>
     <!-- test inter-project -->
     <dependency>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
index 0ea7d88..54d0907 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
@@ -111,43 +111,6 @@ public class AppConfig extends Configuration {
   public static final String MR_AM_MEMORY_MB     = "templeton.mr.am.memory.mb";
   public static final String TEMPLETON_JOBSLIST_ORDER = "templeton.jobs.listorder";
 
-  /*
-   * These parameters control the maximum number of concurrent job submit/status/list
-   * operations in the templeton service. Requests beyond that limit are rejected
-   * with BusyException.
-   */
-  public static final String JOB_SUBMIT_MAX_THREADS = "templeton.parallellism.job.submit";
-  public static final String JOB_STATUS_MAX_THREADS = "templeton.parallellism.job.status";
-  public static final String JOB_LIST_MAX_THREADS = "templeton.parallellism.job.list";
-
-  /*
-   * These parameters control the maximum time a job submit/status/list operation may
-   * execute in the templeton service. On time out, the execution is interrupted and
-   * a TimeoutException is returned to the client. On time out:
-   *   For list and status operations no further action is needed, as they are read requests.
-   *   For the submit operation, a best effort is made to kill the job if it was already
-   *     created. Enabling this parameter may have the following side effects:
-   *     1) The job may remain active for some time after the client receives the response
-   *        to the submit operation, so a subsequent list operation could show the newly
-   *        created job, which may eventually be killed with no guarantees.
-   *     2) If the client retries the submit operation, duplicate jobs may be triggered.
-   *
-   * Time out configs should be configured in seconds.
-   */
-  public static final String JOB_SUBMIT_TIMEOUT   = "templeton.job.submit.timeout";
-  public static final String JOB_STATUS_TIMEOUT   = "templeton.job.status.timeout";
-  public static final String JOB_LIST_TIMEOUT   = "templeton.job.list.timeout";
-
-  /*
-   * If an execution time out is configured for the submit operation, the job may need to
-   * be killed when that time out fires. These parameters control the maximum number of
-   * retries and the retry wait time, in seconds, for that kill task.
-   */
-  public static final String JOB_TIMEOUT_TASK_RETRY_COUNT = "templeton.job.timeout.task.retry.count";
-  public static final String JOB_TIMEOUT_TASK_RETRY_INTERVAL = "templeton.job.timeout.task.retry.interval";
-
   /**
    * see webhcat-default.xml
    */
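
The keys removed above are ordinary Hadoop Configuration properties (AppConfig extends
Configuration). A hedged sketch of how a caller reads them, with the default of 0 meaning
"no thread pool / no time out" as the removed comments describe; the class name is made up:

    import org.apache.hadoop.conf.Configuration;

    public class ThrottlingConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // 0 (the default) means the request runs in the caller's thread and never times out.
        int submitThreads     = conf.getInt("templeton.parallellism.job.submit", 0);
        int submitTimeoutSec  = conf.getInt("templeton.job.submit.timeout", 0);
        int killRetryCount    = conf.getInt("templeton.job.timeout.task.retry.count", 0);
        int killRetryInterval = conf.getInt("templeton.job.timeout.task.retry.interval", 0);
        System.out.println(submitThreads + " " + submitTimeoutSec + " "
            + killRetryCount + " " + killRetryInterval);
      }
    }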

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/DeleteDelegator.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/DeleteDelegator.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/DeleteDelegator.java
index 622f92d..4b2dfec 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/DeleteDelegator.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/DeleteDelegator.java
@@ -24,7 +24,6 @@ import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.shims.HadoopShims.WebHCatJTShim;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.mapred.JobID;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -42,11 +41,10 @@ public class DeleteDelegator extends TempletonDelegator {
   public QueueStatusBean run(String user, String id)
     throws NotAuthorizedException, BadParam, IOException, InterruptedException
   {
-    UserGroupInformation ugi = null;
+    UserGroupInformation ugi = UgiFactory.getUgi(user);
     WebHCatJTShim tracker = null;
     JobState state = null;
     try {
-      ugi = UgiFactory.getUgi(user);
       tracker = ShimLoader.getHadoopShims().getWebHCatShim(appConf, ugi);
       JobID jobid = StatusDelegator.StringToJobID(id);
       if (jobid == null)
@@ -71,8 +69,6 @@ public class DeleteDelegator extends TempletonDelegator {
         tracker.close();
       if (state != null)
         state.close();
-      if (ugi != null)
-        FileSystem.closeAllForUGI(ugi);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
index 1953028..f0296cb 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
@@ -49,7 +49,7 @@ public class HiveDelegator extends LauncherDelegator {
                String statusdir, String callback, String completedUrl, boolean enablelog,
                Boolean enableJobReconnect)
     throws NotAuthorizedException, BadParam, BusyException, QueueException,
-    ExecuteException, IOException, InterruptedException, TooManyRequestsException
+    ExecuteException, IOException, InterruptedException
   {
     runAs = user;
     List<String> args = makeArgs(execute, srcFile, defines, hiveArgs, otherFiles, statusdir,

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/JarDelegator.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/JarDelegator.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/JarDelegator.java
index 1246b40..84cd5b9 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/JarDelegator.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/JarDelegator.java
@@ -46,7 +46,7 @@ public class JarDelegator extends LauncherDelegator {
                boolean usesHcatalog, String completedUrl,
                boolean enablelog, Boolean enableJobReconnect, JobType jobType)
     throws NotAuthorizedException, BadParam, BusyException, QueueException,
-    ExecuteException, IOException, InterruptedException, TooManyRequestsException {
+    ExecuteException, IOException, InterruptedException {
     runAs = user;
     List<String> args = makeArgs(jar, mainClass,
       libjars, files, jarArgs, defines,

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/JobCallable.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/JobCallable.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/JobCallable.java
deleted file mode 100644
index e703eff..0000000
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/JobCallable.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hive.hcatalog.templeton;
-
-import java.util.concurrent.Callable;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public abstract class JobCallable<T> implements Callable<T> {
-  private static final Logger LOG = LoggerFactory.getLogger(JobCallable.class);
-
-  static public enum JobState {
-    STARTED,
-    FAILED,
-    COMPLETED
-  }
-
-  /*
-   * Job state of the job request. Changes to the state are synchronized via
-   * setStateAndResult. This is required because two different threads, the
-   * main thread and the job execute thread, try to change the state and
-   * organize clean up tasks.
-   */
-  private JobState jobState = JobState.STARTED;
-
-  /*
-   * Result of JobCallable task after successful task completion. This is
-   * expected to be set by the thread which executes JobCallable task.
-   */
-  public T returnResult = null;
-
-  /*
-   * Sets the job state to FAILED. Returns true if FAILED status is set.
-   * Otherwise, it returns false.
-   */
-  public boolean setJobStateFailed() {
-    return setStateAndResult(JobState.FAILED, null);
-  }
-
-  /*
-   * Sets the job state to COMPLETED and also sets the results value. Returns true
-   * if COMPLETED status is set. Otherwise, it returns false.
-   */
-  public boolean setJobStateCompleted(T result) {
-    return setStateAndResult(JobState.COMPLETED, result);
-  }
-
-  /*
-   * Sets the job state and result. Returns true if status and result are set.
-   * Otherwise, it returns false.
-   */
-  private synchronized boolean setStateAndResult(JobState jobState, T result) {
-    if (this.jobState == JobState.STARTED) {
-      this.jobState = jobState;
-      this.returnResult = result;
-      return true;
-    } else {
-      LOG.info("Failed to set job state to " + jobState + " due to job state "
-                  + this.jobState + ". Expected state is " + JobState.STARTED);
-    }
-
-    return false;
-  }
-
-  /*
-   * Executes the callable task with help of execute() call and gets the result
-   * of the task. It also sets job status as COMPLETED if state is not already
-   * set to FAILED and returns result to future.
-   */
-  public T call() throws Exception {
-
-    /*
-     * Don't catch any execution exceptions here and let the caller catch it.
-     */
-    T result = this.execute();
-
-    if (!this.setJobStateCompleted(result)) {
-     /*
-      * Failed to set the job status to COMPLETED, which means the main thread has
-      * already exited and is no longer waiting for the result. Call cleanup() to
-      * execute any cleanup.
-      */
-      cleanup();
-      return null;
-    }
-
-    return this.returnResult;
-  }
-
-  /*
-   * Abstract method to be overridden for task execution.
-   */
-  public abstract T execute() throws Exception;
-
-  /*
-   * Cleanup method called to run cleanup tasks if job state is FAILED. By default,
-   * no cleanup is provided.
-   */
-  public void cleanup() {}
-}
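
A short usage sketch (assuming the JobCallable class deleted above is on the classpath) of the
hand-off it implements: if the caller times out first, setJobStateFailed() succeeds and the
worker runs cleanup() itself; if the worker finished first, the caller can still read
returnResult. The executor, sleep times and job id are illustrative only.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;
    import org.apache.hive.hcatalog.templeton.JobCallable;

    public class JobCallableSketch {
      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        JobCallable<String> task = new JobCallable<String>() {
          @Override
          public String execute() throws Exception {
            Thread.sleep(2000);          // stands in for a slow job submission
            return "job_0001";           // hypothetical job id
          }
          @Override
          public void cleanup() {
            System.out.println("caller gave up; worker cleans up the job itself");
          }
        };
        Future<String> future = pool.submit(task);
        try {
          System.out.println("result: " + future.get(1, TimeUnit.SECONDS));
        } catch (TimeoutException e) {
          if (!task.setJobStateFailed()) {
            // The worker completed just before the timeout; its result is still usable.
            System.out.println("finished anyway: " + task.returnResult);
          }
        } finally {
          pool.shutdownNow();
        }
      }
    }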

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/JobRequestExecutor.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/JobRequestExecutor.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/JobRequestExecutor.java
deleted file mode 100644
index 9ac4588..0000000
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/JobRequestExecutor.java
+++ /dev/null
@@ -1,341 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hive.hcatalog.templeton;
-
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.Future;
-
-import org.apache.commons.lang3.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class JobRequestExecutor<T> {
-  private static final Logger LOG = LoggerFactory.getLogger(JobRequestExecutor.class);
-  private static AppConfig appConf = Main.getAppConfigInstance();
-
-  /*
-   * Thread pool to execute job requests.
-   */
-  private ThreadPoolExecutor jobExecutePool = null;
-
-  /*
-   * Type of job request.
-   */
-  private JobRequestType requestType;
-
-  /*
-   * Config name used to find the number of concurrent requests.
-   */
-  private String concurrentRequestsConfigName;
-
-  /*
-   * Config name used to find the maximum time job request can be executed.
-   */
-  private String jobTimeoutConfigName;
-
-  /*
-   * Job request execution time out in seconds. If it is 0 then request
-   * will not be timed out.
-   */
-  private int requestExecutionTimeoutInSec = 0;
-
-  /*
-   * Amount of time an idle thread can stay alive in the thread pool before it is cleaned
-   * up. Since allowCoreThreadTimeOut is enabled below, this applies to core threads as well.
-   */
-  private int threadKeepAliveTimeInHours = 1;
-
-  /*
-   * Maximum number of times a cancel request is sent to job request execution
-   * thread. Future.cancel may not be able to interrupt the thread if it is
-   * blocked on network calls.
-   */
-  private int maxTaskCancelRetryCount = 10;
-
-  /*
-   * Wait time in milliseconds before another cancel request is made.
-   */
-  private int maxTaskCancelRetryWaitTimeInMs = 1000;
-
-  /*
-   * A flag indicating whether to cancel the task when a TimeoutException,
-   * InterruptedException or CancellationException is raised. The default is to cancel
-   * the executing thread.
-   */
-  private boolean enableCancelTask = true;
-
-  /*
-   * Job Request type.
-   */
-  public enum JobRequestType {
-    Submit,
-    Status,
-    List
-  }
-
-  /*
-   * Creates a job request object and sets up execution environment. Creates a thread pool
-   * to execute job requests.
-   *
-   * @param requestType
-   *          Job request type
-   *
-   * @param concurrentRequestsConfigName
-   *          Config name to be used to extract number of concurrent requests to be serviced.
-   *
-   * @param jobTimeoutConfigName
-   *          Config name to be used to extract maximum time a task can execute a request.
-   *
-   * @param enableCancelTask
-   *          A flag indicating whether to cancel the task when a TimeoutException,
-   *          InterruptedException or CancellationException is raised.
-   *
-   */
-  public JobRequestExecutor(JobRequestType requestType, String concurrentRequestsConfigName,
-                            String jobTimeoutConfigName, boolean enableCancelTask) {
-
-    this.concurrentRequestsConfigName = concurrentRequestsConfigName;
-    this.jobTimeoutConfigName = jobTimeoutConfigName;
-    this.requestType = requestType;
-    this.enableCancelTask = enableCancelTask;
-
-    /*
-     * The default number of threads is 0. That means the thread pool is not used and the
-     * operation is executed in the current thread.
-     */
-    int threads = !StringUtils.isEmpty(concurrentRequestsConfigName) ?
-                                appConf.getInt(concurrentRequestsConfigName, 0) : 0;
-
-    if (threads > 0) {
-      /*
-       * Create a thread pool with no queue wait time to execute the operation. This ensures
-       * that job requests are rejected when the maximum number of threads are already busy.
-       */
-      this.jobExecutePool = new ThreadPoolExecutor(threads, threads,
-                             threadKeepAliveTimeInHours, TimeUnit.HOURS,
-                             new SynchronousQueue<Runnable>());
-       this.jobExecutePool.allowCoreThreadTimeOut(true);
-
-      /*
-       * Get the job request time out value. If this configuration value is set to 0
-       * then job request will wait until it finishes.
-       */
-      if (!StringUtils.isEmpty(jobTimeoutConfigName)) {
-        this.requestExecutionTimeoutInSec = appConf.getInt(jobTimeoutConfigName, 0);
-      }
-
-      LOG.info("Configured " + threads + " threads for job request type " + this.requestType
-                 + " with time out " + this.requestExecutionTimeoutInSec + " s.");
-    } else {
-      /*
-       * If no threads are configured, requests are executed in the current thread itself.
-       */
-      LOG.info("No thread pool configured for job request type " + this.requestType);
-    }
-  }
-
-  /*
-   * Creates a job request object and sets up execution environment. Creates a thread pool
-   * to execute job requests.
-   *
-   * @param requestType
-   *          Job request type
-   *
-   * @param concurrentRequestsConfigName
-   *          Config name to be used to extract number of concurrent requests to be serviced.
-   *
-   * @param jobTimeoutConfigName
-   *          Config name to be used to extract maximum time a task can execute a request.
-   *
-   */
-  public JobRequestExecutor(JobRequestType requestType, String concurrentRequestsConfigName,
-                            String jobTimeoutConfigName) {
-    this(requestType, concurrentRequestsConfigName, jobTimeoutConfigName, true);
-  }
-
-  /*
-   * Returns true if the thread pool is created and can be used for executing a job request.
-   * Otherwise, returns false.
-   */
-  public boolean isThreadPoolEnabled() {
-    return this.jobExecutePool != null;
-  }
-
-  /*
-   * Executes a job request operation on the thread pool. Callers are expected to check
-   * isThreadPoolEnabled() first and to run the request in the current thread themselves
-   * when no thread pool is configured.
-   *
-   * @param jobExecuteCallable
-   *          Callable object that runs the job request task.
-   *
-   */
-  public T execute(JobCallable<T> jobExecuteCallable) throws InterruptedException,
-                 TimeoutException, TooManyRequestsException, ExecutionException {
-    /*
-     * The callable must not be null, and the thread pool must be configured, for the
-     * request to be executed.
-     */
-    assert (jobExecuteCallable != null);
-    assert (this.jobExecutePool != null);
-
-    String type = this.requestType.toString().toLowerCase();
-
-    String retryMessageForConcurrentRequests = "Please wait for some time before retrying "
-                  + "the operation. Please refer to the config " + concurrentRequestsConfigName
-                  + " to configure concurrent requests.";
-
-    LOG.debug("Starting new " + type + " job request with time out " + this.requestExecutionTimeoutInSec
-              + " seconds.");
-    Future<T> future = null;
-
-    try {
-      future = this.jobExecutePool.submit(jobExecuteCallable);
-    } catch (RejectedExecutionException rejectedException) {
-      /*
-       * No thread is available to execute the job request. Raise TooManyRequestsException
-       * so that the client can retry the operation.
-       */
-      String tooManyRequestsExceptionMessage = "Unable to service the " + type + " job request as "
-                        + "templeton service is busy with too many " + type + " job requests. "
-                        + retryMessageForConcurrentRequests;
-
-      LOG.warn(tooManyRequestsExceptionMessage);
-      throw new TooManyRequestsException(tooManyRequestsExceptionMessage);
-    }
-
-    T result = null;
-
-    try {
-      result = this.requestExecutionTimeoutInSec > 0
-                ? future.get(this.requestExecutionTimeoutInSec, TimeUnit.SECONDS) : future.get();
-    } catch (TimeoutException e) {
-      /*
-       * See if the execution thread has just completed operation and result is available.
-       * If result is available then return the result. Otherwise, raise exception.
-       */
-      if ((result = tryGetJobResultOrSetJobStateFailed(jobExecuteCallable)) == null) {
-        String message = this.requestType + " job request got timed out. Please wait for some time "
-                       + "before retrying the operation. Please refer to the config "
-                       + jobTimeoutConfigName + " to configure job request time out.";
-        LOG.warn(message);
-
-        /*
-         * Throw TimeoutException to caller.
-         */
-        throw new TimeoutException(message);
-      }
-    } catch (InterruptedException e) {
-      /*
-       * See if the execution thread has just completed operation and result is available.
-       * If result is available then return the result. Otherwise, raise exception.
-       */
-      if ((result = tryGetJobResultOrSetJobStateFailed(jobExecuteCallable)) == null) {
-        String message = this.requestType + " job request got interrupted. Please wait for some time "
-                       + "before retrying the operation.";
-        LOG.warn(message);
-
-        /*
-         * Throw InterruptedException to caller.
-         */
-        throw new InterruptedException(message);
-      }
-    } catch (CancellationException e) {
-      /*
-       * See if the execution thread has just completed operation and result is available.
-       * If result is available then return the result. Otherwise, raise exception.
-       */
-      if ((result = tryGetJobResultOrSetJobStateFailed(jobExecuteCallable)) == null) {
-        String message = this.requestType + " job request got cancelled and thread got interrupted. "
-                       + "Please wait for some time before retrying the operation.";
-        LOG.warn(message);
-
-        throw new InterruptedException(message);
-      }
-    } finally {
-      /*
-       * If the thread is still active and needs to be cancelled then cancel it. This may
-       * happen in case task got interrupted, or timed out.
-       */
-      if (enableCancelTask) {
-        cancelExecutePoolThread(future);
-      }
-    }
-
-    LOG.debug("Completed " + type + " job request.");
-
-    return result;
-  }
-
-  /*
-   * Initiates a cancel request to stop the thread execution and interrupt the thread.
-   * If the thread interruption is not handled by jobExecuteCallable, the thread may
-   * continue running to completion. The cancel call may fail in some scenarios; in that
-   * case, it is retried until it returns true or the max retry count is reached.
-   *
-   * @param future
-   *          Future object which has handle to cancel the thread.
-   *
-   */
-  private void cancelExecutePoolThread(Future<T> future) {
-    int retryCount = 0;
-    while(retryCount < this.maxTaskCancelRetryCount && !future.isDone()) {
-      LOG.info("Task is still executing the job request. Cancelling it with retry count: "
-               + retryCount);
-      if (future.cancel(true)) {
-        /*
-         * Cancelled the job request and return to client.
-         */
-        LOG.info("Cancel job request issued successfully.");
-        return;
-      }
-
-      retryCount++;
-      try {
-        Thread.sleep(this.maxTaskCancelRetryWaitTimeInMs);
-      } catch (InterruptedException e) {
-        /*
-         * Nothing to do. Just retry.
-         */
-      }
-    }
-
-    LOG.warn("Failed to cancel the job. isCancelled: " + future.isCancelled()
-                    + " Retry count: " + retryCount);
-  }
-
-  /*
-   * Tries to get the job result if job request is completed. Otherwise it sets job status
-   * to FAILED such that execute thread can do necessary clean up based on FAILED state.
-   */
-  private T tryGetJobResultOrSetJobStateFailed(JobCallable<T> jobExecuteCallable) {
-    if (!jobExecuteCallable.setJobStateFailed()) {
-      LOG.info("Job is already COMPLETED. Returning the result.");
-      return jobExecuteCallable.returnResult;
-    } else {
-      LOG.info("Job status set to FAILED. Job clean up to be done by execute thread "
-              + "after job request is executed.");
-      return null;
-    }
-  }
-}
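
The throttling that the deleted class implemented hinges on a ThreadPoolExecutor backed by a
SynchronousQueue: with no request queue, a submit() that finds every worker busy is rejected
immediately. A stand-alone sketch of that behaviour (the pool here is hard-coded to one thread,
whereas the removed code sized it from the concurrency config):

    import java.util.concurrent.Callable;
    import java.util.concurrent.RejectedExecutionException;
    import java.util.concurrent.SynchronousQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class BusyPoolSketch {
      public static void main(String[] args) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            1, 1, 1, TimeUnit.HOURS, new SynchronousQueue<Runnable>());
        pool.allowCoreThreadTimeOut(true);

        Callable<Void> slow = new Callable<Void>() {
          public Void call() throws Exception {
            Thread.sleep(5000);          // stands in for a long-running job request
            return null;
          }
        };
        Callable<Void> next = new Callable<Void>() {
          public Void call() {
            return null;
          }
        };

        pool.submit(slow);               // occupies the only worker thread
        try {
          pool.submit(next);             // no idle worker and no queue slot
        } catch (RejectedExecutionException e) {
          // The removed JobRequestExecutor translated this into TooManyRequestsException.
          System.out.println("busy: " + e);
        } finally {
          pool.shutdownNow();
        }
      }
    }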

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/LauncherDelegator.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/LauncherDelegator.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/LauncherDelegator.java
index 9bea897..b3f44a2 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/LauncherDelegator.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/LauncherDelegator.java
@@ -23,19 +23,16 @@ import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeoutException;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.shims.HadoopShimsSecure;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.shims.HadoopShims.WebHCatJTShim;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hive.hcatalog.templeton.tool.JobState;
 import org.apache.hive.hcatalog.templeton.tool.TempletonControllerJob;
@@ -53,26 +50,9 @@ public class LauncherDelegator extends TempletonDelegator {
   static public enum JobType {JAR, STREAMING, PIG, HIVE, SQOOP}
   private boolean secureMeatastoreAccess = false;
   private final String HIVE_SHIMS_FILENAME_PATTERN = ".*hive-shims.*";
-  private final String JOB_SUBMIT_EXECUTE_THREAD_PREFIX = "JobSubmitExecute";
-  private final int jobTimeoutTaskRetryCount;
-  private final int jobTimeoutTaskRetryIntervalInSec;
-
-  /**
-   * Name of the submitting thread, used when naming job execute threads.
-   */
-  private final String submitThreadId = Thread.currentThread().getName();
-
-  /**
-   * Job request executor to submit job requests.
-   */
-  private static JobRequestExecutor<EnqueueBean> jobRequest =
-                   new JobRequestExecutor<EnqueueBean>(JobRequestExecutor.JobRequestType.Submit,
-                   AppConfig.JOB_SUBMIT_MAX_THREADS, AppConfig.JOB_SUBMIT_TIMEOUT, false);
 
   public LauncherDelegator(AppConfig appConf) {
     super(appConf);
-    jobTimeoutTaskRetryCount = appConf.getInt(AppConfig.JOB_TIMEOUT_TASK_RETRY_COUNT, 0);
-    jobTimeoutTaskRetryIntervalInSec = appConf.getInt(AppConfig.JOB_TIMEOUT_TASK_RETRY_INTERVAL, 0);
   }
 
   public void registerJob(String id, String user, String callback,
@@ -90,102 +70,19 @@ public class LauncherDelegator extends TempletonDelegator {
     }
   }
 
-  /*
-   * Submits a job request. If a maximum number of concurrent job submit requests is
-   * configured, the submit request is executed on a thread from the thread pool. If a
-   * job submit time out is configured, the request execution thread is interrupted when
-   * it times out. Also makes a best effort to determine whether the job was already
-   * submitted and, if so, to kill it quietly.
-   */
-  public EnqueueBean enqueueController(final String user, final Map<String, Object> userArgs,
-                     final String callback, final List<String> args)
-    throws NotAuthorizedException, BusyException, IOException, QueueException, TooManyRequestsException {
-
-    EnqueueBean bean = null;
-    final TempletonControllerJob controllerJob = getTempletonController();
-
-    if (jobRequest.isThreadPoolEnabled()) {
-      JobCallable<EnqueueBean> jobExecuteCallable = getJobSubmitTask(user, userArgs, callback,
-                                                                     args, controllerJob);
-      try {
-        bean = jobRequest.execute(jobExecuteCallable);
-      } catch (TimeoutException ex) {
-       /*
-        * The job request timed out. The job kill should have started. Return to the
-        * client with a QueueException.
-        */
-        throw new QueueException(ex.getMessage());
-      } catch (InterruptedException ex) {
-       /*
-        * The job request was interrupted. The job kill should have started. Return to the
-        * client with a QueueException.
-        */
-        throw new QueueException(ex.getMessage());
-      } catch (ExecutionException ex) {
-        /*
-         * ExecutionException is raised if job execution gets an exception. Return to client
-         * with the exception.
-         */
-        throw new QueueException(ex.getMessage());
-      }
-    } else {
-      LOG.info("No thread pool configured for submit job request. Executing "
-                      + "the job request in current thread.");
-
-      bean = enqueueJob(user, userArgs, callback, args, controllerJob);
-    }
-
-    return bean;
-  }
-
-  /*
-   * Job callable task for the job submit operation. Overrides execute() to submit the job.
-   * Also overrides cleanup() to kill the job in case the job submission request times out
-   * or is interrupted.
-   */
-  private JobCallable<EnqueueBean> getJobSubmitTask(final String user,
-                     final Map<String, Object> userArgs, final String callback,
-                     final List<String> args, final TempletonControllerJob controllerJob) {
-      return new JobCallable<EnqueueBean>() {
-        @Override
-        public EnqueueBean execute() throws NotAuthorizedException, BusyException, IOException,
-                                       QueueException {
-         /*
-          * Change the current thread name to include the parent thread id when it runs
-          * in the thread pool. Useful for extracting logs specific to a job request and
-          * for debugging job issues.
-          */
-          Thread.currentThread().setName(String.format("%s-%s-%s", JOB_SUBMIT_EXECUTE_THREAD_PREFIX,
-                                       submitThreadId, Thread.currentThread().getId()));
-
-          return enqueueJob(user, userArgs, callback, args, controllerJob);
-        }
-
-        @Override
-        public void cleanup() {
-          /*
-           * Failed to set the job status to COMPLETED, which means the main thread has
-           * already exited and is not waiting for the result. Kill the submitted job.
-           */
-          LOG.info("Job kill not done by main thread. Trying to kill now.");
-          killTempletonJobWithRetry(user, controllerJob.getSubmittedId());
-        }
-      };
-  }
-
   /**
    * Enqueue the TempletonControllerJob directly calling doAs.
    */
-  public EnqueueBean enqueueJob(String user, Map<String, Object> userArgs, String callback,
-                     List<String> args, TempletonControllerJob controllerJob)
+  public EnqueueBean enqueueController(String user, Map<String, Object> userArgs, String callback,
+                     List<String> args)
     throws NotAuthorizedException, BusyException,
     IOException, QueueException {
-    UserGroupInformation ugi = null;
     try {
-      ugi = UgiFactory.getUgi(user);
+      UserGroupInformation ugi = UgiFactory.getUgi(user);
 
       final long startTime = System.nanoTime();
 
-      String id = queueAsUser(ugi, args, controllerJob);
+      String id = queueAsUser(ugi, args);
 
       long elapsed = ((System.nanoTime() - startTime) / ((int) 1e6));
       LOG.debug("queued job " + id + " in " + elapsed + " ms");
@@ -199,91 +96,24 @@ public class LauncherDelegator extends TempletonDelegator {
       return new EnqueueBean(id);
     } catch (InterruptedException e) {
       throw new QueueException("Unable to launch job " + e);
-    } finally {
-      if (ugi != null) {
-        FileSystem.closeAllForUGI(ugi);
-      }
     }
   }
 
-  private String queueAsUser(UserGroupInformation ugi, final List<String> args,
-                            final TempletonControllerJob controllerJob)
+  private String queueAsUser(UserGroupInformation ugi, final List<String> args)
     throws IOException, InterruptedException {
     if(LOG.isDebugEnabled()) {
       LOG.debug("Launching job: " + args);
     }
     return ugi.doAs(new PrivilegedExceptionAction<String>() {
       public String run() throws Exception {
-        runTempletonControllerJob(controllerJob, args);
-        return controllerJob.getSubmittedId();
+        String[] array = new String[args.size()];
+        TempletonControllerJob ctrl = new TempletonControllerJob(secureMeatastoreAccess, appConf);
+        ToolRunner.run(ctrl, args.toArray(array));
+        return ctrl.getSubmittedId();
       }
     });
   }
 
-  /*
-   * Kills the templeton job, with multiple retries, if the job exists. Returns true if
-   * the kill attempt succeeds. Otherwise returns false.
-   */
-  private boolean killTempletonJobWithRetry(String user, String jobId) {
-    /*
-     * Null-safe check of whether the job submission has gone through and the job id is valid.
-     */
-    if (StringUtils.startsWith(jobId, "job_")) {
-      LOG.info("Started killing the job " + jobId);
-
-      boolean success = false;
-      int count = 0;
-      do {
-        try {
-          count++;
-          killJob(user, jobId);
-          success = true;
-          LOG.info("Kill job attempt succeeded.");
-         } catch (Exception e) {
-          LOG.info("Failed to kill the job due to exception: " + e.getMessage());
-          LOG.info("Waiting for " + jobTimeoutTaskRetryIntervalInSec + "s before retrying "
-                       + "the operation. Iteration: " + count);
-          try {
-            Thread.sleep(jobTimeoutTaskRetryIntervalInSec * 1000);
-          } catch (InterruptedException ex) {
-            LOG.info("Got interrupted while waiting for next retry.");
-          }
-        }
-      } while (!success && count < jobTimeoutTaskRetryCount);
-
-      return success;
-    } else {
-      LOG.info("Couldn't find a valid job id after job request is timed out.");
-      return false;
-    }
-  }
-
-  /*
-   * Gets a new templeton controller job object.
-   */
-  protected TempletonControllerJob getTempletonController() {
-    return new TempletonControllerJob(secureMeatastoreAccess, appConf);
-  }
-
-  /*
-   * Runs the templeton controller job with 'args'. Utilizes ToolRunner to run
-   * the actual job.
-   */
-  protected int runTempletonControllerJob(TempletonControllerJob controllerJob, List<String> args)
-    throws IOException, InterruptedException, TimeoutException, Exception {
-    String[] array = new String[args.size()];
-    return ToolRunner.run(controllerJob, args.toArray(array));
-  }
-
-  /*
-   * Uses DeleteDelegator to kill a job. Callers are expected to handle or ignore any exceptions.
-   */
-  protected void killJob(String user, String jobId)
-  throws NotAuthorizedException, BadParam, IOException, InterruptedException {
-    DeleteDelegator d = new DeleteDelegator(appConf);
-    d.run(user, jobId);
-  }
-
   public List<String> makeLauncherArgs(AppConfig appConf, String statusdir,
                      String completedUrl,
                      List<String> copyFiles,
@@ -350,35 +180,24 @@ public class LauncherDelegator extends TempletonDelegator {
    */
   private String getShimLibjars() {
     WebHCatJTShim shim = null;
-    UserGroupInformation ugi = null;
     try {
-      ugi = UserGroupInformation.getCurrentUser();
-      shim = ShimLoader.getHadoopShims().getWebHCatShim(appConf, ugi);
-
-      // Besides the HiveShims jar which is Hadoop version dependent we also
-      // always need to include hive shims common jars.
-      Path shimCommonJar = new Path(
-          TempletonUtils.findContainingJar(ShimLoader.class, HIVE_SHIMS_FILENAME_PATTERN));
-      Path shimCommonSecureJar = new Path(
-          TempletonUtils.findContainingJar(HadoopShimsSecure.class, HIVE_SHIMS_FILENAME_PATTERN));
-      Path shimJar = new Path(
-          TempletonUtils.findContainingJar(shim.getClass(), HIVE_SHIMS_FILENAME_PATTERN));
-
-      return String.format(
-          "%s,%s,%s",
-          shimCommonJar.toString(), shimCommonSecureJar.toString(), shimJar.toString());
+      shim = ShimLoader.getHadoopShims().getWebHCatShim(appConf, UserGroupInformation.getCurrentUser());
     } catch (IOException e) {
-      throw new RuntimeException("Failed to get shimLibJars", e);
-    } finally {
-      try {
-        if (ugi != null) {
-          FileSystem.closeAllForUGI(ugi);
-        }
-      } catch (IOException e) {
-        throw new RuntimeException("Failed to closeAllForUGI", e);
-      }
+      throw new RuntimeException("Failed to get WebHCatShim", e);
     }
 
+    // Besides the HiveShims jar which is Hadoop version dependent we also
+    // always need to include hive shims common jars.
+    Path shimCommonJar = new Path(
+        TempletonUtils.findContainingJar(ShimLoader.class, HIVE_SHIMS_FILENAME_PATTERN));
+    Path shimCommonSecureJar = new Path(
+        TempletonUtils.findContainingJar(HadoopShimsSecure.class, HIVE_SHIMS_FILENAME_PATTERN));
+    Path shimJar = new Path(
+        TempletonUtils.findContainingJar(shim.getClass(), HIVE_SHIMS_FILENAME_PATTERN));
+
+    return String.format(
+        "%s,%s,%s",
+        shimCommonJar.toString(), shimCommonSecureJar.toString(), shimJar.toString());
   }
 
   // Storage vars
@@ -444,7 +263,7 @@ public class LauncherDelegator extends TempletonDelegator {
   }
   /**
   * This is called by subclasses when they determine that the submitted job requires
-   * metastore access (e.g. Pig job that uses HCatalog).  This then determines if
+   * metastore access (e.g. Pig job that uses HCatalog).  This then determines if 
    * secure access is required and causes TempletonControllerJob to set up a delegation token.
    * @see TempletonControllerJob
    */
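
The restored queueAsUser() uses the standard UserGroupInformation.doAs pattern. A minimal
sketch of that pattern in isolation; the proxy user name "alice" is a placeholder, and WebHCat
actually obtains the UGI via UgiFactory.getUgi(user):

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsSketch {
      public static void main(String[] args) throws Exception {
        // The login user acts as the proxy for the placeholder end user "alice".
        UserGroupInformation ugi = UserGroupInformation.createProxyUser(
            "alice", UserGroupInformation.getLoginUser());
        String ranAs = ugi.doAs(new PrivilegedExceptionAction<String>() {
          public String run() throws Exception {
            // Everything in here executes as the proxied user, e.g. launching the
            // TempletonControllerJob via ToolRunner.run(...).
            return UserGroupInformation.getCurrentUser().getShortUserName();
          }
        });
        System.out.println("ran as: " + ranAs);
      }
    }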

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/ListDelegator.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/ListDelegator.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/ListDelegator.java
index dfa59f8..a30ecd1 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/ListDelegator.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/ListDelegator.java
@@ -19,15 +19,9 @@
 package org.apache.hive.hcatalog.templeton;
 
 import java.io.IOException;
-import java.util.Collections;
 import java.util.List;
 import java.util.ArrayList;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeoutException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hive.shims.HadoopShims.WebHCatJTShim;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.mapred.JobStatus;
@@ -37,82 +31,20 @@ import org.apache.hadoop.security.UserGroupInformation;
  * List jobs owned by a user.
  */
 public class ListDelegator extends TempletonDelegator {
-  private static final Log LOG = LogFactory.getLog(ListDelegator.class);
-  private final String JOB_LIST_EXECUTE_THREAD_PREFIX = "JobListExecute";
-
-  /**
-   * Current thread id used to set in execution threads.
-   */
-  private final String listThreadId = Thread.currentThread().getName();
-
-  /*
-   * Job request executor to list job status requests.
-   */
-  private static JobRequestExecutor<List<JobItemBean>> jobRequest =
-                   new JobRequestExecutor<List<JobItemBean>>(JobRequestExecutor.JobRequestType.List,
-                   AppConfig.JOB_LIST_MAX_THREADS, AppConfig.JOB_LIST_TIMEOUT);
-
   public ListDelegator(AppConfig appConf) {
     super(appConf);
   }
 
-  /*
-   * List status jobs request. If maximum concurrent job list requests are configured then
-   * list request will be executed on a thread from thread pool. If job list request time out
-   * is configured then request execution thread will be interrupted if thread times out and
-   * does no action.
-   */
-  public List<JobItemBean> run(final String user, final boolean showall, final String jobId,
-                               final int numRecords, final boolean showDetails)
-    throws NotAuthorizedException, BadParam, IOException, InterruptedException, BusyException,
-           TimeoutException, ExecutionException, TooManyRequestsException {
-
-    if (jobRequest.isThreadPoolEnabled()) {
-      return jobRequest.execute(getJobListTask(user, showall, jobId,numRecords, showDetails));
-    } else {
-      return listJobs(user, showall, jobId, numRecords, showDetails);
-    }
-  }
-
-  /*
-   * Job callable task for job list operation. Overrides behavior of execute() to list jobs.
-   * No need to override behavior of cleanup() as there is nothing to be done if list jobs
-   * operation is timed out or interrupted.
-   */
-  private JobCallable<List<JobItemBean>> getJobListTask(final String user, final boolean showall,
-                      final String jobId, final int numRecords, final boolean showDetails) {
-    return new JobCallable<List<JobItemBean>>() {
-      @Override
-      public List<JobItemBean> execute() throws NotAuthorizedException, BadParam, IOException,
-                                             InterruptedException {
-       /*
-        * Change the current thread name to include parent thread Id if it is executed
-        * in thread pool. Useful to extract logs specific to a job request and helpful
-        * to debug job issues.
-        */
-        Thread.currentThread().setName(String.format("%s-%s-%s", JOB_LIST_EXECUTE_THREAD_PREFIX,
-                                       listThreadId, Thread.currentThread().getId()));
-
-        return listJobs(user, showall, jobId, numRecords, showDetails);
-      }
-    };
-  }
-
-  /*
-   * Gets list of job ids and calls getJobStatus to get status for each job id.
-   */
-  public List<JobItemBean> listJobs(String user, boolean showall, String jobId,
-                                    int numRecords, boolean showDetails)
+  public List<String> run(String user, boolean showall)
     throws NotAuthorizedException, BadParam, IOException, InterruptedException {
 
-    UserGroupInformation ugi = null;
+    UserGroupInformation ugi = UgiFactory.getUgi(user);
     WebHCatJTShim tracker = null;
-    ArrayList<String> ids = new ArrayList<String>();
-
     try {
-      ugi = UgiFactory.getUgi(user);
       tracker = ShimLoader.getHadoopShims().getWebHCatShim(appConf, ugi);
 
+      ArrayList<String> ids = new ArrayList<String>();
+
       JobStatus[] jobs = tracker.getAllJobs();
 
       if (jobs != null) {
@@ -122,81 +54,13 @@ public class ListDelegator extends TempletonDelegator {
             ids.add(id);
         }
       }
+
+      return ids;
     } catch (IllegalStateException e) {
       throw new BadParam(e.getMessage());
     } finally {
       if (tracker != null)
         tracker.close();
-      if (ugi != null)
-        FileSystem.closeAllForUGI(ugi);
     }
-
-    return getJobStatus(ids, user, showall, jobId, numRecords, showDetails);
-  }
-
-  /*
-   * Returns job status for list of input jobs as a list.
-   */
-  public List<JobItemBean> getJobStatus(ArrayList<String> jobIds, String user, boolean showall,
-                                       String jobId, int numRecords, boolean showDetails)
-                                       throws IOException, InterruptedException {
-
-    List<JobItemBean> detailList = new ArrayList<JobItemBean>();
-    int currRecord = 0;
-
-    // Sort the list as requested
-    boolean isAscendingOrder = true;
-    switch (appConf.getListJobsOrder()) {
-    case lexicographicaldesc:
-      Collections.sort(jobIds, Collections.reverseOrder());
-      isAscendingOrder = false;
-      break;
-    case lexicographicalasc:
-    default:
-      Collections.sort(jobIds);
-      break;
-    }
-
-    for (String job : jobIds) {
-      // If numRecords = -1, fetch all records.
-      // Hence skip all the below checks when numRecords = -1.
-      if (numRecords != -1) {
-        // If currRecord >= numRecords, we have already fetched the top #numRecords
-        if (currRecord >= numRecords) {
-          break;
-        }
-        else if (jobId == null || jobId.trim().length() == 0) {
-            currRecord++;
-        }
-        // If the current record needs to be returned based on the
-        // filter conditions specified by the user, increment the counter
-        else if (isAscendingOrder && job.compareTo(jobId) > 0 || !isAscendingOrder && job.compareTo(jobId) < 0) {
-          currRecord++;
-        }
-        // The current record should not be included in the output detailList.
-        else {
-          continue;
-        }
-      }
-      JobItemBean jobItem = new JobItemBean();
-      jobItem.id = job;
-      if (showDetails) {
-        StatusDelegator sd = new StatusDelegator(appConf);
-        try {
-          jobItem.detail = sd.run(user, job, false);
-        }
-        catch(Exception ex) {
-          /*
-           * if we could not get status for some reason, log it, and send empty status back with
-           * just the ID so that caller knows to even look in the log file
-           */
-          LOG.info("Failed to get status detail for jobId='" + job + "'", ex);
-          jobItem.detail = new QueueStatusBean(job, "Failed to retrieve status; see WebHCat logs");
-        }
-      }
-      detailList.add(jobItem);
-    }
-
-    return detailList;
   }
 }
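
The code removed above wrapped listJobs() in a JobCallable submitted to a JobRequestExecutor with a bounded thread pool and a timeout. A generic sketch of that submit-with-timeout pattern, using plain java.util.concurrent and hypothetical names rather than the WebHCat classes, is roughly:

  import java.util.List;
  import java.util.concurrent.Callable;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;
  import java.util.concurrent.Future;
  import java.util.concurrent.TimeUnit;
  import java.util.concurrent.TimeoutException;

  public class BoundedJobLister {
    // Bounded pool: at most maxThreads concurrent list requests.
    private final ExecutorService pool;
    private final long timeoutSeconds;

    public BoundedJobLister(int maxThreads, long timeoutSeconds) {
      this.pool = Executors.newFixedThreadPool(maxThreads);
      this.timeoutSeconds = timeoutSeconds;
    }

    public List<String> list(Callable<List<String>> task) throws Exception {
      Future<List<String>> future = pool.submit(task);
      try {
        // Wait at most timeoutSeconds for the worker to finish.
        return future.get(timeoutSeconds, TimeUnit.SECONDS);
      } catch (TimeoutException e) {
        // Interrupt the worker so a stuck request does not hold a pool thread.
        future.cancel(true);
        throw e;
      }
    }
  }

The cancel-with-interrupt step is what abandons a request that exceeds the timeout, which is why the removed comments note that no extra cleanup() override was needed for the list operation.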

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Main.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Main.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Main.java
index 3ed3ece..5208bf4 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Main.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Main.java
@@ -25,7 +25,6 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.EnumSet;
 import java.util.HashMap;
 
 import org.slf4j.Logger;
@@ -44,15 +43,14 @@ import org.eclipse.jetty.rewrite.handler.RedirectPatternRule;
 import org.eclipse.jetty.rewrite.handler.RewriteHandler;
 import org.eclipse.jetty.server.Handler;
 import org.eclipse.jetty.server.Server;
-import org.eclipse.jetty.server.ServerConnector;
 import org.eclipse.jetty.server.handler.HandlerList;
 import org.eclipse.jetty.servlet.FilterHolder;
+import org.eclipse.jetty.servlet.FilterMapping;
 import org.eclipse.jetty.servlet.ServletContextHandler;
 import org.eclipse.jetty.servlet.ServletHolder;
 import org.eclipse.jetty.xml.XmlConfiguration;
 import org.slf4j.bridge.SLF4JBridgeHandler;
 
-import javax.servlet.DispatcherType;
 import javax.servlet.http.HttpServletRequest;
 
 /**
@@ -124,7 +122,7 @@ public class Main {
       checkEnv();
       runServer(port);
       // Currently only print the first port to be consistent with old behavior
-      port =  ArrayUtils.isEmpty(server.getConnectors()) ? -1 : ((ServerConnector)(server.getConnectors()[0])).getLocalPort();
+      port =  ArrayUtils.isEmpty(server.getConnectors()) ? -1 : server.getConnectors()[0].getPort();
 
       System.out.println("templeton: listening on port " + port);
       LOG.info("Templeton listening on port " + port);
@@ -187,7 +185,6 @@ public class Main {
 
     // Add the Auth filter
     FilterHolder fHolder = makeAuthFilter();
-    EnumSet<DispatcherType> dispatches = EnumSet.of(DispatcherType.REQUEST);
 
     /* 
      * We add filters for each of the URIs supported by templeton.
@@ -196,18 +193,28 @@ public class Main {
      * This is because mapreduce does not use secure credentials for 
      * callbacks. So jetty would fail the request as unauthorized.
      */ 
-    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/ddl/*", dispatches);
-    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/pig/*", dispatches);
-    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/hive/*", dispatches);
-    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/sqoop/*", dispatches);
-    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/queue/*", dispatches);
-    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/jobs/*", dispatches);
-    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/mapreduce/*", dispatches);
-    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/status/*", dispatches);
-    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/version/*", dispatches);
+    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/ddl/*", 
+             FilterMapping.REQUEST);
+    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/pig/*", 
+             FilterMapping.REQUEST);
+    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/hive/*", 
+             FilterMapping.REQUEST);
+    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/sqoop/*",
+             FilterMapping.REQUEST);
+    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/queue/*", 
+             FilterMapping.REQUEST);
+    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/jobs/*",
+             FilterMapping.REQUEST);
+    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/mapreduce/*", 
+             FilterMapping.REQUEST);
+    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/status/*", 
+             FilterMapping.REQUEST);
+    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/version/*", 
+             FilterMapping.REQUEST);
 
     if (conf.getBoolean(AppConfig.XSRF_FILTER_ENABLED, false)){
-      root.addFilter(makeXSRFFilter(), "/" + SERVLET_PATH + "/*", dispatches);
+      root.addFilter(makeXSRFFilter(), "/" + SERVLET_PATH + "/*",
+             FilterMapping.REQUEST);
       LOG.debug("XSRF filter enabled");
     } else {
       LOG.warn("XSRF filter disabled");
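
For context, the addFilter calls removed above use the EnumSet<DispatcherType> signature of the newer servlet API (the same change set that used Jetty 9's ServerConnector), while the restored calls use the older int-constant overload driven by FilterMapping.REQUEST. A self-contained sketch of the newer form, with a trivial placeholder filter instead of WebHCat's auth filter and an illustrative port and path, assuming a Jetty 9 classpath:

  import java.io.IOException;
  import java.util.EnumSet;

  import javax.servlet.DispatcherType;
  import javax.servlet.Filter;
  import javax.servlet.FilterChain;
  import javax.servlet.FilterConfig;
  import javax.servlet.ServletException;
  import javax.servlet.ServletRequest;
  import javax.servlet.ServletResponse;

  import org.eclipse.jetty.server.Server;
  import org.eclipse.jetty.servlet.FilterHolder;
  import org.eclipse.jetty.servlet.ServletContextHandler;

  public class FilterRegistrationSketch {
    /** Placeholder filter; WebHCat would install its auth filter here. */
    public static class NoOpFilter implements Filter {
      public void init(FilterConfig cfg) {}
      public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain)
          throws IOException, ServletException {
        chain.doFilter(req, res);
      }
      public void destroy() {}
    }

    public static void main(String[] args) throws Exception {
      Server server = new Server(50111);                       // illustrative port
      ServletContextHandler root = new ServletContextHandler(ServletContextHandler.SESSIONS);
      root.setContextPath("/");

      // Jetty 9 form: dispatcher types are passed as an EnumSet.
      FilterHolder holder = new FilterHolder(new NoOpFilter());
      EnumSet<DispatcherType> dispatches = EnumSet.of(DispatcherType.REQUEST);
      root.addFilter(holder, "/templeton/v1/ddl/*", dispatches);

      server.setHandler(root);
      server.start();
      server.join();
    }
  }

Registering only for DispatcherType.REQUEST corresponds to FilterMapping.REQUEST in the older overload, so the revert should be behavior-preserving for these mappings.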


[44/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/data/files/tpcds-perf/metastore_export/csv/TAB_COL_STATS.txt
----------------------------------------------------------------------
diff --git a/data/files/tpcds-perf/metastore_export/csv/TAB_COL_STATS.txt b/data/files/tpcds-perf/metastore_export/csv/TAB_COL_STATS.txt
new file mode 100644
index 0000000..2b0dd01
--- /dev/null
+++ b/data/files/tpcds-perf/metastore_export/csv/TAB_COL_STATS.txt
@@ -0,0 +1,425 @@
+default,store,s_store_sk,int,1,1704,,,,,1507,0,,,,,1434571564,6196,_store_
+default,store,s_store_id,string,,,,,,,822,0,16,16,,,1434571564,6197,_store_
+default,store,s_rec_start_date,string,,,,,,,5,0,9.94131455399061,10,,,1434571564,6198,_store_
+default,store,s_rec_end_date,string,,,,,,,4,0,5,10,,,1434571564,6199,_store_
+default,store,s_closed_date_sk,int,2450820,2451314,,,,,214,1211,,,,,1434571564,6200,_store_
+default,store,s_store_name,string,,,,,,,11,0,3.91138497652582,5,,,1434571564,6201,_store_
+default,store,s_number_employees,int,200,300,,,,,63,13,,,,,1434571564,6202,_store_
+default,store,s_floor_space,int,5000201,9997773,,,,,1323,12,,,,,1434571564,6203,_store_
+default,store,s_hours,string,,,,,,,4,0,7.07746478873239,8,,,1434571564,6204,_store_
+default,store,s_manager,string,,,,,,,1213,0,12.6584507042254,21,,,1434571564,6205,_store_
+default,store,s_market_id,int,1,10,,,,,11,15,,,,,1434571564,6206,_store_
+default,store,s_geography_class,string,,,,,,,2,0,6.94659624413146,7,,,1434571564,6207,_store_
+default,store,s_market_desc,string,,,,,,,1382,0,57.7194835680751,100,,,1434571564,6208,_store_
+default,store,s_market_manager,string,,,,,,,896,0,12.781103286385,20,,,1434571564,6209,_store_
+default,store,s_division_id,int,1,1,,,,,1,10,,,,,1434571564,6210,_store_
+default,store,s_division_name,string,,,,,,,2,0,6.95070422535211,7,,,1434571564,6211,_store_
+default,store,s_company_id,int,1,1,,,,,1,13,,,,,1434571564,6212,_store_
+default,store,s_company_name,string,,,,,,,2,0,6.95070422535211,7,,,1434571564,6213,_store_
+default,store,s_street_number,string,,,,,,,633,0,2.87793427230047,4,,,1434571564,6214,_store_
+default,store,s_street_name,string,,,,,,,1020,0,8.58509389671362,19,,,1434571564,6215,_store_
+default,store,s_street_type,string,,,,,,,23,0,4.19307511737089,9,,,1434571564,6216,_store_
+default,store,s_suite_number,string,,,,,,,86,0,7.83978873239437,9,,,1434571564,6217,_store_
+default,store,s_city,string,,,,,,,244,0,8.53462441314554,15,,,1434571564,6218,_store_
+default,store,s_county,string,,,,,,,122,0,13.9237089201878,22,,,1434571564,6219,_store_
+default,store,s_state,string,,,,,,,49,0,1.98591549295775,2,,,1434571564,6220,_store_
+default,store,s_zip,string,,,,,,,936,0,4.96478873239437,5,,,1434571564,6221,_store_
+default,store,s_country,string,,,,,,,2,0,12.931338028169,13,,,1434571564,6222,_store_
+default,store,s_gmt_offset,float,,,-9,-5,,,5,9,,,,,1434571564,6223,_store_
+default,store,s_tax_precentage,float,,,0,0.109999999403954,,,15,12,,,,,1434571564,6224,_store_
+default,call_center,cc_call_center_sk,int,1,60,,,,,37,0,,,,,1434571573,6225,_call_center_
+default,call_center,cc_call_center_id,string,,,,,,,30,0,16,16,,,1434571573,6226,_call_center_
+default,call_center,cc_rec_start_date,string,,,,,,,4,0,10,10,,,1434571573,6227,_call_center_
+default,call_center,cc_rec_end_date,string,,,,,,,4,0,5,10,,,1434571573,6228,_call_center_
+default,call_center,cc_closed_date_sk,int,,,,,,,1,60,,,,,1434571573,6229,_call_center_
+default,call_center,cc_open_date_sk,int,2450794,2451146,,,,,21,0,,,,,1434571573,6230,_call_center_
+default,call_center,cc_name,string,,,,,,,29,0,13.85,19,,,1434571573,6231,_call_center_
+default,call_center,cc_class,string,,,,,,,3,0,5.43333333333333,6,,,1434571573,6232,_call_center_
+default,call_center,cc_employees,int,5412266,1963174023,,,,,39,0,,,,,1434571573,6233,_call_center_
+default,call_center,cc_sq_ft,int,-2108783316,2044891959,,,,,56,0,,,,,1434571573,6234,_call_center_
+default,call_center,cc_hours,string,,,,,,,3,0,7.1,8,,,1434571573,6235,_call_center_
+default,call_center,cc_manager,string,,,,,,,47,0,12.2333333333333,17,,,1434571573,6236,_call_center_
+default,call_center,cc_mkt_id,int,1,6,,,,,5,0,,,,,1434571573,6237,_call_center_
+default,call_center,cc_mkt_class,string,,,,,,,43,0,34.7,50,,,1434571573,6238,_call_center_
+default,call_center,cc_mkt_desc,string,,,,,,,41,0,61.8666666666667,100,,,1434571573,6239,_call_center_
+default,call_center,cc_market_manager,string,,,,,,,45,0,12.3833333333333,17,,,1434571573,6240,_call_center_
+default,call_center,cc_division,int,1,6,,,,,5,0,,,,,1434571573,6241,_call_center_
+default,call_center,cc_division_name,string,,,,,,,7,0,3.96666666666667,5,,,1434571573,6242,_call_center_
+default,call_center,cc_company,int,1,6,,,,,5,0,,,,,1434571573,6243,_call_center_
+default,call_center,cc_company_name,string,,,,,,,7,0,3.8,5,,,1434571573,6244,_call_center_
+default,call_center,cc_street_number,string,,,,,,,31,0,2.9,3,,,1434571573,6245,_call_center_
+default,call_center,cc_street_name,string,,,,,,,28,0,8.16666666666667,16,,,1434571573,6246,_call_center_
+default,call_center,cc_street_type,string,,,,,,,14,0,4.28333333333333,9,,,1434571573,6247,_call_center_
+default,call_center,cc_suite_number,string,,,,,,,26,0,7.63333333333333,9,,,1434571573,6248,_call_center_
+default,call_center,cc_city,string,,,,,,,33,0,9.01666666666667,15,,,1434571573,6249,_call_center_
+default,call_center,cc_county,string,,,,,,,31,0,14.1166666666667,21,,,1434571573,6250,_call_center_
+default,call_center,cc_state,string,,,,,,,18,0,2,2,,,1434571573,6251,_call_center_
+default,call_center,cc_zip,string,,,,,,,20,0,5,5,,,1434571573,6252,_call_center_
+default,call_center,cc_country,string,,,,,,,1,0,13,13,,,1434571573,6253,_call_center_
+default,call_center,cc_gmt_offset,float,,,-8,-5,,,3,0,,,,,1434571573,6254,_call_center_
+default,call_center,cc_tax_percentage,float,,,0,0.119999997317791,,,17,0,,,,,1434571573,6255,_call_center_
+default,catalog_page,cp_catalog_page_sk,int,1,46000,,,,,62562,0,,,,,1434571586,6256,_catalog_page_
+default,catalog_page,cp_catalog_page_id,string,,,,,,,38846,0,16,16,,,1434571586,6257,_catalog_page_
+default,catalog_page,cp_start_date_sk,int,2450815,2453005,,,,,112,444,,,,,1434571586,6258,_catalog_page_
+default,catalog_page,cp_end_date_sk,int,2450844,2453186,,,,,86,446,,,,,1434571586,6259,_catalog_page_
+default,catalog_page,cp_department,string,,,,,,,2,0,9.90826086956522,10,,,1434571586,6260,_catalog_page_
+default,catalog_page,cp_catalog_number,int,1,109,,,,,66,465,,,,,1434571586,6261,_catalog_page_
+default,catalog_page,cp_catalog_page_number,int,1,425,,,,,224,443,,,,,1434571586,6262,_catalog_page_
+default,catalog_page,cp_description,string,,,,,,,48242,0,73.8601956521739,99,,,1434571586,6263,_catalog_page_
+default,catalog_page,cp_type,string,,,,,,,5,0,7.59682608695652,9,,,1434571586,6264,_catalog_page_
+default,customer,c_customer_sk,int,1,80000000,,,,,72955234,0,,,,,1434571680,6265,_customer_
+default,customer,c_customer_id,string,,,,,,,66900244,0,16,16,,,1434571680,6266,_customer_
+default,customer,c_current_cdemo_sk,int,1,1920800,,,,,1835839,2798616,,,,,1434571680,6267,_customer_
+default,customer,c_current_hdemo_sk,int,1,7200,,,,,9299,2799006,,,,,1434571680,6268,_customer_
+default,customer,c_current_addr_sk,int,1,40000000,,,,,45300013,0,,,,,1434571680,6269,_customer_
+default,customer,c_first_shipto_date_sk,int,2449028,2452678,,,,,3585,2800717,,,,,1434571680,6270,_customer_
+default,customer,c_first_sales_date_sk,int,2448998,2452648,,,,,3585,2799514,,,,,1434571680,6271,_customer_
+default,customer,c_salutation,string,,,,,,,9,0,3.1285808875,4,,,1434571680,6272,_customer_
+default,customer,c_first_name,string,,,,,,,5529,0,5.6321494125,11,,,1434571680,6273,_customer_
+default,customer,c_last_name,string,,,,,,,7820,0,5.9160140875,13,,,1434571680,6274,_customer_
+default,customer,c_preferred_cust_flag,string,,,,,,,3,0,0.96499295,1,,,1434571680,6275,_customer_
+default,customer,c_birth_day,int,1,31,,,,,23,2799325,,,,,1434571680,6276,_customer_
+default,customer,c_birth_month,int,1,12,,,,,13,2800192,,,,,1434571680,6277,_customer_
+default,customer,c_birth_year,int,1924,1992,,,,,49,2798667,,,,,1434571680,6278,_customer_
+default,customer,c_birth_country,string,,,,,,,214,0,8.3879500875,20,,,1434571680,6279,_customer_
+default,customer,c_login,string,,,,,,,2,0,0,0,,,1434571680,6280,_customer_
+default,customer,c_email_address,string,,,,,,,112512351,0,26.5065306,48,,,1434571680,6281,_customer_
+default,customer,c_last_review_date,string,,,,,,,224,0,6.7549565125,7,,,1434571680,6282,_customer_
+default,customer_address,ca_address_sk,int,1,40000000,,,,,53871098,0,,,,,1434571715,6283,_customer_address_
+default,customer_address,ca_address_id,string,,,,,,,47305616,0,16,16,,,1434571715,6284,_customer_address_
+default,customer_address,ca_street_number,string,,,,,,,822,0,2.806232675,4,,,1434571715,6285,_customer_address_
+default,customer_address,ca_street_name,string,,,,,,,7488,0,8.44949205,21,,,1434571715,6286,_customer_address_
+default,customer_address,ca_street_type,string,,,,,,,23,0,4.073537425,9,,,1434571715,6287,_customer_address_
+default,customer_address,ca_suite_number,string,,,,,,,86,0,7.653062625,9,,,1434571715,6288,_customer_address_
+default,customer_address,ca_city,string,,,,,,,896,0,8.684372925,20,,,1434571715,6289,_customer_address_
+default,customer_address,ca_county,string,,,,,,,1716,0,13.543959525,28,,,1434571715,6290,_customer_address_
+default,customer_address,ca_state,string,,,,,,,51,0,1.9400121,2,,,1434571715,6291,_customer_address_
+default,customer_address,ca_zip,string,,,,,,,10141,0,4.849962,5,,,1434571715,6292,_customer_address_
+default,customer_address,ca_country,string,,,,,,,2,0,12.610234325,13,,,1434571715,6293,_customer_address_
+default,customer_address,ca_gmt_offset,float,,,-10,-5,,,5,1201179,,,,,1434571715,6294,_customer_address_
+default,customer_address,ca_location_type,string,,,,,,,4,0,8.729921525,13,,,1434571715,6295,_customer_address_
+default,customer_demographics,cd_demo_sk,int,1,1920800,,,,,1835839,0,,,,,1434571729,6296,_customer_demographics_
+default,customer_demographics,cd_gender,string,,,,,,,2,0,1,1,,,1434571729,6297,_customer_demographics_
+default,customer_demographics,cd_marital_status,string,,,,,,,7,0,1,1,,,1434571729,6298,_customer_demographics_
+default,customer_demographics,cd_education_status,string,,,,,,,9,0,9.57142857142857,15,,,1434571729,6299,_customer_demographics_
+default,customer_demographics,cd_purchase_estimate,int,500,10000,,,,,24,0,,,,,1434571729,6300,_customer_demographics_
+default,customer_demographics,cd_credit_rating,string,,,,,,,5,0,7,9,,,1434571729,6301,_customer_demographics_
+default,customer_demographics,cd_dep_count,int,0,6,,,,,5,0,,,,,1434571729,6302,_customer_demographics_
+default,customer_demographics,cd_dep_employed_count,int,0,6,,,,,5,0,,,,,1434571729,6303,_customer_demographics_
+default,customer_demographics,cd_dep_college_count,int,0,6,,,,,5,0,,,,,1434571729,6304,_customer_demographics_
+default,date_dim,d_date_sk,int,2415022,2488070,,,,,65332,0,,,,,1434571736,6305,_date_dim_
+default,date_dim,d_date_id,string,,,,,,,109875,0,16,16,,,1434571736,6306,_date_dim_
+default,date_dim,d_date,string,,,,,,,92393,0,10,10,,,1434571736,6307,_date_dim_
+default,date_dim,d_month_seq,int,0,2400,,,,,2764,0,,,,,1434571736,6308,_date_dim_
+default,date_dim,d_week_seq,int,1,10436,,,,,13152,0,,,,,1434571736,6309,_date_dim_
+default,date_dim,d_quarter_seq,int,1,801,,,,,429,0,,,,,1434571736,6310,_date_dim_
+default,date_dim,d_year,int,1900,2100,,,,,112,0,,,,,1434571736,6311,_date_dim_
+default,date_dim,d_dow,int,0,6,,,,,5,0,,,,,1434571736,6312,_date_dim_
+default,date_dim,d_moy,int,1,12,,,,,13,0,,,,,1434571736,6313,_date_dim_
+default,date_dim,d_dom,int,1,31,,,,,23,0,,,,,1434571736,6314,_date_dim_
+default,date_dim,d_qoy,int,1,4,,,,,5,0,,,,,1434571736,6315,_date_dim_
+default,date_dim,d_fy_year,int,1900,2100,,,,,112,0,,,,,1434571736,6316,_date_dim_
+default,date_dim,d_fy_quarter_seq,int,1,801,,,,,429,0,,,,,1434571736,6317,_date_dim_
+default,date_dim,d_fy_week_seq,int,1,10436,,,,,13152,0,,,,,1434571736,6318,_date_dim_
+default,date_dim,d_day_name,string,,,,,,,9,0,7.14286300976057,9,,,1434571736,6319,_date_dim_
+default,date_dim,d_quarter_name,string,,,,,,,721,0,6,6,,,1434571736,6320,_date_dim_
+default,date_dim,d_holiday,string,,,,,,,2,0,1,1,,,1434571736,6321,_date_dim_
+default,date_dim,d_weekend,string,,,,,,,2,0,1,1,,,1434571736,6322,_date_dim_
+default,date_dim,d_following_holiday,string,,,,,,,2,0,1,1,,,1434571736,6323,_date_dim_
+default,date_dim,d_first_dom,int,2415021,2488070,,,,,2226,0,,,,,1434571736,6324,_date_dim_
+default,date_dim,d_last_dom,int,2415020,2488372,,,,,2535,0,,,,,1434571736,6325,_date_dim_
+default,date_dim,d_same_day_ly,int,2414657,2487705,,,,,65332,0,,,,,1434571736,6326,_date_dim_
+default,date_dim,d_same_day_lq,int,2414930,2487978,,,,,65332,0,,,,,1434571736,6327,_date_dim_
+default,date_dim,d_current_day,string,,,,,,,1,0,1,1,,,1434571736,6328,_date_dim_
+default,date_dim,d_current_week,string,,,,,,,1,0,1,1,,,1434571736,6329,_date_dim_
+default,date_dim,d_current_month,string,,,,,,,2,0,1,1,,,1434571736,6330,_date_dim_
+default,date_dim,d_current_quarter,string,,,,,,,2,0,1,1,,,1434571736,6331,_date_dim_
+default,date_dim,d_current_year,string,,,,,,,2,0,1,1,,,1434571736,6332,_date_dim_
+default,household_demographics,hd_demo_sk,int,1,7200,,,,,9299,0,,,,,1434571741,6333,_household_demographics_
+default,household_demographics,hd_income_band_sk,int,1,20,,,,,18,0,,,,,1434571741,6334,_household_demographics_
+default,household_demographics,hd_buy_potential,string,,,,,,,5,0,7.5,10,,,1434571741,6335,_household_demographics_
+default,household_demographics,hd_dep_count,int,0,9,,,,,11,0,,,,,1434571741,6336,_household_demographics_
+default,household_demographics,hd_vehicle_count,int,-1,4,,,,,6,0,,,,,1434571741,6337,_household_demographics_
+default,income_band,ib_income_band_sk,int,1,20,,,,,18,0,,,,,1434571745,6338,_income_band_
+default,income_band,ib_lower_bound,int,0,190001,,,,,21,0,,,,,1434571745,6339,_income_band_
+default,income_band,ib_upper_bound,int,10000,200000,,,,,18,0,,,,,1434571745,6340,_income_band_
+default,item,i_item_sk,int,1,462000,,,,,439501,0,,,,,1434571764,6341,_item_
+default,item,i_item_id,string,,,,,,,310774,0,16,16,,,1434571764,6342,_item_
+default,item,i_rec_start_date,string,,,,,,,5,0,9.97480519480519,10,,,1434571764,6343,_item_
+default,item,i_rec_end_date,string,,,,,,,5,0,5,10,,,1434571764,6344,_item_
+default,item,i_item_desc,string,,,,,,,338901,0,100.203757575758,200,,,1434571764,6345,_item_
+default,item,i_current_price,float,,,0.0900000035762787,99.9899978637695,,,12060,1167,,,,,1434571764,6346,_item_
+default,item,i_wholesale_cost,float,,,0.0199999995529652,89.7399978637695,,,7820,1120,,,,,1434571764,6347,_item_
+default,item,i_brand_id,int,1001001,10016017,,,,,633,1139,,,,,1434571764,6348,_item_
+default,item,i_brand,string,,,,,,,633,0,16.1186558441558,22,,,1434571764,6349,_item_
+default,item,i_class_id,int,1,16,,,,,15,1117,,,,,1434571764,6350,_item_
+default,item,i_class,string,,,,,,,102,0,7.76902164502164,15,,,1434571764,6351,_item_
+default,item,i_category_id,int,1,10,,,,,11,1141,,,,,1434571764,6352,_item_
+default,item,i_category,string,,,,,,,10,0,5.88895454545454,11,,,1434571764,6353,_item_
+default,item,i_manufact_id,int,1,1000,,,,,691,1152,,,,,1434571764,6354,_item_
+default,item,i_manufact,string,,,,,,,1267,0,11.2630519480519,15,,,1434571764,6355,_item_
+default,item,i_size,string,,,,,,,9,0,4.32115800865801,11,,,1434571764,6356,_item_
+default,item,i_formulation,string,,,,,,,310774,0,19.9492207792208,20,,,1434571764,6357,_item_
+default,item,i_color,string,,,,,,,66,0,5.36743939393939,10,,,1434571764,6358,_item_
+default,item,i_units,string,,,,,,,18,0,4.17825541125541,7,,,1434571764,6359,_item_
+default,item,i_container,string,,,,,,,2,0,6.9825,7,,,1434571764,6360,_item_
+default,item,i_manager_id,int,1,100,,,,,63,1124,,,,,1434571764,6361,_item_
+default,item,i_product_name,string,,,,,,,522658,0,22.8322532467532,30,,,1434571764,6362,_item_
+default,promotion,p_promo_sk,int,1,2300,,,,,2764,0,,,,,1434571768,6363,_promotion_
+default,promotion,p_promo_id,string,,,,,,,2427,0,16,16,,,1434571768,6364,_promotion_
+default,promotion,p_start_date_sk,int,2450096,2450915,,,,,822,30,,,,,1434571768,6365,_promotion_
+default,promotion,p_end_date_sk,int,2450102,2450970,,,,,1066,26,,,,,1434571768,6366,_promotion_
+default,promotion,p_item_sk,int,614,461932,,,,,2132,28,,,,,1434571768,6367,_promotion_
+default,promotion,p_cost,float,,,1000,1000,,,1,28,,,,,1434571768,6368,_promotion_
+default,promotion,p_response_target,int,1,1,,,,,1,33,,,,,1434571768,6369,_promotion_
+default,promotion,p_promo_name,string,,,,,,,11,0,3.94695652173913,5,,,1434571768,6370,_promotion_
+default,promotion,p_channel_dmail,string,,,,,,,3,0,0.989565217391304,1,,,1434571768,6371,_promotion_
+default,promotion,p_channel_email,string,,,,,,,2,0,0.988260869565217,1,,,1434571768,6372,_promotion_
+default,promotion,p_channel_catalog,string,,,,,,,2,0,0.989565217391304,1,,,1434571768,6373,_promotion_
+default,promotion,p_channel_tv,string,,,,,,,2,0,0.988260869565217,1,,,1434571768,6374,_promotion_
+default,promotion,p_channel_radio,string,,,,,,,2,0,0.987826086956522,1,,,1434571768,6375,_promotion_
+default,promotion,p_channel_press,string,,,,,,,2,0,0.988695652173913,1,,,1434571768,6376,_promotion_
+default,promotion,p_channel_event,string,,,,,,,2,0,0.989565217391304,1,,,1434571768,6377,_promotion_
+default,promotion,p_channel_demo,string,,,,,,,2,0,0.988260869565217,1,,,1434571768,6378,_promotion_
+default,promotion,p_channel_details,string,,,,,,,2041,0,39.2652173913044,60,,,1434571768,6379,_promotion_
+default,promotion,p_purpose,string,,,,,,,2,0,6.92391304347826,7,,,1434571768,6380,_promotion_
+default,promotion,p_discount_active,string,,,,,,,2,0,0.986521739130435,1,,,1434571768,6381,_promotion_
+default,reason,r_reason_sk,int,1,72,,,,,47,0,,,,,1434571773,6382,_reason_
+default,reason,r_reason_id,string,,,,,,,61,0,16,16,,,1434571773,6383,_reason_
+default,reason,r_reason_desc,string,,,,,,,112,0,12.6527777777778,43,,,1434571773,6384,_reason_
+default,ship_mode,sm_ship_mode_sk,int,,,,,,,0,0,,,,,1434571776,6385,_ship_mode_
+default,ship_mode,sm_ship_mode_id,string,,,,,,,0,0,0,0,,,1434571776,6386,_ship_mode_
+default,ship_mode,sm_type,string,,,,,,,0,0,0,0,,,1434571776,6387,_ship_mode_
+default,ship_mode,sm_code,string,,,,,,,0,0,0,0,,,1434571776,6388,_ship_mode_
+default,ship_mode,sm_carrier,string,,,,,,,0,0,0,0,,,1434571776,6389,_ship_mode_
+default,ship_mode,sm_contract,string,,,,,,,0,0,0,0,,,1434571776,6390,_ship_mode_
+default,time_dim,t_time_sk,int,0,86399,,,,,125124,0,,,,,1434571782,6391,_time_dim_
+default,time_dim,t_time_id,string,,,,,,,71245,0,16,16,,,1434571782,6392,_time_dim_
+default,time_dim,t_time,int,0,86399,,,,,125124,0,,,,,1434571782,6393,_time_dim_
+default,time_dim,t_hour,int,0,23,,,,,19,0,,,,,1434571782,6394,_time_dim_
+default,time_dim,t_minute,int,0,59,,,,,37,0,,,,,1434571782,6395,_time_dim_
+default,time_dim,t_second,int,0,59,,,,,37,0,,,,,1434571782,6396,_time_dim_
+default,time_dim,t_am_pm,string,,,,,,,2,0,2,2,,,1434571782,6397,_time_dim_
+default,time_dim,t_shift,string,,,,,,,3,0,5.33333333333333,6,,,1434571782,6398,_time_dim_
+default,time_dim,t_sub_shift,string,,,,,,,5,0,6.91666666666667,9,,,1434571782,6399,_time_dim_
+default,time_dim,t_meal_time,string,,,,,,,4,0,2.875,9,,,1434571782,6400,_time_dim_
+default,warehouse,w_warehouse_sk,int,1,27,,,,,22,0,,,,,1434571790,6401,_warehouse_
+default,warehouse,w_warehouse_id,string,,,,,,,22,0,16,16,,,1434571790,6402,_warehouse_
+default,warehouse,w_warehouse_name,string,,,,,,,21,0,15.5555555555556,20,,,1434571790,6403,_warehouse_
+default,warehouse,w_warehouse_sq_ft,int,73065,977787,,,,,37,1,,,,,1434571790,6404,_warehouse_
+default,warehouse,w_street_number,string,,,,,,,28,0,2.77777777777778,3,,,1434571790,6405,_warehouse_
+default,warehouse,w_street_name,string,,,,,,,23,0,8.40740740740741,15,,,1434571790,6406,_warehouse_
+default,warehouse,w_street_type,string,,,,,,,18,0,3.92592592592593,9,,,1434571790,6407,_warehouse_
+default,warehouse,w_suite_number,string,,,,,,,21,0,7.44444444444444,9,,,1434571790,6408,_warehouse_
+default,warehouse,w_city,string,,,,,,,19,0,9.14814814814815,15,,,1434571790,6409,_warehouse_
+default,warehouse,w_county,string,,,,,,,15,0,14.4074074074074,17,,,1434571790,6410,_warehouse_
+default,warehouse,w_state,string,,,,,,,15,0,2,2,,,1434571790,6411,_warehouse_
+default,warehouse,w_zip,string,,,,,,,17,0,5,5,,,1434571790,6412,_warehouse_
+default,warehouse,w_country,string,,,,,,,1,0,13,13,,,1434571790,6413,_warehouse_
+default,warehouse,w_gmt_offset,float,,,-8,-5,,,3,1,,,,,1434571790,6414,_warehouse_
+default,web_page,wp_web_page_sk,int,1,4602,,,,,5529,0,,,,,1434571794,6415,_web_page_
+default,web_page,wp_web_page_id,string,,,,,,,2647,0,16,16,,,1434571794,6416,_web_page_
+default,web_page,wp_rec_start_date,string,,,,,,,4,0,9.88700564971751,10,,,1434571794,6417,_web_page_
+default,web_page,wp_rec_end_date,string,,,,,,,4,0,5,10,,,1434571794,6418,_web_page_
+default,web_page,wp_creation_date_sk,int,2450492,2450815,,,,,234,64,,,,,1434571794,6419,_web_page_
+default,web_page,wp_access_date_sk,int,2452548,2452648,,,,,63,58,,,,,1434571794,6420,_web_page_
+default,web_page,wp_autogen_flag,string,,,,,,,3,0,0.984789222077358,1,,,1434571794,6421,_web_page_
+default,web_page,wp_customer_sk,int,33025,79895491,,,,,1382,3263,,,,,1434571794,6422,_web_page_
+default,web_page,wp_url,string,,,,,,,2,0,17.8005215123859,18,,,1434571794,6423,_web_page_
+default,web_page,wp_type,string,,,,,,,6,0,6.30704041720991,9,,,1434571794,6424,_web_page_
+default,web_page,wp_char_count,int,303,8523,,,,,3585,65,,,,,1434571794,6425,_web_page_
+default,web_page,wp_link_count,int,2,25,,,,,16,50,,,,,1434571794,6426,_web_page_
+default,web_page,wp_image_count,int,1,7,,,,,6,48,,,,,1434571794,6427,_web_page_
+default,web_page,wp_max_ad_count,int,0,4,,,,,5,52,,,,,1434571794,6428,_web_page_
+default,web_site,web_site_sk,int,1,84,,,,,49,0,,,,,1434572787,6429,_web_site_
+default,web_site,web_site_id,string,,,,,,,53,0,16,16,,,1434572787,6430,_web_site_
+default,web_site,web_rec_start_date,string,,,,,,,5,0,9.76190476190476,10,,,1434572787,6431,_web_site_
+default,web_site,web_rec_end_date,string,,,,,,,4,0,5,10,,,1434572787,6432,_web_site_
+default,web_site,web_name,string,,,,,,,18,0,6.14285714285714,7,,,1434572787,6433,_web_site_
+default,web_site,web_open_date_sk,int,2450118,2450807,,,,,41,1,,,,,1434572787,6434,_web_site_
+default,web_site,web_close_date_sk,int,2440993,2446218,,,,,29,15,,,,,1434572787,6435,_web_site_
+default,web_site,web_class,string,,,,,,,2,0,6.91666666666667,7,,,1434572787,6436,_web_site_
+default,web_site,web_manager,string,,,,,,,61,0,12.4642857142857,19,,,1434572787,6437,_web_site_
+default,web_site,web_mkt_id,int,1,6,,,,,5,1,,,,,1434572787,6438,_web_site_
+default,web_site,web_mkt_class,string,,,,,,,79,0,34.7142857142857,49,,,1434572787,6439,_web_site_
+default,web_site,web_mkt_desc,string,,,,,,,61,0,64.6190476190476,98,,,1434572787,6440,_web_site_
+default,web_site,web_market_manager,string,,,,,,,58,0,12.7738095238095,18,,,1434572787,6441,_web_site_
+default,web_site,web_company_id,int,1,6,,,,,5,0,,,,,1434572787,6442,_web_site_
+default,web_site,web_company_name,string,,,,,,,7,0,3.79761904761905,5,,,1434572787,6443,_web_site_
+default,web_site,web_street_number,string,,,,,,,51,0,2.89285714285714,3,,,1434572787,6444,_web_site_
+default,web_site,web_street_name,string,,,,,,,102,0,8.94047619047619,16,,,1434572787,6445,_web_site_
+default,web_site,web_street_type,string,,,,,,,23,0,3.8452380952381,9,,,1434572787,6446,_web_site_
+default,web_site,web_suite_number,string,,,,,,,58,0,7.90476190476191,9,,,1434572787,6447,_web_site_
+default,web_site,web_city,string,,,,,,,37,0,8.73809523809524,15,,,1434572787,6448,_web_site_
+default,web_site,web_county,string,,,,,,,69,0,13.8333333333333,22,,,1434572787,6449,_web_site_
+default,web_site,web_state,string,,,,,,,33,0,2,2,,,1434572787,6450,_web_site_
+default,web_site,web_zip,string,,,,,,,56,0,5,5,,,1434572787,6451,_web_site_
+default,web_site,web_country,string,,,,,,,2,0,12.8452380952381,13,,,1434572787,6452,_web_site_
+default,web_site,web_gmt_offset,float,,,-8,-5,,,3,1,,,,,1434572787,6453,_web_site_
+default,web_site,web_tax_percentage,float,,,0,0.119999997317791,,,17,1,,,,,1434572787,6454,_web_site_
+default,catalog_returns,cr_returned_date_sk,int,2450821,2452924,,,,,2535,0,,,,,1434701010,7142,_catalog_returns_
+default,catalog_returns,cr_returned_time_sk,int,0,86399,,,,,125124,0,,,,,1434701010,7143,_catalog_returns_
+default,catalog_returns,cr_item_sk,int,1,48000,,,,,62562,0,,,,,1434701010,7144,_catalog_returns_
+default,catalog_returns,cr_refunded_customer_sk,int,1,1600000,,,,,1415625,575640,,,,,1434701010,7145,_catalog_returns_
+default,catalog_returns,cr_refunded_cdemo_sk,int,1,1920800,,,,,1835839,576793,,,,,1434701010,7146,_catalog_returns_
+default,catalog_returns,cr_refunded_hdemo_sk,int,1,7200,,,,,9299,576604,,,,,1434701010,7147,_catalog_returns_
+default,catalog_returns,cr_refunded_addr_sk,int,1,800000,,,,,707812,576661,,,,,1434701010,7148,_catalog_returns_
+default,catalog_returns,cr_returning_customer_sk,int,1,1600000,,,,,1415625,575663,,,,,1434701010,7149,_catalog_returns_
+default,catalog_returns,cr_returning_cdemo_sk,int,1,1920800,,,,,1835839,576462,,,,,1434701010,7150,_catalog_returns_
+default,catalog_returns,cr_returning_hdemo_sk,int,1,7200,,,,,9299,577306,,,,,1434701010,7151,_catalog_returns_
+default,catalog_returns,cr_returning_addr_sk,int,1,800000,,,,,707812,577349,,,,,1434701010,7152,_catalog_returns_
+default,catalog_returns,cr_call_center_sk,int,1,8,,,,,9,576152,,,,,1434701010,7153,_catalog_returns_
+default,catalog_returns,cr_catalog_page_sk,int,1,9828,,,,,7488,577076,,,,,1434701010,7154,_catalog_returns_
+default,catalog_returns,cr_ship_mode_sk,int,1,20,,,,,18,576040,,,,,1434701010,7155,_catalog_returns_
+default,catalog_returns,cr_warehouse_sk,int,1,6,,,,,5,576830,,,,,1434701010,7156,_catalog_returns_
+default,catalog_returns,cr_reason_sk,int,1,37,,,,,24,576949,,,,,1434701010,7157,_catalog_returns_
+default,catalog_returns,cr_order_number,int,2,31999999,,,,,18238808,0,,,,,1434701010,7158,_catalog_returns_
+default,catalog_returns,cr_return_quantity,int,1,100,,,,,63,576244,,,,,1434701010,7159,_catalog_returns_
+default,catalog_returns,cr_return_amount,float,,,0,28778.310546875,,,1139925,576461,,,,,1434701010,7160,_catalog_returns_
+default,catalog_returns,cr_return_tax,float,,,0,2391.1201171875,,,130664,577147,,,,,1434701010,7161,_catalog_returns_
+default,catalog_returns,cr_return_amt_inc_tax,float,,,0,29677.359375,,,1758005,576846,,,,,1434701010,7162,_catalog_returns_
+default,catalog_returns,cr_fee,float,,,0.5,100,,,12060,576294,,,,,1434701010,7163,_catalog_returns_
+default,catalog_returns,cr_return_ship_cost,float,,,0,14130.9599609375,,,595197,576784,,,,,1434701010,7164,_catalog_returns_
+default,catalog_returns,cr_refunded_cash,float,,,0,25606.5390625,,,1139925,575637,,,,,1434701010,7165,_catalog_returns_
+default,catalog_returns,cr_reversed_charge,float,,,0,23801.240234375,,,649067,577379,,,,,1434701010,7166,_catalog_returns_
+default,catalog_returns,cr_store_credit,float,,,0,22653.91015625,,,621548,576505,,,,,1434701010,7167,_catalog_returns_
+default,catalog_returns,cr_net_loss,float,,,0.5,15781.830078125,,,1091596,576295,,,,,1434701010,7168,_catalog_returns_
+default,catalog_sales,cs_sold_date_sk,int,2450815,2452654,,,,,2226,1440109,,,,,1434701053,7169,_catalog_sales_
+default,catalog_sales,cs_sold_time_sk,int,0,86399,,,,,125124,1439981,,,,,1434701053,7170,_catalog_sales_
+default,catalog_sales,cs_ship_date_sk,int,2450817,2452744,,,,,2324,1439858,,,,,1434701053,7171,_catalog_sales_
+default,catalog_sales,cs_bill_customer_sk,int,1,1600000,,,,,1415625,1439092,,,,,1434701053,7172,_catalog_sales_
+default,catalog_sales,cs_bill_cdemo_sk,int,1,1920800,,,,,1835839,1440094,,,,,1434701053,7173,_catalog_sales_
+default,catalog_sales,cs_bill_hdemo_sk,int,1,7200,,,,,9299,1438041,,,,,1434701053,7174,_catalog_sales_
+default,catalog_sales,cs_bill_addr_sk,int,1,800000,,,,,707812,1439191,,,,,1434701053,7175,_catalog_sales_
+default,catalog_sales,cs_ship_customer_sk,int,1,1600000,,,,,1415625,1441690,,,,,1434701053,7176,_catalog_sales_
+default,catalog_sales,cs_ship_cdemo_sk,int,1,1920800,,,,,1835839,1439916,,,,,1434701053,7177,_catalog_sales_
+default,catalog_sales,cs_ship_hdemo_sk,int,1,7200,,,,,9299,1441155,,,,,1434701053,7178,_catalog_sales_
+default,catalog_sales,cs_ship_addr_sk,int,1,800000,,,,,707812,1437501,,,,,1434701053,7179,_catalog_sales_
+default,catalog_sales,cs_call_center_sk,int,1,8,,,,,9,1438943,,,,,1434701053,7180,_catalog_sales_
+default,catalog_sales,cs_catalog_page_sk,int,1,9828,,,,,7488,1439230,,,,,1434701053,7181,_catalog_sales_
+default,catalog_sales,cs_ship_mode_sk,int,1,20,,,,,18,1439234,,,,,1434701053,7182,_catalog_sales_
+default,catalog_sales,cs_warehouse_sk,int,1,6,,,,,5,1441117,,,,,1434701053,7183,_catalog_sales_
+default,catalog_sales,cs_item_sk,int,1,48000,,,,,62562,0,,,,,1434701053,7184,_catalog_sales_
+default,catalog_sales,cs_promo_sk,int,1,450,,,,,224,1440151,,,,,1434701053,7185,_catalog_sales_
+default,catalog_sales,cs_order_number,int,1,32000000,,,,,45300013,0,,,,,1434701053,7186,_catalog_sales_
+default,catalog_sales,cs_quantity,int,1,100,,,,,63,1439531,,,,,1434701053,7187,_catalog_sales_
+default,catalog_sales,cs_wholesale_cost,float,,,1,100,,,12060,1440635,,,,,1434701053,7188,_catalog_sales_
+default,catalog_sales,cs_list_price,float,,,1,300,,,27468,1440771,,,,,1434701053,7189,_catalog_sales_
+default,catalog_sales,cs_sales_price,float,,,0,300,,,27468,1439327,,,,,1434701053,7190,_catalog_sales_
+default,catalog_sales,cs_ext_discount_amt,float,,,0,29767,,,1543750,1440329,,,,,1434701053,7191,_catalog_sales_
+default,catalog_sales,cs_ext_sales_price,float,,,0,29943,,,1612098,1439296,,,,,1434701053,7192,_catalog_sales_
+default,catalog_sales,cs_ext_wholesale_cost,float,,,1,10000,,,545798,1439858,,,,,1434701053,7193,_catalog_sales_
+default,catalog_sales,cs_ext_list_price,float,,,1,30000,,,1612098,1439697,,,,,1434701053,7194,_catalog_sales_
+default,catalog_sales,cs_ext_tax,float,,,0,2673.27001953125,,,284981,1439788,,,,,1434701053,7195,_catalog_sales_
+default,catalog_sales,cs_coupon_amt,float,,,0,28422.939453125,,,2090632,1439246,,,,,1434701053,7196,_catalog_sales_
+default,catalog_sales,cs_ext_ship_cost,float,,,0,14994,,,771875,1439291,,,,,1434701053,7197,_catalog_sales_
+default,catalog_sales,cs_net_paid,float,,,0,29943,,,2711215,1439634,,,,,1434701053,7198,_catalog_sales_
+default,catalog_sales,cs_net_paid_inc_tax,float,,,0,32376.26953125,,,2956601,1439480,,,,,1434701053,7199,_catalog_sales_
+default,catalog_sales,cs_net_paid_inc_ship,float,,,0,43725,,,2956601,0,,,,,1434701053,7200,_catalog_sales_
+default,catalog_sales,cs_net_paid_inc_ship_tax,float,,,0,45460.80078125,,,3087500,0,,,,,1434701053,7201,_catalog_sales_
+default,catalog_sales,cs_net_profit,float,,,-10000,19962,,,2183192,0,,,,,1434701053,7202,_catalog_sales_
+default,inventory,inv_date_sk,int,2450815,2452635,,,,,316,0,,,,,1434681195,6879,_inventory_
+default,inventory,inv_item_sk,int,1,48000,,,,,62562,0,,,,,1434681195,6880,_inventory_
+default,inventory,inv_warehouse_sk,int,1,6,,,,,5,0,,,,,1434681195,6881,_inventory_
+default,inventory,inv_quantity_on_hand,int,0,1000,,,,,691,1880724,,,,,1434681195,6882,_inventory_
+default,store_sales,ss_sold_date_sk,int,2450816,2452642,,,,,2226,25919081,,,,,1434701145,7281,_store_sales_
+default,store_sales,ss_sold_time_sk,int,28800,75599,,,,,74399,25917462,,,,,1434701145,7282,_store_sales_
+default,store_sales,ss_item_sk,int,1,48000,,,,,62562,0,,,,,1434701145,7283,_store_sales_
+default,store_sales,ss_customer_sk,int,1,1600000,,,,,1415625,25915323,,,,,1434701145,7284,_store_sales_
+default,store_sales,ss_cdemo_sk,int,1,1920800,,,,,1835839,25919854,,,,,1434701145,7285,_store_sales_
+default,store_sales,ss_hdemo_sk,int,1,7200,,,,,9299,25915104,,,,,1434701145,7286,_store_sales_
+default,store_sales,ss_addr_sk,int,1,800000,,,,,707812,25915529,,,,,1434701145,7287,_store_sales_
+default,store_sales,ss_store_sk,int,1,212,,,,,90,25912696,,,,,1434701145,7288,_store_sales_
+default,store_sales,ss_promo_sk,int,1,450,,,,,224,25916282,,,,,1434701145,7289,_store_sales_
+default,store_sales,ss_ticket_number,int,1,48000000,,,,,56256175,0,,,,,1434701145,7290,_store_sales_
+default,store_sales,ss_quantity,int,1,100,,,,,63,25915535,,,,,1434701145,7291,_store_sales_
+default,store_sales,ss_wholesale_cost,float,,,1,100,,,12060,25919883,,,,,1434701145,7292,_store_sales_
+default,store_sales,ss_list_price,float,,,1,200,,,17056,25917233,,,,,1434701145,7293,_store_sales_
+default,store_sales,ss_sales_price,float,,,0,200,,,17056,25920715,,,,,1434701145,7294,_store_sales_
+default,store_sales,ss_ext_discount_amt,float,,,0,19225,,,1298134,25919240,,,,,1434701145,7295,_store_sales_
+default,store_sales,ss_ext_sales_price,float,,,0,19884,,,1139925,25919038,,,,,1434701145,7296,_store_sales_
+default,store_sales,ss_ext_wholesale_cost,float,,,1,10000,,,545798,25922497,,,,,1434701145,7297,_store_sales_
+default,store_sales,ss_ext_list_price,float,,,1,20000,,,1139925,25914179,,,,,1434701145,7298,_store_sales_
+default,store_sales,ss_ext_tax,float,,,0,1789.56005859375,,,219750,25918655,,,,,1434701145,7299,_store_sales_
+default,store_sales,ss_coupon_amt,float,,,0,19225,,,1298134,25919240,,,,,1434701145,7300,_store_sales_
+default,store_sales,ss_net_paid,float,,,0,19884,,,1917118,25919162,,,,,1434701145,7301,_store_sales_
+default,store_sales,ss_net_paid_inc_tax,float,,,0,21673.560546875,,,2380788,25925354,,,,,1434701145,7302,_store_sales_
+default,store_sales,ss_net_profit,float,,,-10000,9942,,,2380788,25912021,,,,,1434701145,7303,_store_sales_
+default,web_sales,ws_sold_date_sk,int,2450816,2452642,,,,,2226,35804,,,,,1434701088,7227,_web_sales_
+default,web_sales,ws_sold_time_sk,int,0,86399,,,,,125124,36037,,,,,1434701088,7228,_web_sales_
+default,web_sales,ws_ship_date_sk,int,2450817,2452762,,,,,2324,35780,,,,,1434701088,7229,_web_sales_
+default,web_sales,ws_item_sk,int,1,48000,,,,,62562,0,,,,,1434701088,7230,_web_sales_
+default,web_sales,ws_bill_customer_sk,int,1,1600000,,,,,1415625,35880,,,,,1434701088,7231,_web_sales_
+default,web_sales,ws_bill_cdemo_sk,int,1,1920800,,,,,1835839,35762,,,,,1434701088,7232,_web_sales_
+default,web_sales,ws_bill_hdemo_sk,int,1,7200,,,,,9299,36012,,,,,1434701088,7233,_web_sales_
+default,web_sales,ws_bill_addr_sk,int,1,800000,,,,,707812,35627,,,,,1434701088,7234,_web_sales_
+default,web_sales,ws_ship_customer_sk,int,1,1600000,,,,,1415625,35897,,,,,1434701088,7235,_web_sales_
+default,web_sales,ws_ship_cdemo_sk,int,1,1920800,,,,,1835839,35783,,,,,1434701088,7236,_web_sales_
+default,web_sales,ws_ship_hdemo_sk,int,1,7200,,,,,9299,35810,,,,,1434701088,7237,_web_sales_
+default,web_sales,ws_ship_addr_sk,int,1,800000,,,,,707812,35897,,,,,1434701088,7238,_web_sales_
+default,web_sales,ws_web_page_sk,int,1,342,,,,,205,35737,,,,,1434701088,7239,_web_sales_
+default,web_sales,ws_web_site_sk,int,1,38,,,,,25,35999,,,,,1434701088,7240,_web_sales_
+default,web_sales,ws_ship_mode_sk,int,1,20,,,,,18,35802,,,,,1434701088,7241,_web_sales_
+default,web_sales,ws_warehouse_sk,int,1,6,,,,,5,35925,,,,,1434701088,7242,_web_sales_
+default,web_sales,ws_promo_sk,int,1,450,,,,,224,36123,,,,,1434701088,7243,_web_sales_
+default,web_sales,ws_order_number,int,1,12000000,,,,,14686712,0,,,,,1434701088,7244,_web_sales_
+default,web_sales,ws_quantity,int,1,100,,,,,63,36054,,,,,1434701088,7245,_web_sales_
+default,web_sales,ws_wholesale_cost,float,,,1,100,,,12060,35939,,,,,1434701088,7246,_web_sales_
+default,web_sales,ws_list_price,float,,,1,300,,,27468,35970,,,,,1434701088,7247,_web_sales_
+default,web_sales,ws_sales_price,float,,,0,300,,,27468,35995,,,,,1434701088,7248,_web_sales_
+default,web_sales,ws_ext_discount_amt,float,,,0,29982,,,1543750,35968,,,,,1434701088,7249,_web_sales_
+default,web_sales,ws_ext_sales_price,float,,,0,29810,,,1415625,35945,,,,,1434701088,7250,_web_sales_
+default,web_sales,ws_ext_wholesale_cost,float,,,1,10000,,,545798,35704,,,,,1434701088,7251,_web_sales_
+default,web_sales,ws_ext_list_price,float,,,1.00999999046326,29997,,,1612098,36119,,,,,1434701088,7252,_web_sales_
+default,web_sales,ws_ext_tax,float,,,0,2682.89990234375,,,297598,35785,,,,,1434701088,7253,_web_sales_
+default,web_sales,ws_coupon_amt,float,,,0,27737.8203125,,,1612098,35986,,,,,1434701088,7254,_web_sales_
+default,web_sales,ws_ext_ship_cost,float,,,0,14927,,,677803,35929,,,,,1434701088,7255,_web_sales_
+default,web_sales,ws_net_paid,float,,,0,29810,,,2486195,35796,,,,,1434701088,7256,_web_sales_
+default,web_sales,ws_net_paid_inc_tax,float,,,0,32492.900390625,,,2596268,36094,,,,,1434701088,7257,_web_sales_
+default,web_sales,ws_net_paid_inc_ship,float,,,0,43674.83984375,,,2956601,0,,,,,1434701088,7258,_web_sales_
+default,web_sales,ws_net_paid_inc_ship_tax,float,,,0,46004.19140625,,,2956601,0,,,,,1434701088,7259,_web_sales_
+default,web_sales,ws_net_profit,float,,,-10000,19840,,,2090632,0,,,,,1434701088,7260,_web_sales_
+default,web_returns,wr_returned_date_sk,int,2450820,2453002,,,,,2647,648651,,,,,1434701060,7203,_web_returns_
+default,web_returns,wr_returned_time_sk,int,0,86399,,,,,125124,647816,,,,,1434701060,7204,_web_returns_
+default,web_returns,wr_item_sk,int,1,48000,,,,,62562,0,,,,,1434701060,7205,_web_returns_
+default,web_returns,wr_refunded_customer_sk,int,1,1600000,,,,,1415625,648741,,,,,1434701060,7206,_web_returns_
+default,web_returns,wr_refunded_cdemo_sk,int,1,1920800,,,,,1835839,648047,,,,,1434701060,7207,_web_returns_
+default,web_returns,wr_refunded_hdemo_sk,int,1,7200,,,,,9299,648703,,,,,1434701060,7208,_web_returns_
+default,web_returns,wr_refunded_addr_sk,int,1,800000,,,,,707812,648992,,,,,1434701060,7209,_web_returns_
+default,web_returns,wr_returning_customer_sk,int,1,1600000,,,,,1415625,648657,,,,,1434701060,7210,_web_returns_
+default,web_returns,wr_returning_cdemo_sk,int,1,1920800,,,,,1835839,648182,,,,,1434701060,7211,_web_returns_
+default,web_returns,wr_returning_hdemo_sk,int,1,7200,,,,,9299,648115,,,,,1434701060,7212,_web_returns_
+default,web_returns,wr_returning_addr_sk,int,1,800000,,,,,707812,648259,,,,,1434701060,7213,_web_returns_
+default,web_returns,wr_web_page_sk,int,1,342,,,,,205,649350,,,,,1434701060,7214,_web_returns_
+default,web_returns,wr_reason_sk,int,1,37,,,,,24,647884,,,,,1434701060,7215,_web_returns_
+default,web_returns,wr_order_number,int,1,11999999,,,,,8007986,0,,,,,1434701060,7216,_web_returns_
+default,web_returns,wr_return_quantity,int,1,100,,,,,63,648184,,,,,1434701060,7217,_web_returns_
+default,web_returns,wr_return_amt,float,,,0,28346.310546875,,,841735,648139,,,,,1434701060,7218,_web_returns_
+default,web_returns,wr_return_tax,float,,,0,2551.15991210938,,,105216,647420,,,,,1434701060,7219,_web_returns_
+default,web_returns,wr_return_amt_inc_tax,float,,,0,29493.380859375,,,1543750,647910,,,,,1434701060,7220,_web_returns_
+default,web_returns,wr_fee,float,,,0.5,100,,,12060,647879,,,,,1434701060,7221,_web_returns_
+default,web_returns,wr_return_ship_cost,float,,,0,14043.66015625,,,479279,647276,,,,,1434701060,7222,_web_returns_
+default,web_returns,wr_refunded_cash,float,,,0,26466.560546875,,,649067,649282,,,,,1434701060,7223,_web_returns_
+default,web_returns,wr_reversed_charge,float,,,0,22972.359375,,,545798,648744,,,,,1434701060,7224,_web_returns_
+default,web_returns,wr_account_credit,float,,,0,23166.330078125,,,439501,649368,,,,,1434701060,7225,_web_returns_
+default,web_returns,wr_net_loss,float,,,0.5,15068.9599609375,,,739150,649063,,,,,1434701060,7226,_web_returns_
+default,store_returns,sr_returned_date_sk,int,2450820,2452822,,,,,2324,2013145,,,,,1434701098,7261,_store_returns_
+default,store_returns,sr_return_time_sk,int,28799,61199,,,,,46196,2016532,,,,,1434701098,7262,_store_returns_
+default,store_returns,sr_item_sk,int,1,48000,,,,,62562,0,,,,,1434701098,7263,_store_returns_
+default,store_returns,sr_customer_sk,int,1,1600000,,,,,1415625,2016349,,,,,1434701098,7264,_store_returns_
+default,store_returns,sr_cdemo_sk,int,1,1920800,,,,,1835839,2013452,,,,,1434701098,7265,_store_returns_
+default,store_returns,sr_hdemo_sk,int,1,7200,,,,,9299,2015074,,,,,1434701098,7266,_store_returns_
+default,store_returns,sr_addr_sk,int,1,800000,,,,,707812,2015595,,,,,1434701098,7267,_store_returns_
+default,store_returns,sr_store_sk,int,1,212,,,,,90,2013530,,,,,1434701098,7268,_store_returns_
+default,store_returns,sr_reason_sk,int,1,37,,,,,24,2016587,,,,,1434701098,7269,_store_returns_
+default,store_returns,sr_ticket_number,int,1,48000000,,,,,34931085,0,,,,,1434701098,7270,_store_returns_
+default,store_returns,sr_return_quantity,int,1,100,,,,,63,2016579,,,,,1434701098,7271,_store_returns_
+default,store_returns,sr_return_amt,float,,,0,19387.169921875,,,841735,2015073,,,,,1434701098,7272,_store_returns_
+default,store_returns,sr_return_tax,float,,,0,1682.0400390625,,,114739,2014835,,,,,1434701098,7273,_store_returns_
+default,store_returns,sr_return_amt_inc_tax,float,,,0,20371.390625,,,1139925,2014852,,,,,1434701098,7274,_store_returns_
+default,store_returns,sr_fee,float,,,0.5,100,,,12060,2015809,,,,,1434701098,7275,_store_returns_
+default,store_returns,sr_return_ship_cost,float,,,0,9578.25,,,500499,2015477,,,,,1434701098,7276,_store_returns_
+default,store_returns,sr_refunded_cash,float,,,0,17556.94921875,,,917919,2015507,,,,,1434701098,7277,_store_returns_
+default,store_returns,sr_reversed_charge,float,,,0,16099.51953125,,,739150,2017819,,,,,1434701098,7278,_store_returns_
+default,store_returns,sr_store_credit,float,,,0,15642.1103515625,,,771875,2013589,,,,,1434701098,7279,_store_returns_
+default,store_returns,sr_net_loss,float,,,0.5,10447.7197265625,,,879002,2014488,,,,,1434701098,7280,_store_returns_

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/data/files/vector_ptf_part_simple.txt
----------------------------------------------------------------------
diff --git a/data/files/vector_ptf_part_simple.txt b/data/files/vector_ptf_part_simple.txt
deleted file mode 100644
index 2bcc7a6..0000000
--- a/data/files/vector_ptf_part_simple.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-Manufacturer#2	almond aquamarine rose maroon antique	900.66
-Manufacturer#1	almond aquamarine burnished black steel	1414.42
-Manufacturer#2	almond aquamarine rose maroon antique	1698.66
-Manufacturer#1	almond aquamarine pink moccasin thistle	\N
-Manufacturer#1	almond antique chartreuse lavender yellow	1753.76
-Manufacturer#5	almond antique medium spring khaki	1611.66
-Manufacturer#5	almond antique blue firebrick mint	1789.69
-Manufacturer#1	almond antique burnished rose metallic	1173.15
-Manufacturer#1	almond aquamarine pink moccasin thistle	1632.66
-Manufacturer#3	almond antique forest lavender goldenrod	1190.27
-Manufacturer#4	almond aquamarine yellow dodger mint	1844.92
-Manufacturer#1	almond antique chartreuse lavender yellow	1753.76
-Manufacturer#2	almond antique violet turquoise frosted	1800.7
-Manufacturer#3	almond antique forest lavender goldenrod	\N
-Manufacturer#2	almond antique violet chocolate turquoise	1690.68
-Manufacturer#4	almond antique violet mint lemon	1375.42
-Manufacturer#1	almond aquamarine pink moccasin thistle	1632.66
-Manufacturer#5	almond azure blanched chiffon midnight	1464.48
-Manufacturer#3	almond antique forest lavender goldenrod	590.27
-Manufacturer#1	almond antique chartreuse lavender yellow	1753.76
-Manufacturer#2	almond antique violet turquoise frosted	1800.7
-Manufacturer#5	almond antique sky peru orange	1788.73
-Manufacturer#1	almond aquamarine pink moccasin thistle	1632.66
-Manufacturer#3	almond antique chartreuse khaki white	99.68
-Manufacturer#4	almond antique gainsboro frosted violet	\N
-Manufacturer#1	almond antique chartreuse lavender yellow	1753.76
-Manufacturer#2	almond antique violet turquoise frosted	1800.7
-Manufacturer#3	almond antique olive coral navajo	1337.29
-Manufacturer#5	almond antique medium spring khaki	1611.66
-Manufacturer#1	almond antique salmon chartreuse burlywood	1602.59
-Manufacturer#3	almond antique misty red olive	1922.98
-Manufacturer#2	almond aquamarine sandy cyan gainsboro	1000.6
-Manufacturer#3	almond antique forest lavender goldenrod	1190.27
-Manufacturer#2	almond aquamarine midnight light salmon	2031.98
-Manufacturer#4	almond aquamarine floral ivory bisque	\N
-Manufacturer#5	almond aquamarine dodger light gainsboro	1018.1
-Manufacturer#4	almond azure aquamarine papaya violet	1290.35
-Manufacturer#3	almond antique metallic orange dim	55.39
-Manufacturer#1	almond antique burnished rose metallic	1173.15
-Manufacturer#4	almond aquamarine floral ivory bisque	1206.26

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/docs/changes/ChangesFancyStyle.css
----------------------------------------------------------------------
diff --git a/docs/changes/ChangesFancyStyle.css b/docs/changes/ChangesFancyStyle.css
new file mode 100644
index 0000000..5eef241
--- /dev/null
+++ b/docs/changes/ChangesFancyStyle.css
@@ -0,0 +1,170 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+/**
+ * General
+ */
+
+img { border: 0; }
+
+#content table {
+  border: 0;
+  width: 100%;
+}
+/*Hack to get IE to render the table at 100%*/
+* html #content table { margin-left: -3px; }
+
+#content th,
+#content td {
+  margin: 0;
+  padding: 0;
+  vertical-align: top;
+}
+
+.clearboth {
+  clear: both;
+}
+
+.note, .warning, .fixme {
+  border: solid black 1px;
+  margin: 1em 3em;
+}
+
+.note .label {
+  background: #369;
+  color: white;
+  font-weight: bold;
+  padding: 5px 10px;
+}
+.note .content {
+  background: #F0F0FF;
+  color: black;
+  line-height: 120%;
+  font-size: 90%;
+  padding: 5px 10px;
+}
+.warning .label {
+  background: #C00;
+  color: white;
+  font-weight: bold;
+  padding: 5px 10px;
+}
+.warning .content {
+  background: #FFF0F0;
+  color: black;
+  line-height: 120%;
+  font-size: 90%;
+  padding: 5px 10px;
+}
+.fixme .label {
+  background: #C6C600;
+  color: black;
+  font-weight: bold;
+  padding: 5px 10px;
+}
+.fixme .content {
+  padding: 5px 10px;
+}
+
+/**
+ * Typography
+ */
+
+body {
+  font-family: verdana, "Trebuchet MS", arial, helvetica, sans-serif;
+  font-size: 100%;
+}
+
+#content {
+  font-family: Georgia, Palatino, Times, serif;
+  font-size: 95%;
+}
+#tabs {
+  font-size: 70%;
+}
+#menu {
+  font-size: 80%;
+}
+#footer {
+  font-size: 70%;
+}
+
+h1, h2, h3, h4, h5, h6 {
+  font-family: "Trebuchet MS", verdana, arial, helvetica, sans-serif;
+  font-weight: bold;
+  margin-top: 1em;
+  margin-bottom: .5em;
+}
+
+h1 {
+    margin-top: 0;
+    margin-bottom: 1em;
+  font-size: 1.4em;
+  background-color: #73CAFF;
+}
+#content h1 {
+  font-size: 160%;
+  margin-bottom: .5em;
+}
+#menu h1 {
+  margin: 0;
+  padding: 10px;
+  background: #336699;
+  color: white;
+}
+h2 { 
+  font-size: 120%;
+  background-color: #73CAFF;
+}
+h3 { font-size: 100%; }
+h4 { font-size: 90%; }
+h5 { font-size: 80%; }
+h6 { font-size: 75%; }
+
+p {
+  line-height: 120%;
+  text-align: left;
+  margin-top: .5em;
+  margin-bottom: 1em;
+}
+
+#content li,
+#content th,
+#content td,
+#content li ul,
+#content li ol{
+  margin-top: .5em;
+  margin-bottom: .5em;
+}
+
+
+#content li li,
+#minitoc-area li{
+  margin-top: 0em;
+  margin-bottom: 0em;
+}
+
+#content .attribution {
+  text-align: right;
+  font-style: italic;
+  font-size: 85%;
+  margin-top: 1em;
+}
+
+.codefrag {
+  font-family: "Courier New", Courier, monospace;
+  font-size: 110%;
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/docs/changes/ChangesSimpleStyle.css
----------------------------------------------------------------------
diff --git a/docs/changes/ChangesSimpleStyle.css b/docs/changes/ChangesSimpleStyle.css
new file mode 100644
index 0000000..407d0f1
--- /dev/null
+++ b/docs/changes/ChangesSimpleStyle.css
@@ -0,0 +1,49 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+body {
+  font-family: Courier New, monospace;
+  font-size: 10pt;
+}
+
+h1 {
+  font-family: Courier New, monospace;
+  font-size: 10pt;
+}
+
+h2 {
+  font-family: Courier New, monospace;
+  font-size: 10pt; 
+}
+
+h3 {
+  font-family: Courier New, monospace;
+  font-size: 10pt; 
+}
+
+a:link {
+  color: blue;
+}
+
+a:visited {
+  color: purple; 
+}
+
+li {
+  margin-top: 1em;
+  margin-bottom: 1em;
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/docs/changes/changes2html.pl
----------------------------------------------------------------------
diff --git a/docs/changes/changes2html.pl b/docs/changes/changes2html.pl
new file mode 100644
index 0000000..03f0bbb
--- /dev/null
+++ b/docs/changes/changes2html.pl
@@ -0,0 +1,282 @@
+#!/usr/bin/perl
+#
+# Transforms Lucene Java's CHANGES.txt into Changes.html
+#
+# Input is on STDIN, output is to STDOUT
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use strict;
+use warnings;
+
+my $jira_url_prefix = 'http://issues.apache.org/jira/browse/';
+my $title = undef;
+my $release = undef;
+my $sections = undef;
+my $items = undef;
+my $first_relid = undef;
+my $second_relid = undef;
+my @releases = ();
+
+my @lines = <>;                        # Get all input at once
+
+#
+# Parse input and build hierarchical release structure in @releases
+#
+for (my $line_num = 0 ; $line_num <= $#lines ; ++$line_num) {
+  $_ = $lines[$line_num];
+  next unless (/\S/);                  # Skip blank lines
+
+  unless ($title) {
+    if (/\S/) {
+      s/^\s+//;                        # Trim leading whitespace
+      s/\s+$//;                        # Trim trailing whitespace
+    }
+    $title = $_;
+    next;
+  }
+
+  if (/^(Release)|(Trunk)/) {   # Release headings
+    $release = $_;
+    $sections = [];
+    push @releases, [ $release, $sections ];
+    ($first_relid = lc($release)) =~ s/\s+/_/g   if ($#releases == 0);
+    ($second_relid = lc($release)) =~ s/\s+/_/g  if ($#releases == 1);
+    $items = undef;
+    next;
+  }
+
+  # Section heading: 2 leading spaces, words all capitalized
+  if (/^  ([A-Z]+)\s*/) {
+    my $heading = $_;
+    $items = [];
+    push @$sections, [ $heading, $items ];
+    next;
+  }
+
+  # Handle earlier releases without sections - create a headless section
+  unless ($items) {
+    $items = [];
+    push @$sections, [ undef, $items ];
+  }
+
+  my $type;
+  if (@$items) { # A list item has been encountered in this section before
+    $type = $items->[0];  # 0th position of items array is list type
+  } else {
+    $type = get_list_type($_);
+    push @$items, $type;
+  }
+
+  if ($type eq 'numbered') { # The modern items list style
+    # List item boundary is another numbered item or an unindented line
+    my $line;
+    my $item = $_;
+    $item =~ s/^(\s{0,2}\d+\.\s*)//;       # Trim the leading item number
+    my $leading_ws_width = length($1);
+    $item =~ s/\s+$//;                     # Trim trailing whitespace
+    $item .= "\n";
+
+    while ($line_num < $#lines
+           and ($line = $lines[++$line_num]) !~ /^(?:\s{0,2}\d+\.\s*\S|\S)/) {
+      $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
+      $line =~ s/\s+$//;                   # Trim trailing whitespace
+      $item .= "$line\n";
+    }
+    $item =~ s/\n+\Z/\n/;                  # Trim trailing blank lines
+    push @$items, $item;
+    --$line_num unless ($line_num == $#lines);
+  } elsif ($type eq 'paragraph') {         # List item boundary is a blank line
+    my $line;
+    my $item = $_;
+    $item =~ s/^(\s+)//;
+    my $leading_ws_width = defined($1) ? length($1) : 0;
+    $item =~ s/\s+$//;                     # Trim trailing whitespace
+    $item .= "\n";
+
+    while ($line_num < $#lines and ($line = $lines[++$line_num]) =~ /\S/) {
+      $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
+      $line =~ s/\s+$//;                   # Trim trailing whitespace
+      $item .= "$line\n";
+    }
+    push @$items, $item;
+    --$line_num unless ($line_num == $#lines);
+  } else { # $type is one of the bulleted types
+    # List item boundary is another bullet or a blank line
+    my $line;
+    my $item = $_;
+    $item =~ s/^(\s*$type\s*)//;           # Trim the leading bullet
+    my $leading_ws_width = length($1);
+    $item =~ s/\s+$//;                     # Trim trailing whitespace
+    $item .= "\n";
+
+    while ($line_num < $#lines
+           and ($line = $lines[++$line_num]) !~ /^\s*(?:$type|\Z)/) {
+      $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
+      $line =~ s/\s+$//;                   # Trim trailing whitespace
+      $item .= "$line\n";
+    }
+    push @$items, $item;
+    --$line_num unless ($line_num == $#lines);
+  }
+}
+
+#
+# Print HTML-ified version to STDOUT
+#
+print<<"__HTML_HEADER__";
+<!--
+**********************************************************
+** WARNING: This file is generated from CHANGES.txt by the 
+**          Perl script 'changes2html.pl'.
+**          Do *not* edit this file!
+**********************************************************
+          
+****************************************************************************
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+****************************************************************************
+-->
+<html>
+<head>
+  <title>$title</title>
+  <link rel="stylesheet" href="ChangesFancyStyle.css" title="Fancy">
+  <link rel="alternate stylesheet" href="ChangesSimpleStyle.css" title="Simple">
+  <META http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
+  <SCRIPT>
+    function toggleList(e) {
+      element = document.getElementById(e).style;
+      element.display == 'none' ? element.display = 'block' : element.display='none';
+    }
+    function collapse() {
+      for (var i = 0; i < document.getElementsByTagName("ul").length; i++) {
+        var list = document.getElementsByTagName("ul")[i];
+        if (list.id != '$first_relid' && list.id != '$second_relid') {
+          list.style.display = "none";
+        }
+      }
+      for (var i = 0; i < document.getElementsByTagName("ol").length; i++) {
+        document.getElementsByTagName("ol")[i].style.display = "none"; 
+      }
+    }
+    window.onload = collapse;
+  </SCRIPT>
+</head>
+<body>
+
+<a href="http://hadoop.apache.org/hive/"><img class="logoImage" alt="Hive" src="images/hive-logo.jpg" title="SQL and Data Warehousing Platform on Hadoop"></a>
+<h1>$title</h1>
+
+__HTML_HEADER__
+
+my $heading;
+my $relcnt = 0;
+my $header = 'h2';
+for my $rel (@releases) {
+  if (++$relcnt == 3) {
+    $header = 'h3';
+    print "<h2><a href=\"javascript:toggleList('older')\">";
+    print "Older Releases";
+    print "</a></h2>\n";
+    print "<ul id=\"older\">\n"
+  }
+      
+  ($release, $sections) = @$rel;
+
+  # The first section heading is undefined for the older sectionless releases
+  my $has_release_sections = $sections->[0][0];
+
+  (my $relid = lc($release)) =~ s/\s+/_/g;
+  print "<$header><a href=\"javascript:toggleList('$relid')\">";
+  print "$release";
+  print "</a></$header>\n";
+  print "<ul id=\"$relid\">\n"
+    if ($has_release_sections);
+
+  for my $section (@$sections) {
+    ($heading, $items) = @$section;
+    (my $sectid = lc($heading)) =~ s/\s+/_/g;
+    my $numItemsStr = $#{$items} > 0 ? "($#{$items})" : "(none)";  
+
+    print "  <li><a href=\"javascript:toggleList('$relid.$sectid')\">",
+          ($heading || ''), "</a>&nbsp;&nbsp;&nbsp;$numItemsStr\n"
+      if ($has_release_sections);
+
+    my $list_type = $items->[0] || '';
+    my $list = ($has_release_sections || $list_type eq 'numbered' ? 'ol' : 'ul');
+    my $listid = $sectid ? "$relid.$sectid" : $relid;
+    print "    <$list id=\"$listid\">\n";
+
+    for my $itemnum (1..$#{$items}) {
+      my $item = $items->[$itemnum];
+      $item =~ s:&:&amp;:g;                            # Escape HTML metachars
+      $item =~ s:<:&lt;:g; 
+      $item =~ s:>:&gt;:g;
+
+      $item =~ s:\s*(\([^)"]+?\))\s*$:<br />$1:;       # Separate attribution
+      $item =~ s:\n{2,}:\n<p/>\n:g;                    # Keep paragraph breaks
+      $item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)}  # Link to JIRA
+                {<a href="${jira_url_prefix}$1">$1</a>}g;
+      print "      <li>$item</li>\n";
+    }
+    print "    </$list>\n";
+    print "  </li>\n" if ($has_release_sections);
+  }
+  print "</ul>\n" if ($has_release_sections);
+}
+print "</ul>\n" if ($relcnt > 3);
+print "</body>\n</html>\n";
+
+
+#
+# Subroutine: get_list_type
+#
+# Takes one parameter:
+#
+#    - The first line of a sub-section/point
+#
+# Returns one scalar:
+#
+#    - The list type: 'numbered'; or one of the bulleted types '-', or '.' or
+#      'paragraph'.
+#
+sub get_list_type {
+  my $first_list_item_line = shift;
+  my $type = 'paragraph'; # Default to paragraph type
+
+  if ($first_list_item_line =~ /^\s{0,2}\d+\.\s+\S+/) {
+    $type = 'numbered';
+  } elsif ($first_list_item_line =~ /^\s*([-.])\s+\S+/) {
+    $type = $1;
+  }
+  return $type;
+}
+
+1;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/docs/site.css
----------------------------------------------------------------------
diff --git a/docs/site.css b/docs/site.css
new file mode 100644
index 0000000..49ca65a
--- /dev/null
+++ b/docs/site.css
@@ -0,0 +1,305 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.    
+ */
+
+
+/** defined standard tags **/
+body {
+	background-color: #ffffff;
+	color: #000000;
+}
+
+a:link, a:active, a:visited {
+    color: #525D76;
+}
+
+
+h1 {
+	background-color: #525D76;
+	color: #ffffff;
+	font-family: arial,helvetica,sanserif;
+	font-size: large;
+	padding-left:2px;
+}
+
+h2 {
+	background-color: #828DA6;
+	color: #ffffff;
+	font-family: arial,helvetica,sanserif;
+	font-size: medium;
+	padding-left:2px;
+}
+
+table {
+	border: none;
+	border-spacing:0px;
+	border-collapse: collapse;
+}
+
+img {
+	border: none 0px;
+}
+
+/** define layout **/
+
+/** table used to force footer to end of page **/
+table#layout {
+	width:100%;
+}
+
+table#layout td {
+	padding:0px;
+}
+
+div#container {
+	width: 95%;
+	margin: 10px;
+	margin-left: 0;
+	margin-right: auto;
+	padding: 10px;
+}
+
+div#header {
+	padding: 5px;
+	margin: 0px;
+	margin-top:5px;
+	margin-bottom:5px;
+	height:80px;
+	border-bottom: 1px solid #333333;
+}
+
+div#menu {
+	float: left;
+	width: 200px;
+	margin: 0;
+	margin-left: 0px;
+	margin-right: 5px;
+
+	/** little higher margin since it doesn't start with a header **/
+	margin-top:10px;
+	margin-bottom:0px;
+
+	padding: 5px;
+}
+
+div#body {
+	margin-right:0px;
+	margin-left: 215px;
+	margin-top:5px;
+	margin-bottom:0px;
+
+	padding: 5px;
+
+}
+
+div#footer {
+
+	clear: both;
+
+	padding-top:15px;
+	margin-top:25px;
+	border-top: 1px solid #333333;
+
+
+	text-align:center;
+	color: #525D76;
+	font-style: italic;
+	font-size: smaller;
+}
+
+div#logo1 {
+	float:left;
+	margin-left:5px;
+	margin-top:10px;
+}
+
+
+div#logo2 {
+	float:right;
+	margin-top:10px;
+}
+
+
+/** define body tag redefinitions **/
+
+
+div#body th {
+	background-color: #039acc;
+	color: #000000;
+	font-family: arial,helvetica,sanserif;
+	font-size: smaller;
+	vertical-align: top;
+	text-align:left;
+	border:1px #FFFFFF solid;
+	padding: 2px;
+}
+
+div#body td {
+	background-color: #a0ddf0;
+	color: #000000;
+	font-family: arial,helvetica,sanserif;
+	font-size: smaller;
+	vertical-align: top;
+	text-align:left;
+	border:1px #FFFFFF solid;
+	padding: 2px;
+}
+
+
+div#body li {
+	 margin-top:3px;
+}
+
+/** define other body styles **/
+
+div.section {
+	margin-left: 25px;
+}
+
+div.subsection {
+	margin-left: 25px;
+}
+
+div.source {
+	margin-left:25px;
+	margin-top:20px;
+	margin-bottom:20px;
+	padding-left:4px;
+	padding-right:4px;
+	padding-bottom:4px;
+	padding-top:5px;
+
+	width:600px;
+
+	border: 1px solid #333333;
+	background-color: #EEEEEE;
+	color: #333333;
+
+	/** bug: puts a extra line before the block in IE and after the block in FireFox **/
+	white-space: pre;
+
+	font-family: Courier;
+	font-size: smaller;
+	text-align: left;
+
+	overflow:auto;
+}
+
+
+div.license {
+	margin-left:0px;
+	margin-top:20px;
+	margin-bottom:20px;
+	padding:5px;
+
+	border: 1px solid #333333;
+	background-color: #EEEEEE;
+	color: #333333;
+
+	text-align: left;
+}
+
+/** define menu styles **/
+
+div.menusection {
+	margin-bottom:10px;
+}
+
+.menuheader {
+	font-weight:bold;
+	margin-bottom:0px;
+}
+
+div.menusection ul {
+	margin-top:5px;
+
+}
+div.menusection li {
+
+}
+
+
+
+
+/** printing **/
+@page Section1
+    {
+    size:8.5in 11.0in;
+    margin:1.0in .75in 1.0in .75in;
+}
+
+@media print {
+
+	/** make sure this fits the page **/
+	div#container {
+		width:100%;
+		min-height:0px;
+	}
+
+
+	div#menu {
+		display:none;
+	}
+
+	div#header {
+		display:none;
+	}
+
+	div#body {
+		margin-left:5px;
+	}
+
+
+	div.source {
+		width:95%;
+		margin-left:0px;
+	}
+
+	/** make a bit more room on the page **/
+	div.section {
+		margin-left: 0px;
+	}
+
+	div.subsection {
+		margin-left: 0px;
+	}
+
+	h1 {
+		background-color: #FFFFFF;
+		color: #000000;
+	}
+
+	h2 {
+		background-color: #FFFFFF;
+		color: #000000;
+	}
+
+	div#body td {
+		background-color: #FFFFFF;
+		color: #000000;
+		border: #333333 1px solid;
+	}
+
+	div#body th {
+		background-color: #FFFFFF;
+		color: #000000;
+		border: #333333 1px solid;
+		font-style:bold;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/docs/stylesheets/project.xml
----------------------------------------------------------------------
diff --git a/docs/stylesheets/project.xml b/docs/stylesheets/project.xml
new file mode 100644
index 0000000..60bb75f
--- /dev/null
+++ b/docs/stylesheets/project.xml
@@ -0,0 +1,41 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.    
+-->
+
+<project name="Apache Hadoop Hive" href="http://hadoop.apache.org/hive">
+  <title>Hadoop Hive</title>
+  <logo href="images/hive-logo.jpg">Hadoop Hive</logo>
+  <body>
+    <menu name="Hadoop hive">
+      <item name="General"  href="/index.html" />
+    </menu>
+    <menu name="Hive Language Manual">
+      <item name="Data Manipulation Statements" href="/language_manual/data-manipulation-statements.html" />
+      <item name="Joins" href="/language_manual/joins.html" />
+      <item name="Cli" href="/language_manual/cli.html" />
+      <item name="Var Substitution" href="/language_manual/var_substitution.html" />
+    </menu>
+    <menu name="Developer Guide">
+      <item name="Issue Tracking (JIRA)" href="https://issues.apache.org/jira/browse/HIVE"/>
+    </menu>
+    <menu name="User Defined Functions">
+      <item name="reflect" href="/udf/reflect.html" />
+    </menu>
+  </body>
+</project>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/docs/stylesheets/site.vsl
----------------------------------------------------------------------
diff --git a/docs/stylesheets/site.vsl b/docs/stylesheets/site.vsl
new file mode 100644
index 0000000..9b23f40
--- /dev/null
+++ b/docs/stylesheets/site.vsl
@@ -0,0 +1,317 @@
+## Licensed to the Apache Software Foundation (ASF) under one
+## or more contributor license agreements.  See the NOTICE file
+## distributed with this work for additional information
+## regarding copyright ownership.  The ASF licenses this file
+## to you under the Apache License, Version 2.0 (the
+## "License"); you may not use this file except in compliance
+## with the License.  You may obtain a copy of the License at
+##
+##   http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing,
+## software distributed under the License is distributed on an
+## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+## KIND, either express or implied.  See the License for the
+## specific language governing permissions and limitations
+## under the License.    
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.    
+-->
+
+<!-- start the processing -->
+#document()
+<!-- end the processing -->
+
+## This is where the macro's live
+
+#macro ( table $table)
+<table>
+    #foreach ( $items in $table.getChildren() )
+        #if ($items.getName().equals("tr"))
+            #tr ($items)
+        #end
+    #end
+</table>
+#end
+
+#macro ( tr $tr)
+<tr>
+    #foreach ( $items in $tr.getChildren() )
+        #if ($items.getName().equals("td"))
+            #td ($items)
+        #elseif ($items.getName().equals("th"))
+            #th ($items)
+        #end
+    #end
+</tr>
+#end
+
+#macro ( td $value)
+#if ($value.getAttributeValue("colspan"))
+#set ($colspan = $value.getAttributeValue("colspan"))
+#end
+#if ($value.getAttributeValue("rowspan"))
+#set ($rowspan = $value.getAttributeValue("rowspan"))
+#end
+<td colspan="$!colspan" rowspan="$!rowspan">
+	#foreach ( $items in $value.getContent() )
+		#if($items.name)
+			#display($items)
+		#else
+			$items.value
+		#end
+	#end
+</td>
+#end
+
+#macro ( th $value)
+#if ($value.getAttributeValue("colspan"))
+#set ($colspan = $value.getAttributeValue("colspan"))
+#end
+#if ($value.getAttributeValue("rowspan"))
+#set ($rowspan = $value.getAttributeValue("rowspan"))
+#end
+<th colspan="$!colspan" rowspan="$!rowspan">
+	#foreach ( $items in $value.getContent() )
+		#if($items.name)
+			#display($items)
+		#else
+			$items.value
+		#end
+	#end
+</th>
+#end
+
+#macro ( projectanchor $name $value )
+#if ($value.startsWith("http://"))
+    <a href="$value">$name</a>
+#elseif ($value.startsWith("https://"))
+    <a href="$value">$name</a>
+#else
+    <a href="$relativePath$value">$name</a>
+#end
+#end
+
+#macro ( metaauthor $author $email )
+            <meta name="author" value="$author">
+            <meta name="email" value="$email">
+#end
+
+#macro ( image $value )
+#if ($value.getAttributeValue("width"))
+#set ($width=$value.getAttributeValue("width"))
+#end
+#if ($value.getAttributeValue("height"))
+#set ($height=$value.getAttributeValue("height"))
+#end
+#if ($value.getAttributeValue("align"))
+#set ($align=$value.getAttributeValue("align"))
+#end
+<img src="$relativePath$value.getAttributeValue("src")" width="$!width" height="$!height" align="$!align">
+#end
+
+#macro ( source $value)
+<div class="source"><pre>$escape.getText($value.getText())</pre></div>
+#end
+
+
+## need these to catch special macros within lists
+#macro(list $node)
+<$node.getName()>
+	#foreach ( $items in $node.getChildren() )
+		#listitem($items)
+	#end
+</$node.getName()>
+#end
+
+#macro (listitem $node)
+<$node.getName()>
+## use getContent instead of getChildren
+## to include both text and nodes
+	#foreach ( $items in $node.getContent() )
+		#if($items.name)
+			#display($items)
+		#else
+			$items.value
+		#end
+	#end
+</$node.getName()>
+#end
+
+
+## # displays a basic node, calling macros if appropriate
+#macro ( display $node )
+		#if ($node.getName().equals("img"))
+			#image ($node)
+		#elseif ($node.getName().equals("source"))
+			#source ($node)
+		#elseif ($node.getName().equals("table"))
+			#table ($node)
+		#elseif ($node.getName().equals("ul"))
+			#list ($node)
+		#elseif ($node.getName().equals("ol"))
+			#list ($node)
+		#else
+			$node
+		#end
+#end
+
+#macro ( section $section)
+	<a name="#anchorName($section)"></a>
+	<h1>$section.getAttributeValue("name")</h1>
+
+	<div class="subsection">
+		#foreach ( $items in $section.getChildren() )
+			#if ($items.getName().equals("subsection"))
+				#subsection ($items)
+			#else
+				#display($items)
+			#end
+		#end
+	</div>
+#end
+
+#macro ( subsection $subsection)
+	<a name="#anchorName($subsection)"></a>
+	<h2>$subsection.getAttributeValue("name")</h2>
+	<div class="subsection">
+		#foreach ( $items in $subsection.getChildren() )
+			#display($items)
+		#end
+	</div>
+#end
+
+#macro ( anchorName $section)
+#if ($section.getAttributeValue("href"))
+$section.getAttributeValue("href")##
+#else
+$section.getAttributeValue("name")##
+#end
+#end
+
+#macro ( makeProject )
+
+    <!-- ============================================================ -->
+
+    #set ($menus = $project.getChild("body").getChildren("menu"))
+    #foreach ( $menu in $menus )
+    	<div class="menusection">
+    		<span class="menuheader">$menu.getAttributeValue("name")</span>
+			<ul>
+			#foreach ( $item in $menu.getChildren() )
+				#set ($name = $item.getAttributeValue("name"))
+				<li>#projectanchor($name $item.getAttributeValue("href"))</li>
+			#end
+			</ul>
+        </div>
+    #end
+#end
+
+#macro (getProjectImage)
+
+<div id="logo1">
+	<a href="http://hadoop.apache.org/hive/"><img src="${relativePath}/images/hive-logo.jpg" border="0"/></a>
+</div>
+
+
+#if ($project.getChild("logo"))
+
+<div id="logo2">
+
+#set ( $logoString = $project.getChild("logo").getAttributeValue("href") )
+#if ( $logoString.startsWith("/") )
+<a href="$project.getAttributeValue("href")"><img src="$relativePath$logoString" alt="$project.getChild("logo").getText()" border="0"/></a>
+#else
+<a href="$project.getAttributeValue("href")"><img src="$relativePath/$logoString" alt="$project.getChild("logo").getText()" border="0"/></a>
+#end
+
+</div>
+
+#end
+#end
+
+#macro (printMeta $metaElement)
+<meta #set ($attribs = $metaElement.getAttributes())
+#foreach ($a in $attribs) $a.getName()="$a.getValue()" #end />
+#end
+
+#macro (document)
+    <!-- ====================================================================== -->
+    <!-- GENERATED FILE, DO NOT EDIT, EDIT THE XML FILE IN xdocs INSTEAD! -->
+    <!-- Main Page Section -->
+    <!-- ====================================================================== -->
+    <html>
+        <head>
+            <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>
+
+            #set ($authors = $root.getChild("properties").getChildren("author"))
+            #foreach ( $au in $authors )
+                #metaauthor ( $au.getText() $au.getAttributeValue("email") )
+            #end
+
+           #set ($metas = $root.getChildren("meta"))
+
+            ##    Parse meta directives such as
+            ##    <meta name="keyword" content="apache, velocity, java"/>
+            #foreach ($meta in $metas) #printMeta($meta) #end
+
+            ##    Support for <base> tags.
+            #if ($root.getChild("properties").getChild("base"))
+              #set ($url = $root.getChild("properties").getChild("base").getAttributeValue("href"))
+              <base href="$url"/>
+            #end
+
+            <title>$project.getChild("title").getText() - $root.getChild("properties").getChild("title").getText()</title>
+
+			## use a relative CSS for when the page is displayed locally (will overwrite
+			## previous CSS settings)
+			<link rel="stylesheet" href="${relativePath}/site.css" type="text/css">
+        </head>
+
+        <body>
+
+			## use a table in order to force footer to end of page
+
+			<div id="container">
+
+				<div id="header">
+					#getProjectImage()
+				</div>
+
+				<div id="menu">
+					#makeProject()
+				</div>
+
+				<div id="body">
+					#set ($allSections = $root.getChild("body").getChildren("section"))
+					#foreach ( $section in $allSections )
+						#section ($section)
+					#end
+				</div>
+
+				<div id="footer">
+					Copyright &#169; 1999-2007, <a href="http://www.apache.org/">The Apache Software Foundation</a>.
+				</div>
+
+			</div>
+
+        </body>
+    </html>
+#end

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/docs/velocity.properties
----------------------------------------------------------------------
diff --git a/docs/velocity.properties b/docs/velocity.properties
new file mode 100644
index 0000000..77ee2de
--- /dev/null
+++ b/docs/velocity.properties
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+runtime.log=build/docs/velocity.log

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/docs/xdocs/index.xml
----------------------------------------------------------------------
diff --git a/docs/xdocs/index.xml b/docs/xdocs/index.xml
new file mode 100644
index 0000000..f1df3fa
--- /dev/null
+++ b/docs/xdocs/index.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.    
+-->
+<document>
+  <properties>
+    <title>Hadoop Hive</title>
+    <author email="hive-user@hadoop.apache.org">Hadoop Hive Documentation Team</author>
+  </properties>
+  <body>
+    <section name="What is Hive?" href="WhatisHive?">
+      <p>Hive is a data warehouse infrastructure built on top of Hadoop. It provides tools for easy data ETL, a mechanism to put structure on the data, and the capability to query and analyze large data sets stored in Hadoop files. Hive defines a simple SQL-like query language, called QL, that enables users familiar with SQL to query the data. At the same time, this language also allows programmers who are familiar with the MapReduce framework to plug in their custom mappers and reducers to perform more sophisticated analysis that may not be supported by the built-in capabilities of the language.</p>
+
+<p>
+Hive does not mandate that data be read or written in a "Hive format"---there is no such thing. Hive works equally well on Thrift, control-delimited, or your specialized data formats. Please see File Format and SerDe in the Developer Guide for details. </p>
+    </section>
+    <section name="What Hive is NOT" href="WhatHiveIsNot?">
+<p>Hive is based on Hadoop, which is a batch processing system. As a result, Hive does not and cannot promise low latencies on queries. The paradigm here is strictly one of submitting jobs and being notified when they complete, as opposed to real-time queries. In contrast to systems such as Oracle, where analysis runs on a significantly smaller amount of data but proceeds much more iteratively, with response times between iterations of less than a few minutes, Hive query response times are on the order of several minutes even for the smallest jobs, and larger jobs (e.g., jobs processing terabytes of data) may in general run for hours.</p>
+
+<p>In summary, low-latency performance is not the top priority of Hive's design principles. What Hive values most are scalability (scaling out by dynamically adding more machines to the Hadoop cluster), extensibility (with the MapReduce framework and UDF/UDAF/UDTF), fault tolerance, and loose coupling with its input formats.</p>
+    </section>
+  </body>
+</document>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/docs/xdocs/language_manual/cli.xml
----------------------------------------------------------------------
diff --git a/docs/xdocs/language_manual/cli.xml b/docs/xdocs/language_manual/cli.xml
new file mode 100644
index 0000000..aaa8e81
--- /dev/null
+++ b/docs/xdocs/language_manual/cli.xml
@@ -0,0 +1,208 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+<document>
+
+  <properties>
+    <title>Hadoop Hive- Command Line Interface (CLI)</title>
+    <author email="hive-user@hadoop.apache.org">Hadoop Hive Documentation Team</author>
+  </properties>
+
+  <body>
+<h3>Hive Cli</h3>
+<section name="Hive Command line Options" href="command_line_options">
+
+<p>Usage:</p>
+
+<source><![CDATA[Usage: hive [-hiveconf x=y]* [<-i filename>]* [<-f filename>|<-e query-string>] [-S]
+
+  -i <filename>             Initialization Sql from file (executed automatically and silently before any other commands)
+  -e 'quoted query string'  Sql from command line
+  -f <filename>             Sql from file
+  -S                        Silent mode in interactive shell where only data is emitted
+  -hiveconf x=y             Use this to set hive/hadoop configuration variables. 
+  
+   -e and -f cannot be specified together. In the absence of these options, interactive shell is started.  However, -i can be used with any other options.
+
+   To see this usage help, run hive -h
+]]></source>
+
+<ul>
+<li>Example of running a Query from the command line
+<source><![CDATA[$HIVE_HOME/bin/hive -e 'select a.col from tab1 a'
+   ]]></source>
+</li>
+
+<li>Example of setting hive configuration variables 
+<source><![CDATA[$HIVE_HOME/bin/hive -e 'select a.col from tab1 a' -hiveconf hive.exec.scratchdir=/home/my/hive_scratch  -hiveconf mapred.reduce.tasks=32
+   ]]></source>
+</li>
+
+<li>Example of dumping data out from a query into a file using silent mode 
+<source><![CDATA[$HIVE_HOME/bin/hive -S -e 'select a.col from tab1 a' > a.txt
+]]></source>
+</li>
+
+<li>Example of running a script non-interactively
+<source><![CDATA[$HIVE_HOME/bin/hive -f /home/my/hive-script.sql
+]]></source>
+</li>
+
+<li>Example of running an initialization script before entering interactive mode 
+<source><![CDATA[$HIVE_HOME/bin/hive -i /home/my/hive-init.sql
+]]></source>
+</li>
+
+</ul>
+</section>
+
+<section name="hiverc file " href="hiverc">
+<p>
+When invoked without the -i option, the CLI will attempt to load HIVE_HOME/bin/.hiverc and $HOME/.hiverc as initialization files.
+</p>
+</section>
+
+<section name="Hive interactive shell commands" href="hive_interactive_shell_commands">
+When $HIVE_HOME/bin/hive is run without the -e or -f option, it enters interactive shell mode.
+
+Use ";" (semicolon) to terminate commands. Comments in scripts can be specified using the "--" prefix.
+
+<table border="1">
+
+<tr>
+<td><b>Command</b></td>
+<td><b>Description</b></td>
+</tr>
+
+<tr>
+<td>quit</td>
+<td>Use quit or exit to leave the interactive shell.</td>
+</tr>
+
+<tr>
+<td>set key=value</td>
+<td>Use this to set the value of a particular configuration variable. Note that if you misspell the variable name, the CLI will not show an error.</td>
+</tr>
+
+<tr>
+<td>set</td>
+<td>Prints a list of the configuration variables that have been overridden by the user or by Hive.</td>
+</tr>
+
+
+<tr>
+<td>set -v </td>
+<td>Prints all Hadoop and Hive configuration variables.</td>
+</tr>
+
+
+<tr>
+<td>add FILE [file] [file]*</td>
+<td>Adds a file to the list of resources</td>
+</tr>
+
+<tr>
+<td>list FILE</td>
+<td>Lists all the files added to the distributed cache.</td>
+</tr>
+
+<tr>
+<td>list FILE [file]*</td>
+<td>Checks whether the given resources have already been added to the distributed cache.</td>
+</tr>
+
+<tr>
+<td>! [cmd]</td>
+<td>Executes a shell command from the hive shell</td>
+</tr>
+
+<tr>
+<td>dfs [dfs cmd]</td>
+<td>Executes a dfs command from the hive shell</td>
+</tr>
+
+<tr>
+<td>[query]</td>
+<td>Executes a Hive query and prints results to standard output.</td>
+</tr>
+
+<tr>
+<td>source FILE</td>
+<td>Used to execute a script file inside the CLI.</td>
+</tr>
+
+</table>
+
+Sample Usage:
+
+<source><![CDATA[  hive> set  mapred.reduce.tasks=32;
+  hive> set;
+  hive> select a.* from tab1;
+  hive> !ls;
+  hive> dfs -ls;
+]]></source>
+
+</section>
+
+<section name="Logging" href="logging">
+<p>
+Hive uses log4j for logging. These logs are not emitted to the standard output by default but are instead captured in a log file specified by Hive's log4j properties file. By default, Hive uses <i>hive-log4j2.properties</i> in the <i>conf/</i> directory of the Hive installation, which writes logs to <i>/tmp/$USER/hive.log</i> at the <i>WARN</i> level.
+</p>
+<p>
+It is often desirable to emit the logs to the standard output and/or change the logging level for debugging purposes. Both can be done from the command line as follows: </p>
+
+<source><![CDATA[$HIVE_HOME/bin/hive -hiveconf hive.root.logger=INFO,console ]]></source>
+<p>
+<i>hive.root.logger</i> specifies the logging level as well as the log destination. Specifying console as the target sends the logs to the standard error (instead of the log file).
+</p>
+</section>
+
+<section name="Hive Resources" href="Hive Resources">
+<p>
+Hive can manage the addition of resources to a session, where those resources need to be made available at query execution time. Any locally accessible file can be added to the session. Once a file is added to a session, a Hive query can refer to it by its name (in map/reduce/transform clauses), and the file is available locally at execution time on the entire Hadoop cluster. Hive uses Hadoop's Distributed Cache to distribute the added files to all the machines in the cluster at query execution time.</p>
+
+<source><![CDATA[   ADD { FILE[S] | JAR[S] | ARCHIVE[S] } <filepath1> [<filepath2>]*
+   LIST { FILE[S] | JAR[S] | ARCHIVE[S] } [<filepath1> <filepath2> ..]
+   DELETE { FILE[S] | JAR[S] | ARCHIVE[S] } [<filepath1> <filepath2> ..] ]]></source>
+
+<ul>
+<li>FILE resources are just added to the distributed cache. Typically, this might be something like a transform script to be executed.</li>
+<li>JAR resources are also added to the Java classpath. This is required in order to reference objects they contain, such as UDFs. </li>
+<li>ARCHIVE resources are automatically unarchived as part of distributing them.  </li>
+</ul>
+
+<p>Example</p>
+
+<source><![CDATA[hive> add FILE /tmp/tt.py;
+hive> list FILES;
+/tmp/tt.py
+hive> from networks a  MAP a.networkid USING 'python tt.py' as nn where a.ds = '2009-01-04' limit  10; ]]></source>
+
+<p>It is not necessary to add files to the session if the files used in a transform script are already available on all machines in the Hadoop cluster under the same path name. For example: </p>
+
+<ul>
+<li>... MAP a.networkid USING 'wc -l' ...: here wc is an executable available on all machines</li>
+<li>... MAP a.networkid USING '/home/nfsserv1/hadoopscripts/tt.py' ...: here tt.py may be accessible via an NFS mount point that is configured identically on all the cluster nodes. </li>
+</ul>
+
+
+</section>
+</body>
+</document>


[11/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java b/metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
deleted file mode 100644
index 7beee42..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
+++ /dev/null
@@ -1,356 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.cache;
-
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.cache.CachedStore.PartitionWrapper;
-import org.apache.hadoop.hive.metastore.cache.CachedStore.StorageDescriptorWrapper;
-import org.apache.hadoop.hive.metastore.cache.CachedStore.TableWrapper;
-import org.apache.hadoop.hive.metastore.hbase.HBaseUtils;
-import org.apache.hive.common.util.HiveStringUtils;
-
-import com.google.common.annotations.VisibleForTesting;
-
-public class SharedCache {
-  private static Map<String, Database> databaseCache = new TreeMap<String, Database>();
-  private static Map<String, TableWrapper> tableCache = new TreeMap<String, TableWrapper>();
-  private static Map<String, PartitionWrapper> partitionCache = new TreeMap<String, PartitionWrapper>();
-  private static Map<String, ColumnStatisticsObj> partitionColStatsCache = new TreeMap<String, ColumnStatisticsObj>();
-  private static Map<ByteArrayWrapper, StorageDescriptorWrapper> sdCache = new HashMap<ByteArrayWrapper, StorageDescriptorWrapper>();
-  private static MessageDigest md;
-
-  static {
-    try {
-      md = MessageDigest.getInstance("MD5");
-    } catch (NoSuchAlgorithmException e) {
-      throw new RuntimeException("should not happen", e);
-    }
-  }
-
-  public static synchronized Database getDatabaseFromCache(String name) {
-    return databaseCache.get(name)!=null?databaseCache.get(name).deepCopy():null;
-  }
-
-  public static synchronized void addDatabaseToCache(String dbName, Database db) {
-    Database dbCopy = db.deepCopy();
-    dbCopy.setName(HiveStringUtils.normalizeIdentifier(dbName));
-    databaseCache.put(dbName, dbCopy);
-  }
-
-  public static synchronized void removeDatabaseFromCache(String dbName) {
-    databaseCache.remove(dbName);
-  }
-
-  public static synchronized List<String> listCachedDatabases() {
-    return new ArrayList<String>(databaseCache.keySet());
-  }
-
-  public static synchronized void alterDatabaseInCache(String dbName, Database newDb) {
-    removeDatabaseFromCache(HiveStringUtils.normalizeIdentifier(dbName));
-    addDatabaseToCache(HiveStringUtils.normalizeIdentifier(newDb.getName()), newDb.deepCopy());
-  }
-
-  public static synchronized int getCachedDatabaseCount() {
-    return databaseCache.size();
-  }
-
-  public static synchronized Table getTableFromCache(String dbName, String tableName) {
-    TableWrapper tblWrapper = tableCache.get(CacheUtils.buildKey(dbName, tableName));
-    if (tblWrapper == null) {
-      return null;
-    }
-    Table t = CacheUtils.assemble(tblWrapper);
-    return t;
-  }
-
-  public static synchronized void addTableToCache(String dbName, String tblName, Table tbl) {
-    Table tblCopy = tbl.deepCopy();
-    tblCopy.setDbName(HiveStringUtils.normalizeIdentifier(dbName));
-    tblCopy.setTableName(HiveStringUtils.normalizeIdentifier(tblName));
-    for (FieldSchema fs : tblCopy.getPartitionKeys()) {
-      fs.setName(HiveStringUtils.normalizeIdentifier(fs.getName()));
-    }
-    TableWrapper wrapper;
-    if (tbl.getSd()!=null) {
-      byte[] sdHash = HBaseUtils.hashStorageDescriptor(tbl.getSd(), md);
-      StorageDescriptor sd = tbl.getSd();
-      increSd(sd, sdHash);
-      tblCopy.setSd(null);
-      wrapper = new TableWrapper(tblCopy, sdHash, sd.getLocation(), sd.getParameters());
-    } else {
-      wrapper = new TableWrapper(tblCopy, null, null, null);
-    }
-    tableCache.put(CacheUtils.buildKey(dbName, tblName), wrapper);
-  }
-
-  public static synchronized void removeTableFromCache(String dbName, String tblName) {
-    TableWrapper tblWrapper = tableCache.remove(CacheUtils.buildKey(dbName, tblName));
-    byte[] sdHash = tblWrapper.getSdHash();
-    if (sdHash!=null) {
-      decrSd(sdHash);
-    }
-  }
-
-  public static synchronized void alterTableInCache(String dbName, String tblName, Table newTable) {
-    removeTableFromCache(dbName, tblName);
-    addTableToCache(HiveStringUtils.normalizeIdentifier(newTable.getDbName()),
-        HiveStringUtils.normalizeIdentifier(newTable.getTableName()), newTable);
-    if (!dbName.equals(newTable.getDbName()) || !tblName.equals(newTable.getTableName())) {
-      List<Partition> partitions = listCachedPartitions(dbName, tblName, -1);
-      for (Partition part : partitions) {
-        removePartitionFromCache(part.getDbName(), part.getTableName(), part.getValues());
-        part.setDbName(HiveStringUtils.normalizeIdentifier(newTable.getDbName()));
-        part.setTableName(HiveStringUtils.normalizeIdentifier(newTable.getTableName()));
-        addPartitionToCache(HiveStringUtils.normalizeIdentifier(newTable.getDbName()),
-            HiveStringUtils.normalizeIdentifier(newTable.getTableName()), part);
-      }
-    }
-  }
-
-  public static synchronized int getCachedTableCount() {
-    return tableCache.size();
-  }
-
-  public static synchronized List<Table> listCachedTables(String dbName) {
-    List<Table> tables = new ArrayList<Table>();
-    for (TableWrapper wrapper : tableCache.values()) {
-      if (wrapper.getTable().getDbName().equals(dbName)) {
-        tables.add(CacheUtils.assemble(wrapper));
-      }
-    }
-    return tables;
-  }
-
-  public static synchronized void updateTableColumnStatistics(String dbName, String tableName,
-      List<ColumnStatisticsObj> statsObjs) {
-    Table tbl = getTableFromCache(dbName, tableName);
-    tbl.getSd().getParameters();
-    List<String> colNames = new ArrayList<>();
-    for (ColumnStatisticsObj statsObj:statsObjs) {
-      colNames.add(statsObj.getColName());
-    }
-    StatsSetupConst.setColumnStatsState(tbl.getParameters(), colNames);
-    alterTableInCache(dbName, tableName, tbl);
-  }
-
-  public static synchronized List<TableMeta> getTableMeta(String dbNames, String tableNames, List<String> tableTypes) {
-    List<TableMeta> tableMetas = new ArrayList<TableMeta>();
-    for (String dbName : listCachedDatabases()) {
-      if (CacheUtils.matches(dbName, dbNames)) {
-        for (Table table : listCachedTables(dbName)) {
-          if (CacheUtils.matches(table.getTableName(), tableNames)) {
-            if (tableTypes==null || tableTypes.contains(table.getTableType())) {
-              TableMeta metaData = new TableMeta(
-                  dbName, table.getTableName(), table.getTableType());
-                metaData.setComments(table.getParameters().get("comment"));
-                tableMetas.add(metaData);
-            }
-          }
-        }
-      }
-    }
-    return tableMetas;
-  }
-
-  public static synchronized void addPartitionToCache(String dbName, String tblName, Partition part) {
-    Partition partCopy = part.deepCopy();
-    PartitionWrapper wrapper;
-    if (part.getSd()!=null) {
-      byte[] sdHash = HBaseUtils.hashStorageDescriptor(part.getSd(), md);
-      StorageDescriptor sd = part.getSd();
-      increSd(sd, sdHash);
-      partCopy.setSd(null);
-      wrapper = new PartitionWrapper(partCopy, sdHash, sd.getLocation(), sd.getParameters());
-    } else {
-      wrapper = new PartitionWrapper(partCopy, null, null, null);
-    }
-    partitionCache.put(CacheUtils.buildKey(dbName, tblName, part.getValues()), wrapper);
-  }
-
-  public static synchronized Partition getPartitionFromCache(String key) {
-    PartitionWrapper wrapper = partitionCache.get(key);
-    if (wrapper == null) {
-      return null;
-    }
-    Partition p = CacheUtils.assemble(wrapper);
-    return p;
-  }
-
-  public static synchronized Partition getPartitionFromCache(String dbName, String tblName, List<String> part_vals) {
-    return getPartitionFromCache(CacheUtils.buildKey(dbName, tblName, part_vals));
-  }
-
-  public static synchronized boolean existPartitionFromCache(String dbName, String tblName, List<String> part_vals) {
-    return partitionCache.containsKey(CacheUtils.buildKey(dbName, tblName, part_vals));
-  }
-
-  public static synchronized Partition removePartitionFromCache(String dbName, String tblName, List<String> part_vals) {
-    PartitionWrapper wrapper = partitionCache.remove(CacheUtils.buildKey(dbName, tblName, part_vals));
-    if (wrapper.getSdHash()!=null) {
-      decrSd(wrapper.getSdHash());
-    }
-    return wrapper.getPartition();
-  }
-
-  public static synchronized List<Partition> listCachedPartitions(String dbName, String tblName, int max) {
-    List<Partition> partitions = new ArrayList<Partition>();
-    int count = 0;
-    for (PartitionWrapper wrapper : partitionCache.values()) {
-      if (wrapper.getPartition().getDbName().equals(dbName)
-          && wrapper.getPartition().getTableName().equals(tblName)
-          && (max == -1 || count < max)) {
-        partitions.add(CacheUtils.assemble(wrapper));
-        count++;
-      }
-    }
-    return partitions;
-  }
-
-  public static synchronized void alterPartitionInCache(String dbName, String tblName, List<String> partVals, Partition newPart) {
-    removePartitionFromCache(dbName, tblName, partVals);
-    addPartitionToCache(HiveStringUtils.normalizeIdentifier(newPart.getDbName()),
-        HiveStringUtils.normalizeIdentifier(newPart.getTableName()), newPart);
-  }
-
-  public static synchronized void updatePartitionColumnStatistics(String dbName, String tableName,
-      List<String> partVals, List<ColumnStatisticsObj> statsObjs) {
-    Partition part = getPartitionFromCache(dbName, tableName, partVals);
-    part.getSd().getParameters();
-    List<String> colNames = new ArrayList<>();
-    for (ColumnStatisticsObj statsObj:statsObjs) {
-      colNames.add(statsObj.getColName());
-    }
-    StatsSetupConst.setColumnStatsState(part.getParameters(), colNames);
-    alterPartitionInCache(dbName, tableName, partVals, part);
-  }
-
-  public static synchronized int getCachedPartitionCount() {
-    return partitionCache.size();
-  }
-
-  public static synchronized ColumnStatisticsObj getCachedPartitionColStats(String key) {
-    return partitionColStatsCache.get(key);
-  }
-
-  public static synchronized void addPartitionColStatsToCache(Map<String, ColumnStatisticsObj> aggrStatsPerPartition) {
-    partitionColStatsCache.putAll(aggrStatsPerPartition);
-  }
-
-
-  public static void increSd(StorageDescriptor sd, byte[] sdHash) {
-    ByteArrayWrapper byteArray = new ByteArrayWrapper(sdHash);
-    if (sdCache.containsKey(byteArray)) {
-      sdCache.get(byteArray).refCount++;
-    } else {
-      StorageDescriptor sdToCache = sd.deepCopy();
-      sdToCache.setLocation(null);
-      sdToCache.setParameters(null);
-      sdCache.put(byteArray, new StorageDescriptorWrapper(sdToCache, 1));
-    }
-  }
-
-  public static void decrSd(byte[] sdHash) {
-    ByteArrayWrapper byteArray = new ByteArrayWrapper(sdHash);
-    StorageDescriptorWrapper sdWrapper = sdCache.get(byteArray);
-    sdWrapper.refCount--;
-    if (sdWrapper.getRefCount() == 0) {
-      sdCache.remove(byteArray);
-    }
-  }
-
-  public static StorageDescriptor getSdFromCache(byte[] sdHash) {
-    StorageDescriptorWrapper sdWrapper = sdCache.get(new ByteArrayWrapper(sdHash));
-    return sdWrapper.getSd();
-  }
-
-  // Replace databases in databaseCache with the new list
-  public static synchronized void refreshDatabases(List<Database> databases) {
-    for (String dbName : listCachedDatabases()) {
-      removeDatabaseFromCache(dbName);
-    }
-    for (Database db : databases) {
-      addDatabaseToCache(db.getName(), db);
-    }
-  }
-
-  // Replace tables in tableCache with the new list
-  public static synchronized void refreshTables(String dbName, List<Table> tables) {
-    for (Table tbl : listCachedTables(dbName)) {
-      removeTableFromCache(dbName, tbl.getTableName());
-    }
-    for (Table tbl : tables) {
-      addTableToCache(dbName, tbl.getTableName(), tbl);
-    }
-  }
-
-  public static void refreshPartitions(String dbName, String tblName, List<Partition> partitions) {
-    List<String> keysToRemove = new ArrayList<String>();
-    for (Map.Entry<String, PartitionWrapper> entry : partitionCache.entrySet()) {
-      if (entry.getValue().getPartition().getDbName().equals(dbName)
-          && entry.getValue().getPartition().getTableName().equals(tblName)) {
-        keysToRemove.add(entry.getKey());
-      }
-    }
-    for (String key : keysToRemove) {
-      partitionCache.remove(key);
-    }
-    for (Partition part : partitions) {
-      addPartitionToCache(dbName, tblName, part);
-    }
-  }
-
-  @VisibleForTesting
-  static Map<String, Database> getDatabaseCache() {
-    return databaseCache;
-  }
-
-  @VisibleForTesting
-  static Map<String, TableWrapper> getTableCache() {
-    return tableCache;
-  }
-
-  @VisibleForTesting
-  static Map<String, PartitionWrapper> getPartitionCache() {
-    return partitionCache;
-  }
-
-  @VisibleForTesting
-  static Map<ByteArrayWrapper, StorageDescriptorWrapper> getSdCache() {
-    return sdCache;
-  }
-
-  @VisibleForTesting
-  static Map<String, ColumnStatisticsObj> getPartitionColStatsCache() {
-    return partitionColStatsCache;
-  }
-}
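
The CachedStore code removed above shares StorageDescriptor objects across partitions by keying them on a content hash and keeping a per-hash reference count (increSd/decrSd/getSdFromCache). A minimal standalone sketch of that pattern follows; the class and method names are invented for illustration and this is not the Hive CachedStore API:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;

    // Illustrative ref-counted cache keyed by a content hash.
    public class RefCountedCache<V> {

      // byte[] compares by identity, so wrap it to get value equality as a map key.
      private static final class HashKey {
        private final byte[] bytes;
        HashKey(byte[] bytes) { this.bytes = bytes; }
        @Override public boolean equals(Object o) {
          return o instanceof HashKey && Arrays.equals(bytes, ((HashKey) o).bytes);
        }
        @Override public int hashCode() { return Arrays.hashCode(bytes); }
      }

      private static final class Entry<T> {
        final T value;
        int refCount;
        Entry(T value, int refCount) { this.value = value; this.refCount = refCount; }
      }

      private final Map<HashKey, Entry<V>> cache = new HashMap<>();

      // Add a reference: reuse the cached copy if the hash is already present.
      public synchronized void incRef(byte[] hash, V value) {
        HashKey key = new HashKey(hash);
        Entry<V> e = cache.get(key);
        if (e != null) {
          e.refCount++;
        } else {
          cache.put(key, new Entry<>(value, 1));
        }
      }

      // Drop a reference: evict the shared copy once nobody points at it.
      public synchronized void decRef(byte[] hash) {
        HashKey key = new HashKey(hash);
        Entry<V> e = cache.get(key);
        if (e != null && --e.refCount == 0) {
          cache.remove(key);
        }
      }

      public synchronized V get(byte[] hash) {
        Entry<V> e = cache.get(new HashKey(hash));
        return e == null ? null : e.value;
      }
    }

Wrapping the byte[] is what makes the lookups work at all, since plain arrays only compare by identity in a HashMap.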

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/events/AlterPartitionEvent.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/events/AlterPartitionEvent.java b/metastore/src/java/org/apache/hadoop/hive/metastore/events/AlterPartitionEvent.java
index e5b8495..8edb50b 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/events/AlterPartitionEvent.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/events/AlterPartitionEvent.java
@@ -27,15 +27,13 @@ public class AlterPartitionEvent extends ListenerEvent {
   private final Partition oldPart;
   private final Partition newPart;
   private final Table table;
-  private final boolean isTruncateOp;
 
-  public AlterPartitionEvent(Partition oldPart, Partition newPart, Table table, boolean isTruncateOp,
-                             boolean status, HMSHandler handler) {
+  public AlterPartitionEvent(Partition oldPart, Partition newPart, Table table,
+      boolean status, HMSHandler handler) {
     super(status, handler);
     this.oldPart = oldPart;
     this.newPart = newPart;
     this.table = table;
-    this.isTruncateOp = isTruncateOp;
   }
 
   /**
@@ -60,12 +58,4 @@ public class AlterPartitionEvent extends ListenerEvent {
   public Table getTable() {
     return table;
   }
-
-  /**
-   * Get the truncate table flag
-   * @return
-   */
-  public boolean getIsTruncateOp() {
-    return isTruncateOp;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/events/AlterTableEvent.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/events/AlterTableEvent.java b/metastore/src/java/org/apache/hadoop/hive/metastore/events/AlterTableEvent.java
index 22ea513..4d6dce2 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/events/AlterTableEvent.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/events/AlterTableEvent.java
@@ -26,13 +26,10 @@ public class AlterTableEvent extends ListenerEvent {
 
   private final Table newTable;
   private final Table oldTable;
-  private final boolean isTruncateOp;
-
-  public AlterTableEvent (Table oldTable, Table newTable, boolean isTruncateOp, boolean status, HMSHandler handler) {
+  public AlterTableEvent (Table oldTable, Table newTable, boolean status, HMSHandler handler) {
     super (status, handler);
     this.oldTable = oldTable;
     this.newTable = newTable;
-    this.isTruncateOp = isTruncateOp;
   }
 
   /**
@@ -48,11 +45,4 @@ public class AlterTableEvent extends ListenerEvent {
   public Table getNewTable() {
     return newTable;
   }
-
-  /**
-   * @return the flag for truncate
-   */
-  public boolean getIsTruncateOp() {
-    return isTruncateOp;
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java b/metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java
index dff1195..7bc0e04 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java
@@ -38,7 +38,6 @@ public class InsertEvent extends ListenerEvent {
   private final String db;
   private final String table;
   private final Map<String, String> keyValues;
-  private final boolean replace;
   private final List<String> files;
   private List<String> fileChecksums = new ArrayList<String>();
 
@@ -57,9 +56,6 @@ public class InsertEvent extends ListenerEvent {
     super(status, handler);
     this.db = db;
     this.table = table;
-
-    // If replace flag is not set by caller, then by default set it to true to maintain backward compatibility
-    this.replace = (insertData.isSetReplace() ? insertData.isReplace() : true);
     this.files = insertData.getFilesAdded();
     GetTableRequest req = new GetTableRequest(db, table);
     req.setCapabilities(HiveMetaStoreClient.TEST_VERSION);
@@ -94,13 +90,6 @@ public class InsertEvent extends ListenerEvent {
   }
 
   /**
-   * @return The replace flag.
-   */
-  public boolean isReplace() {
-    return replace;
-  }
-
-  /**
    * Get list of files created as a result of this DML operation
    *
    * @return list of new files

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java b/metastore/src/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java
index b741549..62aeb8c 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java
@@ -21,18 +21,10 @@ package org.apache.hadoop.hive.metastore.events;
 import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 
-import javax.annotation.concurrent.NotThreadSafe;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
 /**
  * Base class for all the events which are defined for metastore.
- *
- * This class is not thread-safe and not expected to be called in parallel.
  */
 
-@NotThreadSafe
 public abstract class ListenerEvent {
 
   /**
@@ -41,26 +33,6 @@ public abstract class ListenerEvent {
   private final boolean status;
   private final HMSHandler handler;
 
-  /**
-   * Key/value parameters used by listeners to store notifications results
-   * i.e. DbNotificationListener sets a DB_NOTIFICATION_EVENT_ID.
-   *
-   * NotThreadSafe: The parameters map is not expected to be accessed in parallel by Hive, so keep it thread-unsafe
-   * to avoid locking overhead.
-   */
-  private Map<String, String> parameters;
-
-  /** For performance reasons, it is preferable to cache the unmodifiable parameters map that is returned by the
-   * {@link #getParameters()} method. {@link #putParameter(String, String)} is expected to be called fewer times
-   * than {@link #getParameters()}, so performance may be better with this cache.
-   */
-  private Map<String, String> unmodifiableParameters;
-
-  // Listener parameters aren't expected to have many values. So far only
-  // DbNotificationListener will add a parameter; let's set a low initial capacity for now.
-  // If we find out many parameters are added, then we can adjust or remove this initial capacity.
-  private static final int PARAMETERS_INITIAL_CAPACITY = 1;
-
   // Properties passed by the client, to be used in execution hooks.
   private EnvironmentContext environmentContext = null;
 
@@ -68,8 +40,6 @@ public abstract class ListenerEvent {
     super();
     this.status = status;
     this.handler = handler;
-    this.parameters = new HashMap<>(PARAMETERS_INITIAL_CAPACITY);
-    updateUnmodifiableParameters();
   }
 
   /**
@@ -79,12 +49,6 @@ public abstract class ListenerEvent {
     return status;
   }
 
-  /**
-   * Set the environment context of the event.
-   *
-   * @param environmentContext An EnvironmentContext object that contains environment parameters sent from
-   *                           the HMS client.
-   */
   public void setEnvironmentContext(EnvironmentContext environmentContext) {
     this.environmentContext = environmentContext;
   }
@@ -102,74 +66,4 @@ public abstract class ListenerEvent {
   public HMSHandler getHandler() {
     return handler;
   }
-
-  /**
-   * Return all parameters of the listener event. Parameters are read-only (unmodifiable map). If a new parameter
-   * must be added, please use the putParameter() method.
-   *
-   *
-   * @return A map object with all parameters.
-   */
-  public final Map<String, String> getParameters() {
-    return unmodifiableParameters;
-  }
-
-  /**
-   * Add a new parameter to the listener event.
-   *
-   * Overwriting an existing parameter is not allowed, and an exception may be thrown to avoid a misconfiguration
-   * between listeners setting the same parameters.
-   *
-   * @param name Name of the parameter.
-   * @param value Value of the parameter.
-   * @throws IllegalStateException if a parameter already exists.
-   */
-  public void putParameter(String name, String value) {
-    putParameterIfAbsent(name, value);
-    updateUnmodifiableParameters();
-  }
-
-  /**
-   * Add a new set of parameters to the listener event.
-   *
-   * Overwriting an existing parameter is not allowed, and an exception may be thrown to avoid a misconfiguration
-   * between listeners setting the same parameters.
-   *
-   * @param parameters A Map object with the set of parameters to add.
-   * @throws IllegalStateException if a parameter already exists.
-   */
-  public void putParameters(final Map<String, String> parameters) {
-    if (parameters != null) {
-      for (Map.Entry<String, String> entry : parameters.entrySet()) {
-        putParameterIfAbsent(entry.getKey(), entry.getValue());
-      }
-
-      updateUnmodifiableParameters();
-    }
-  }
-
-  /**
-   * Add a parameter to the listener event only if it is not already present.
-   *
-   * Overwriting an existing parameter is not allowed, and an exception may be thrown to avoid a misconfiguration
-   * between listeners setting the same parameters.
-   *
-   * @param name Name of the parameter.
-   * @param value Value of the parameter.
-   * @throws IllegalStateException if a parameter already exists.
-   */
-  private void putParameterIfAbsent(String name, String value) {
-    if (parameters.containsKey(name)) {
-      throw new IllegalStateException("Invalid attempt to overwrite a read-only parameter: " + name);
-    }
-
-    parameters.put(name, value);
-  }
-
-  /**
-   * Keeps a cache of unmodifiable parameters returned by the getParameters() method.
-   */
-  private void updateUnmodifiableParameters() {
-    unmodifiableParameters = Collections.unmodifiableMap(parameters);
-  }
 }
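
The ListenerEvent hunk above strips out the write-once parameters map. The idea being reverted is small enough to sketch on its own: keep a private map, hand callers a cached unmodifiable view, and refuse to overwrite an existing key. The class below is a hypothetical stand-in, not the metastore class:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    // Write-once parameter map: reads get an unmodifiable view, writes refuse
    // to overwrite an existing key.
    class WriteOnceParameters {
      private final Map<String, String> parameters = new HashMap<>(1);
      private Map<String, String> unmodifiableView =
          Collections.unmodifiableMap(parameters);

      Map<String, String> getParameters() {
        return unmodifiableView;   // callers cannot mutate the map through this view
      }

      void putParameter(String name, String value) {
        if (parameters.containsKey(name)) {
          throw new IllegalStateException(
              "Invalid attempt to overwrite a read-only parameter: " + name);
        }
        parameters.put(name, value);
        // Re-wrap after each write, mirroring the removed updateUnmodifiableParameters();
        // the wrapper is a live view of the backing map, so this mainly documents intent.
        unmodifiableView = Collections.unmodifiableMap(parameters);
      }
    }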

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
index 945e99e..1340645 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
@@ -2708,8 +2708,6 @@ public class HBaseStore implements RawStore {
 
   @Override
   public List<SQLPrimaryKey> getPrimaryKeys(String db_name, String tbl_name) throws MetaException {
-    db_name = HiveStringUtils.normalizeIdentifier(db_name);
-    tbl_name = HiveStringUtils.normalizeIdentifier(tbl_name);
     boolean commit = false;
     openTransaction();
     try {
@@ -2728,10 +2726,6 @@ public class HBaseStore implements RawStore {
   public List<SQLForeignKey> getForeignKeys(String parent_db_name, String parent_tbl_name,
                                             String foreign_db_name, String foreign_tbl_name)
       throws MetaException {
-    parent_db_name = parent_db_name!=null?HiveStringUtils.normalizeIdentifier(parent_db_name):null;
-    parent_tbl_name = parent_tbl_name!=null?HiveStringUtils.normalizeIdentifier(parent_tbl_name):null;
-    foreign_db_name = HiveStringUtils.normalizeIdentifier(foreign_db_name);
-    foreign_tbl_name = HiveStringUtils.normalizeIdentifier(foreign_tbl_name);
     boolean commit = false;
     openTransaction();
     try {
@@ -2776,9 +2770,6 @@ public class HBaseStore implements RawStore {
     // This is something of a pain, since we have to search both primary key and foreign key to see
     // which they want to drop.
     boolean commit = false;
-    dbName = HiveStringUtils.normalizeIdentifier(dbName);
-    tableName = HiveStringUtils.normalizeIdentifier(tableName);
-    constraintName = HiveStringUtils.normalizeIdentifier(constraintName);
     openTransaction();
     try {
       List<SQLPrimaryKey> pk = getHBase().getPrimaryKey(dbName, tableName);
@@ -2818,12 +2809,6 @@ public class HBaseStore implements RawStore {
   @Override
   public void addPrimaryKeys(List<SQLPrimaryKey> pks) throws InvalidObjectException, MetaException {
     boolean commit = false;
-    for (SQLPrimaryKey pk : pks) {
-      pk.setTable_db(HiveStringUtils.normalizeIdentifier(pk.getTable_db()));
-      pk.setTable_name(HiveStringUtils.normalizeIdentifier(pk.getTable_name()));
-      pk.setColumn_name(HiveStringUtils.normalizeIdentifier(pk.getColumn_name()));
-      pk.setPk_name(HiveStringUtils.normalizeIdentifier(pk.getPk_name()));
-    }
     openTransaction();
     try {
       List<SQLPrimaryKey> currentPk =
@@ -2845,13 +2830,6 @@ public class HBaseStore implements RawStore {
   @Override
   public void addForeignKeys(List<SQLForeignKey> fks) throws InvalidObjectException, MetaException {
     boolean commit = false;
-    for (SQLForeignKey fk : fks) {
-      fk.setPktable_db(HiveStringUtils.normalizeIdentifier(fk.getPktable_db()));
-      fk.setPktable_name(HiveStringUtils.normalizeIdentifier(fk.getPktable_name()));
-      fk.setFktable_db(HiveStringUtils.normalizeIdentifier(fk.getFktable_db()));
-      fk.setFktable_name(HiveStringUtils.normalizeIdentifier(fk.getFktable_name()));
-      fk.setFk_name(HiveStringUtils.normalizeIdentifier(fk.getFk_name()));
-    }
     openTransaction();
     try {
       // Fetch the existing keys (if any) and add in these new ones
@@ -2870,13 +2848,6 @@ public class HBaseStore implements RawStore {
   }
 
   @Override
-  public Map<String, ColumnStatisticsObj> getAggrColStatsForTablePartitions(String dbName,
-      String tableName) throws MetaException, NoSuchObjectException {
-    // TODO: see if it makes sense to implement this here
-    return null;
-  }
-
-  @Override
   public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) {
     // TODO: Auto-generated method stub
     throw new UnsupportedOperationException();

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
index 3172f92..94087b1 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
@@ -619,7 +619,7 @@ public class HBaseUtils {
    * @param md message descriptor to use to generate the hash
    * @return the hash as a byte array
    */
-  public static byte[] hashStorageDescriptor(StorageDescriptor sd, MessageDigest md)  {
+  static byte[] hashStorageDescriptor(StorageDescriptor sd, MessageDigest md)  {
     // Note all maps and lists have to be absolutely sorted.  Otherwise we'll produce different
     // results for hashes based on the OS or JVM being used.
     md.reset();
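
The comment kept in hashStorageDescriptor above is the important part: any map or list fed into the digest has to be sorted first, or the hash depends on iteration order and can differ across JVMs. A small illustrative example of the sorted-digest idea; the method and the choice of MD5 are only for the sketch, not taken from the Hive code:

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import java.util.Map;
    import java.util.TreeMap;

    // Deterministic digest of a map: copy into a TreeMap so entries are visited
    // in key order regardless of the original Map implementation.
    public class StableHash {
      public static byte[] hashParameters(Map<String, String> params)
          throws NoSuchAlgorithmException {
        MessageDigest md = MessageDigest.getInstance("MD5");
        md.reset();
        for (Map.Entry<String, String> e : new TreeMap<>(params).entrySet()) {
          md.update(e.getKey().getBytes(StandardCharsets.UTF_8));
          md.update(e.getValue().getBytes(StandardCharsets.UTF_8));
        }
        return md.digest();
      }
    }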

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AlterPartitionMessage.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AlterPartitionMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AlterPartitionMessage.java
index e9ed7e5..ed6080b 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AlterPartitionMessage.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AlterPartitionMessage.java
@@ -31,8 +31,6 @@ public abstract class AlterPartitionMessage extends EventMessage {
 
   public abstract String getTable();
 
-  public abstract boolean getIsTruncateOp();
-
   public abstract Map<String,String> getKeyValues();
 
   public abstract Table getTableObj() throws Exception;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AlterTableMessage.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AlterTableMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AlterTableMessage.java
index 39a87bc..5487123 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AlterTableMessage.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AlterTableMessage.java
@@ -28,8 +28,6 @@ public abstract class AlterTableMessage extends EventMessage {
 
   public abstract String getTable();
 
-  public abstract boolean getIsTruncateOp();
-
   public abstract Table getTableObjBefore() throws Exception;
 
   public abstract Table getTableObjAfter() throws Exception;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java
index 8205c25..a5414d1 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hive.metastore.messaging;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.messaging.event.filters.DatabaseAndTableFilter;
 import org.apache.thrift.TException;
 
 import java.io.IOException;
@@ -31,10 +30,88 @@ import java.util.List;
 
 public class EventUtils {
 
+  /**
+   * Utility function that constructs a notification filter to match a given db name and/or table name.
+   * If dbName == null, fetches all warehouse events.
+   * If dbName != null, but tableName == null, fetches all events for the db
+   * If dbName != null && tableName != null, fetches all events for the specified table
+   * @param dbName
+   * @param tableName
+   * @return
+   */
+  public static IMetaStoreClient.NotificationFilter getDbTblNotificationFilter(final String dbName, final String tableName){
+    return new IMetaStoreClient.NotificationFilter() {
+      @Override
+      public boolean accept(NotificationEvent event) {
+        if (event == null){
+          return false; // get rid of trivial case first, so that we can safely assume non-null
+        }
+        if (dbName == null){
+          return true; // if our dbName is null, we're interested in all wh events
+        }
+        if (dbName.equalsIgnoreCase(event.getDbName())){
+          if ( (tableName == null)
+              // if our dbName is equal, but tableName is blank, we're interested in this db-level event
+              || (tableName.equalsIgnoreCase(event.getTableName()))
+            // table level event that matches us
+              ){
+            return true;
+          }
+        }
+        return false;
+      }
+    };
+  }
+
+  public static IMetaStoreClient.NotificationFilter restrictByMessageFormat(final String messageFormat){
+    return new IMetaStoreClient.NotificationFilter() {
+      @Override
+      public boolean accept(NotificationEvent event) {
+        if (event == null){
+          return false; // get rid of trivial case first, so that we can safely assume non-null
+        }
+        if (messageFormat == null){
+          return true; // let's say that passing null in will not do any filtering.
+        }
+        if (messageFormat.equalsIgnoreCase(event.getMessageFormat())){
+          return true;
+        }
+        return false;
+      }
+    };
+  }
+
+  public static IMetaStoreClient.NotificationFilter getEventBoundaryFilter(final Long eventFrom, final Long eventTo){
+    return new IMetaStoreClient.NotificationFilter() {
+      @Override
+      public boolean accept(NotificationEvent event) {
+        if ( (event == null) || (event.getEventId() < eventFrom) || (event.getEventId() > eventTo)) {
+          return false;
+        }
+        return true;
+      }
+    };
+  }
+
+  public static IMetaStoreClient.NotificationFilter andFilter(
+      final IMetaStoreClient.NotificationFilter... filters ) {
+    return new IMetaStoreClient.NotificationFilter() {
+      @Override
+      public boolean accept(NotificationEvent event) {
+        for (IMetaStoreClient.NotificationFilter filter : filters){
+          if (!filter.accept(event)){
+            return false;
+          }
+        }
+        return true;
+      }
+    };
+  }
+
   public interface NotificationFetcher {
-    int getBatchSize() throws IOException;
-    long getCurrentNotificationEventId() throws IOException;
-    List<NotificationEvent> getNextNotificationEvents(
+    public int getBatchSize() throws IOException;
+    public long getCurrentNotificationEventId() throws IOException;
+    public List<NotificationEvent> getNextNotificationEvents(
         long pos, IMetaStoreClient.NotificationFilter filter) throws IOException;
   }
 
@@ -100,7 +177,7 @@ public class EventUtils {
     public NotificationEventIterator(
         NotificationFetcher nfetcher, long eventFrom, int maxEvents,
         String dbName, String tableName) throws IOException {
-      init(nfetcher, eventFrom, maxEvents, new DatabaseAndTableFilter(dbName, tableName));
+      init(nfetcher, eventFrom, maxEvents, EventUtils.getDbTblNotificationFilter(dbName, tableName));
       // using init(..) instead of this(..) because the EventUtils.getDbTblNotificationFilter
       // is an operation that needs to run before delegating to the other ctor, and this messes up chaining
       // ctors
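
The revert restores the inline filter helpers above in place of the event.filters classes deleted later in this patch. A hypothetical caller composing them might look like the sketch below; the database, table, event-id window, and message format are made-up values:

    import java.util.List;

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.NotificationEvent;
    import org.apache.hadoop.hive.metastore.messaging.EventUtils;

    // Compose several notification filters and apply them to a batch of events.
    public class FilterUsageSketch {
      static void replay(List<NotificationEvent> events) {
        IMetaStoreClient.NotificationFilter filter = EventUtils.andFilter(
            EventUtils.getDbTblNotificationFilter("default", "sales"),
            EventUtils.getEventBoundaryFilter(100L, 200L),
            EventUtils.restrictByMessageFormat("json-0.2"));

        for (NotificationEvent event : events) {
          if (filter.accept(event)) {
            // handle the event; replication code would apply it here
          }
        }
      }
    }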

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/InsertMessage.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/InsertMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/InsertMessage.java
index 6d146e0..3d16721 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/InsertMessage.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/InsertMessage.java
@@ -37,12 +37,6 @@ public abstract class InsertMessage extends EventMessage {
   public abstract String getTable();
 
   /**
-   * Getter for the replace flag being insert into/overwrite
-   * @return Replace flag to represent INSERT INTO or INSERT OVERWRITE (Boolean).
-   */
-  public abstract boolean isReplace();
-
-  /**
    * Get the map of partition keyvalues.  Will be null if this insert is to a table and not a
    * partition.
    * @return Map of partition keyvalues, or null.

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java
index 1bd52a8..aa770f2 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java
@@ -149,10 +149,9 @@ public abstract class MessageFactory {
    * and some are not yet supported.
    * @param before The table before the alter
    * @param after The table after the alter
-   * @param isTruncateOp Flag to denote truncate table
    * @return
    */
-  public abstract AlterTableMessage buildAlterTableMessage(Table before, Table after, boolean isTruncateOp);
+  public abstract AlterTableMessage buildAlterTableMessage(Table before, Table after);
 
   /**
    * Factory method for DropTableMessage.
@@ -176,11 +175,10 @@ public abstract class MessageFactory {
    * @param table The table in which the partition is being altered
    * @param before The partition before it was altered
    * @param after The partition after it was altered
-   * @param isTruncateOp Flag to denote truncate partition
    * @return a new AlterPartitionMessage
    */
   public abstract AlterPartitionMessage buildAlterPartitionMessage(Table table, Partition before,
-                                                                   Partition after, boolean isTruncateOp);
+                                                                   Partition after);
 
   /**
    * Factory method for DropPartitionMessage.
@@ -233,10 +231,9 @@ public abstract class MessageFactory {
    * @param table Name of the table the insert occurred in
    * @param partVals Partition values for the partition that the insert occurred in, may be null if
    *          the insert was done into a non-partitioned table
-   * @param replace Flag to represent if INSERT OVERWRITE or INSERT INTO
    * @param files Iterator of file created
    * @return instance of InsertMessage
    */
   public abstract InsertMessage buildInsertMessage(String db, String table,
-      Map<String, String> partVals, boolean replace, Iterator<String> files);
+      Map<String, String> partVals, Iterator<String> files);
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/PartitionFiles.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/PartitionFiles.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/PartitionFiles.java
index 4fd7f8c..b10b8a8 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/PartitionFiles.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/PartitionFiles.java
@@ -22,13 +22,10 @@ import java.util.Iterator;
 import java.util.List;
 
 import com.google.common.collect.Lists;
-import org.codehaus.jackson.annotate.JsonProperty;
 
 public class PartitionFiles {
 
-  @JsonProperty
   private String partitionName;
-  @JsonProperty
   private List<String> files;
 
   public PartitionFiles(String partitionName, Iterator<String> files) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/AndFilter.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/AndFilter.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/AndFilter.java
deleted file mode 100644
index d6429f6..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/AndFilter.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.messaging.event.filters;
-
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-
-public class AndFilter implements IMetaStoreClient.NotificationFilter {
-  final IMetaStoreClient.NotificationFilter[] filters;
-
-  public AndFilter(final IMetaStoreClient.NotificationFilter... filters) {
-    this.filters = filters;
-  }
-
-  @Override
-  public boolean accept(final NotificationEvent event) {
-    for (IMetaStoreClient.NotificationFilter filter : filters) {
-      if (!filter.accept(event)) {
-        return false;
-      }
-    }
-    return true;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/BasicFilter.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/BasicFilter.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/BasicFilter.java
deleted file mode 100644
index 5294063..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/BasicFilter.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.messaging.event.filters;
-
-import org.apache.hadoop.hive.metastore.IMetaStoreClient.NotificationFilter;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-
-public abstract class BasicFilter implements NotificationFilter {
-  @Override
-  public boolean accept(final NotificationEvent event) {
-    if (event == null) {
-      return false; // get rid of trivial case first, so that we can safely assume non-null
-    }
-    return shouldAccept(event);
-  }
-
-  abstract boolean shouldAccept(final NotificationEvent event);
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java
deleted file mode 100644
index 4a7ca6d..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.messaging.event.filters;
-
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-
-/**
- * Utility function that constructs a notification filter to match a given db name and/or table name.
- * If dbName == null, fetches all warehouse events.
- * If dbName != null, but tableName == null, fetches all events for the db
- * If dbName != null && tableName != null, fetches all events for the specified table
- */
-public class DatabaseAndTableFilter extends BasicFilter {
-  private final String databaseName, tableName;
-
-  public DatabaseAndTableFilter(final String databaseName, final String tableName) {
-    this.databaseName = databaseName;
-    this.tableName = tableName;
-  }
-
-  @Override
-  boolean shouldAccept(final NotificationEvent event) {
-    if (databaseName == null) {
-      return true; // if our dbName is null, we're interested in all wh events
-    }
-    if (databaseName.equalsIgnoreCase(event.getDbName())) {
-      if ((tableName == null)
-          // if our dbName is equal, but tableName is blank, we're interested in this db-level event
-          || (tableName.equalsIgnoreCase(event.getTableName()))
-        // table level event that matches us
-          ) {
-        return true;
-      }
-    }
-    return false;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/EventBoundaryFilter.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/EventBoundaryFilter.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/EventBoundaryFilter.java
deleted file mode 100644
index 137b4ce..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/EventBoundaryFilter.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.messaging.event.filters;
-
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-
-public class EventBoundaryFilter extends BasicFilter {
-  private final long eventFrom, eventTo;
-
-  public EventBoundaryFilter(final long eventFrom, final long eventTo) {
-    this.eventFrom = eventFrom;
-    this.eventTo = eventTo;
-  }
-
-  @Override
-  boolean shouldAccept(final NotificationEvent event) {
-    return eventFrom <= event.getEventId() && event.getEventId() <= eventTo;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/MessageFormatFilter.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/MessageFormatFilter.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/MessageFormatFilter.java
deleted file mode 100644
index 4e91ee6..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/MessageFormatFilter.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.messaging.event.filters;
-
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-
-public class MessageFormatFilter extends BasicFilter {
-  private final String format;
-
-  public MessageFormatFilter(String format) {
-    this.format = format;
-  }
-
-  @Override
-  boolean shouldAccept(final NotificationEvent event) {
-    if (format == null) {
-      return true; // let's say that passing null in will not do any filtering.
-    }
-    return format.equalsIgnoreCase(event.getMessageFormat());
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionMessage.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionMessage.java
index bd7776c..dd1bf3c 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionMessage.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionMessage.java
@@ -37,9 +37,6 @@ public class JSONAlterPartitionMessage extends AlterPartitionMessage {
   String server, servicePrincipal, db, table, tableObjJson;
 
   @JsonProperty
-  String isTruncateOp;
-
-  @JsonProperty
   Long timestamp;
 
   @JsonProperty
@@ -55,12 +52,11 @@ public class JSONAlterPartitionMessage extends AlterPartitionMessage {
   }
 
   public JSONAlterPartitionMessage(String server, String servicePrincipal, Table tableObj,
-      Partition partitionObjBefore, Partition partitionObjAfter, boolean isTruncateOp, Long timestamp) {
+      Partition partitionObjBefore, Partition partitionObjAfter, Long timestamp) {
     this.server = server;
     this.servicePrincipal = servicePrincipal;
     this.db = tableObj.getDbName();
     this.table = tableObj.getTableName();
-    this.isTruncateOp = Boolean.toString(isTruncateOp);
     this.timestamp = timestamp;
     this.keyValues = JSONMessageFactory.getPartitionKeyValues(tableObj, partitionObjBefore);
     try {
@@ -99,9 +95,6 @@ public class JSONAlterPartitionMessage extends AlterPartitionMessage {
   }
 
   @Override
-  public boolean getIsTruncateOp() { return Boolean.parseBoolean(isTruncateOp); }
-
-  @Override
   public Map<String, String> getKeyValues() {
     return keyValues;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterTableMessage.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterTableMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterTableMessage.java
index 58eb1a7..792015e 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterTableMessage.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterTableMessage.java
@@ -32,9 +32,6 @@ public class JSONAlterTableMessage extends AlterTableMessage {
   String server, servicePrincipal, db, table, tableObjBeforeJson, tableObjAfterJson;
 
   @JsonProperty
-  String isTruncateOp;
-
-  @JsonProperty
   Long timestamp;
 
   /**
@@ -44,12 +41,11 @@ public class JSONAlterTableMessage extends AlterTableMessage {
   }
 
   public JSONAlterTableMessage(String server, String servicePrincipal, Table tableObjBefore, Table tableObjAfter,
-      boolean isTruncateOp, Long timestamp) {
+      Long timestamp) {
     this.server = server;
     this.servicePrincipal = servicePrincipal;
     this.db = tableObjBefore.getDbName();
     this.table = tableObjBefore.getTableName();
-    this.isTruncateOp = Boolean.toString(isTruncateOp);
     this.timestamp = timestamp;
     try {
       this.tableObjBeforeJson = JSONMessageFactory.createTableObjJson(tableObjBefore);
@@ -86,9 +82,6 @@ public class JSONAlterTableMessage extends AlterTableMessage {
   }
 
   @Override
-  public boolean getIsTruncateOp() { return Boolean.parseBoolean(isTruncateOp); }
-
-  @Override
   public Table getTableObjBefore() throws Exception {
     return (Table) JSONMessageFactory.getTObj(tableObjBeforeJson,Table.class);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java
index c059d47..e1316a4 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java
@@ -40,9 +40,6 @@ public class JSONInsertMessage extends InsertMessage {
   Long timestamp;
 
   @JsonProperty
-  String replace;
-
-  @JsonProperty
   List<String> files;
 
   @JsonProperty
@@ -55,13 +52,12 @@ public class JSONInsertMessage extends InsertMessage {
   }
 
   public JSONInsertMessage(String server, String servicePrincipal, String db, String table,
-      Map<String, String> partKeyVals, boolean replace, Iterator<String> fileIter, Long timestamp) {
+      Map<String, String> partKeyVals, Iterator<String> fileIter, Long timestamp) {
     this.server = server;
     this.servicePrincipal = servicePrincipal;
     this.db = db;
     this.table = table;
     this.timestamp = timestamp;
-    this.replace = Boolean.toString(replace);
     this.partKeyVals = partKeyVals;
     this.files = Lists.newArrayList(fileIter);
     checkValid();
@@ -103,9 +99,6 @@ public class JSONInsertMessage extends InsertMessage {
   }
 
   @Override
-  public boolean isReplace() { return Boolean.parseBoolean(replace); }
-
-  @Override
   public String toString() {
     try {
       return JSONMessageDeserializer.mapper.writeValueAsString(this);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializer.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializer.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializer.java
index 40ef5fb..41732c7 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializer.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializer.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hive.metastore.messaging.InsertMessage;
 import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer;
 import org.codehaus.jackson.map.DeserializationConfig;
 import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.SerializationConfig;
 
 /**
  * MessageDeserializer implementation, for deserializing from JSON strings.
@@ -47,9 +46,6 @@ public class JSONMessageDeserializer extends MessageDeserializer {
 
   static {
     mapper.configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, false);
-    mapper.configure(SerializationConfig.Feature.AUTO_DETECT_GETTERS, false);
-    mapper.configure(SerializationConfig.Feature.AUTO_DETECT_IS_GETTERS, false);
-    mapper.configure(SerializationConfig.Feature.AUTO_DETECT_FIELDS, false);
   }
 
   @Override
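
The hunk above removes the serialization settings from the shared ObjectMapper, matching the removal of @JsonProperty from PartitionFiles earlier in the patch. For reference, the removed configuration made the mapper ignore anything not explicitly annotated; a standalone sketch under that assumption, using Jackson 1.x as the surrounding code does:

    import org.codehaus.jackson.annotate.JsonProperty;
    import org.codehaus.jackson.map.DeserializationConfig;
    import org.codehaus.jackson.map.ObjectMapper;
    import org.codehaus.jackson.map.SerializationConfig;

    // With auto-detection switched off, only members carrying @JsonProperty
    // are written out; unknown fields are tolerated when reading.
    public class MapperDemo {
      static final ObjectMapper mapper = new ObjectMapper();
      static {
        mapper.configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        mapper.configure(SerializationConfig.Feature.AUTO_DETECT_GETTERS, false);
        mapper.configure(SerializationConfig.Feature.AUTO_DETECT_IS_GETTERS, false);
        mapper.configure(SerializationConfig.Feature.AUTO_DETECT_FIELDS, false);
      }

      static class Example {
        @JsonProperty
        String visible = "serialized";   // included: explicitly annotated
        String hidden = "skipped";       // excluded once field auto-detection is off
      }

      public static void main(String[] args) throws Exception {
        System.out.println(mapper.writeValueAsString(new Example()));  // {"visible":"serialized"}
      }
    }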

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java
index 04a4041..3406afb 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java
@@ -28,10 +28,6 @@ import javax.annotation.Nullable;
 
 import com.google.common.collect.Iterables;
 
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.Index;
@@ -108,8 +104,8 @@ public class JSONMessageFactory extends MessageFactory {
   }
 
   @Override
-  public AlterTableMessage buildAlterTableMessage(Table before, Table after, boolean isTruncateOp) {
-    return new JSONAlterTableMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, before, after, isTruncateOp, now());
+  public AlterTableMessage buildAlterTableMessage(Table before, Table after) {
+    return new JSONAlterTableMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, before, after, now());
   }
 
   @Override
@@ -127,8 +123,8 @@ public class JSONMessageFactory extends MessageFactory {
 
   @Override
   public AlterPartitionMessage buildAlterPartitionMessage(Table table, Partition before,
-      Partition after, boolean isTruncateOp) {
-    return new JSONAlterPartitionMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, table, before, after, isTruncateOp,
+      Partition after) {
+    return new JSONAlterPartitionMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, table, before, after,
         now());
   }
 
@@ -165,9 +161,10 @@ public class JSONMessageFactory extends MessageFactory {
   }
 
   @Override
-  public InsertMessage buildInsertMessage(String db, String table, Map<String, String> partKeyVals, boolean replace,
+  public InsertMessage buildInsertMessage(String db, String table, Map<String, String> partKeyVals,
       Iterator<String> fileIter) {
-    return new JSONInsertMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, db, table, partKeyVals, replace, fileIter, now());
+    return new JSONInsertMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, db, table, partKeyVals,
+        fileIter, now());
   }
 
   private long now() {
@@ -301,4 +298,5 @@ public class JSONMessageFactory extends MessageFactory {
     };
     return getTObjs(Iterables.transform(jsonArrayIterator, textExtractor), objClass);
   }
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java b/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
index 10fcbea..63be7b7 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
@@ -284,7 +284,7 @@ public class ExpressionTree {
     //can only support "=" and "!=" for now, because our JDO lib is buggy when
     // using objects from map.get()
     private static final Set<Operator> TABLE_FILTER_OPS = Sets.newHashSet(
-        Operator.EQUALS, Operator.NOTEQUALS, Operator.NOTEQUALS2, Operator.LIKE);
+        Operator.EQUALS, Operator.NOTEQUALS, Operator.NOTEQUALS2);
 
     private void generateJDOFilterOverTables(Map<String, Object> params,
         FilterBuilder filterBuilder) throws MetaException {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 970038d..d378d06 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -52,7 +52,6 @@ import javax.sql.DataSource;
 
 import java.io.IOException;
 import java.io.PrintWriter;
-import java.nio.ByteBuffer;
 import java.sql.*;
 import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
@@ -148,7 +147,6 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
   static final private Logger LOG = LoggerFactory.getLogger(TxnHandler.class.getName());
 
   static private DataSource connPool;
-  private static DataSource connPoolMutex;
   static private boolean doRetryOnConnPool = false;
   
   private enum OpertaionType {
@@ -205,8 +203,8 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
   private int deadlockCnt;
   private long deadlockRetryInterval;
   protected HiveConf conf;
-  private static DatabaseProduct dbProduct;
-  private static SQLGenerator sqlGenerator;
+  protected DatabaseProduct dbProduct;
+  private SQLGenerator sqlGenerator;
 
   // (End user) Transaction timeout, in milliseconds.
   private long timeout;
@@ -225,6 +223,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
    */
   private final static ConcurrentHashMap<String, Semaphore> derbyKey2Lock = new ConcurrentHashMap<>();
   private static final String hostname = ServerUtils.hostname();
+  private static volatile boolean dumpConfig = true;
 
   // Private methods should never catch SQLException and then throw MetaException.  The public
   // methods depend on SQLException coming back so they can detect and handle deadlocks.  Private
@@ -248,36 +247,20 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
 
     checkQFileTestHack();
 
-    synchronized (TxnHandler.class) {
-      if (connPool == null) {
-        //only do this once per JVM; useful for support
-        LOG.info(HiveConfUtil.dumpConfig(conf).toString());
-
-        Connection dbConn = null;
-        // Set up the JDBC connection pool
-        try {
-          int maxPoolSize = conf.getIntVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_MAX_CONNECTIONS);
-          long getConnectionTimeoutMs = 30000;
-          connPool = setupJdbcConnectionPool(conf, maxPoolSize, getConnectionTimeoutMs);
-          /*the mutex pools should ideally be somewhat larger since some operations require 1
-           connection from each pool and we want to avoid taking a connection from primary pool
-           and then blocking because mutex pool is empty.  There is only 1 thread in any HMS trying
-           to mutex on each MUTEX_KEY except MUTEX_KEY.CheckLock.  The CheckLock operation gets a
-           connection from connPool first, then connPoolMutex.  All others, go in the opposite
-           order (not very elegant...).  So number of connection requests for connPoolMutex cannot
-           exceed (size of connPool + MUTEX_KEY.values().length - 1).*/
-          connPoolMutex = setupJdbcConnectionPool(conf, maxPoolSize + MUTEX_KEY.values().length, getConnectionTimeoutMs);
-          dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
-          determineDatabaseProduct(dbConn);
-          sqlGenerator = new SQLGenerator(dbProduct, conf);
-        } catch (SQLException e) {
-          String msg = "Unable to instantiate JDBC connection pooling, " + e.getMessage();
-          LOG.error(msg);
-          throw new RuntimeException(e);
-        } finally {
-          closeDbConn(dbConn);
-        }
-      }
+    Connection dbConn = null;
+    // Set up the JDBC connection pool
+    try {
+      setupJdbcConnectionPool(conf);
+      dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+      determineDatabaseProduct(dbConn);
+      sqlGenerator = new SQLGenerator(dbProduct, conf);
+    } catch (SQLException e) {
+      String msg = "Unable to instantiate JDBC connection pooling, " + e.getMessage();
+      LOG.error(msg);
+      throw new RuntimeException(e);
+    }
+    finally {
+      closeDbConn(dbConn);
     }
 
     timeout = HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS);
@@ -287,6 +270,11 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
     retryLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HMSHANDLERATTEMPTS);
     deadlockRetryInterval = retryInterval / 10;
     maxOpenTxns = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_MAX_OPEN_TXNS);
+    if(dumpConfig) {
+      LOG.info(HiveConfUtil.dumpConfig(conf).toString());
+      //only do this once per JVM; useful for support
+      dumpConfig = false;
+    }
   }
   @Override
   @RetrySemantics.ReadOnly
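
Both versions of the TxnHandler setup above try to do certain work only once per JVM: the removed code guards pool creation and the config dump with a synchronized block and a null check, while the restored code brings back a static volatile dumpConfig flag just for the dump. A tiny illustrative guard for the same once-per-JVM idea; AtomicBoolean is used here purely for the sketch, whereas the restored Hive code tolerates a rare duplicate log line with its volatile flag:

    import java.util.concurrent.atomic.AtomicBoolean;

    // Run a piece of startup work at most once per JVM, even with concurrent callers.
    public class OncePerJvm {
      private static final AtomicBoolean done = new AtomicBoolean(false);

      public static void logStartupInfoOnce(Runnable dump) {
        if (done.compareAndSet(false, true)) {
          dump.run();   // only the first caller in this JVM gets here
        }
      }

      public static void main(String[] args) {
        for (int i = 0; i < 3; i++) {
          logStartupInfoOnce(() -> System.out.println("config dumped"));
        }
        // prints "config dumped" exactly once
      }
    }
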
@@ -379,7 +367,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
       try {
         /**
          * This runs at READ_COMMITTED for exactly the same reason as {@link #getOpenTxnsInfo()}
-         */
+         */
         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
         stmt = dbConn.createStatement();
         String s = "select ntxn_next - 1 from NEXT_TXN_ID";
@@ -395,27 +383,23 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
             "initialized, null record found in next_txn_id");
         }
         close(rs);
-        List<Long> openList = new ArrayList<Long>();
+        Set<Long> openList = new HashSet<Long>();
         //need the WHERE clause below to ensure consistent results with READ_COMMITTED
-        s = "select txn_id, txn_state from TXNS where txn_id <= " + hwm + " order by txn_id";
+        s = "select txn_id, txn_state from TXNS where txn_id <= " + hwm;
         LOG.debug("Going to execute query<" + s + ">");
         rs = stmt.executeQuery(s);
         long minOpenTxn = Long.MAX_VALUE;
-        BitSet abortedBits = new BitSet();
         while (rs.next()) {
           long txnId = rs.getLong(1);
           openList.add(txnId);
           char c = rs.getString(2).charAt(0);
           if(c == TXN_OPEN) {
             minOpenTxn = Math.min(minOpenTxn, txnId);
-          } else if (c == TXN_ABORTED) {
-            abortedBits.set(openList.size() - 1);
           }
         }
         LOG.debug("Going to rollback");
         dbConn.rollback();
-        ByteBuffer byteBuffer = ByteBuffer.wrap(abortedBits.toByteArray());
-        GetOpenTxnsResponse otr = new GetOpenTxnsResponse(hwm, openList, byteBuffer);
+        GetOpenTxnsResponse otr = new GetOpenTxnsResponse(hwm, openList);
         if(minOpenTxn < Long.MAX_VALUE) {
           otr.setMin_open_txn(minOpenTxn);
         }
@@ -860,7 +844,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
   /**
    * As much as possible (i.e. in absence of retries) we want both operations to be done on the same
    * connection (but separate transactions).  This avoid some flakiness in BONECP where if you
-   * perform an operation on 1 connection and immediately get another from the pool, the 2nd one
+   * perform an operation on 1 connection and immediately get another from the pool, the 2nd one
    * doesn't see results of the first.
    * 
    * Retry-by-caller note: If the call to lock is from a transaction, then in the worst case
@@ -999,13 +983,6 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
                 case SELECT:
                   updateTxnComponents = false;
                   break;
-                case NO_TXN:
-                  /*this constant is a bit of a misnomer since we now always have a txn context.  It
-                   just means the operation is such that we don't care what tables/partitions it
-                   affected as it doesn't trigger a compaction or conflict detection.  A better name
-                   would be NON_TRANSACTIONAL.*/
-                  updateTxnComponents = false;
-                  break;
                 default:
                   //since we have an open transaction, only 4 values above are expected 
                   throw new IllegalStateException("Unexpected DataOperationType: " + lc.getOperationType()
@@ -1957,10 +1934,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
 
   }
 
-  Connection getDbConn(int isolationLevel) throws SQLException {
-    return getDbConn(isolationLevel, connPool);
-  }
-  private Connection getDbConn(int isolationLevel, DataSource connPool) throws SQLException {
+  protected Connection getDbConn(int isolationLevel) throws SQLException {
     int rc = doRetryOnConnPool ? 10 : 1;
     Connection dbConn = null;
     while (true) {
@@ -2483,14 +2457,14 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
       response.setLockid(extLockId);
 
       LOG.debug("checkLock(): Setting savepoint. extLockId=" + JavaUtils.lockIdToString(extLockId));
-      Savepoint save = dbConn.setSavepoint();
+      Savepoint save = dbConn.setSavepoint();//todo: get rid of this
       StringBuilder query = new StringBuilder("select hl_lock_ext_id, " +
         "hl_lock_int_id, hl_db, hl_table, hl_partition, hl_lock_state, " +
         "hl_lock_type, hl_txnid from HIVE_LOCKS where hl_db in (");
 
       Set<String> strings = new HashSet<String>(locksBeingChecked.size());
 
-      //This the set of entities that the statement represented by extLockId wants to update
+      //This is the set of entities that the statement represented by extLockId wants to update
       List<LockInfo> writeSet = new ArrayList<>();
 
       for (LockInfo info : locksBeingChecked) {
@@ -3157,7 +3131,9 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
     }
   }
 
-  private static synchronized DataSource setupJdbcConnectionPool(HiveConf conf, int maxPoolSize, long getConnectionTimeoutMs) throws SQLException {
+  private static synchronized void setupJdbcConnectionPool(HiveConf conf) throws SQLException {
+    if (connPool != null) return;
+
     String driverUrl = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORECONNECTURLKEY);
     String user = getMetastoreJdbcUser(conf);
     String passwd = getMetastoreJdbcPasswd(conf);
@@ -3167,40 +3143,33 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
     if ("bonecp".equals(connectionPooler)) {
       BoneCPConfig config = new BoneCPConfig();
       config.setJdbcUrl(driverUrl);
-      //if we are waiting for connection for a long time, something is really wrong
+      //if we are waiting for connection for 60s, something is really wrong
       //better raise an error than hang forever
-      //see DefaultConnectionStrategy.getConnectionInternal()
-      config.setConnectionTimeoutInMs(getConnectionTimeoutMs);
-      config.setMaxConnectionsPerPartition(maxPoolSize);
+      config.setConnectionTimeoutInMs(60000);
+      config.setMaxConnectionsPerPartition(10);
       config.setPartitionCount(1);
       config.setUser(user);
       config.setPassword(passwd);
+      connPool = new BoneCPDataSource(config);
       doRetryOnConnPool = true;  // Enable retries to work around BONECP bug.
-      return new BoneCPDataSource(config);
     } else if ("dbcp".equals(connectionPooler)) {
-      GenericObjectPool objectPool = new GenericObjectPool();
-      //https://commons.apache.org/proper/commons-pool/api-1.6/org/apache/commons/pool/impl/GenericObjectPool.html#setMaxActive(int)
-      objectPool.setMaxActive(maxPoolSize);
-      objectPool.setMaxWait(getConnectionTimeoutMs);
+      ObjectPool objectPool = new GenericObjectPool();
       ConnectionFactory connFactory = new DriverManagerConnectionFactory(driverUrl, user, passwd);
       // This doesn't get used, but it's still necessary, see
       // http://svn.apache.org/viewvc/commons/proper/dbcp/branches/DBCP_1_4_x_BRANCH/doc/ManualPoolingDataSourceExample.java?view=markup
       PoolableConnectionFactory poolConnFactory =
           new PoolableConnectionFactory(connFactory, objectPool, null, null, false, true);
-      return new PoolingDataSource(objectPool);
+      connPool = new PoolingDataSource(objectPool);
     } else if ("hikaricp".equals(connectionPooler)) {
       HikariConfig config = new HikariConfig();
-      config.setMaximumPoolSize(maxPoolSize);
       config.setJdbcUrl(driverUrl);
       config.setUsername(user);
       config.setPassword(passwd);
-      //https://github.com/brettwooldridge/HikariCP
-      config.setConnectionTimeout(getConnectionTimeoutMs);
 
-      return new HikariDataSource(config);
+      connPool = new HikariDataSource(config);
     } else if ("none".equals(connectionPooler)) {
       LOG.info("Choosing not to pool JDBC connections");
-      return new NoPoolConnectionPool(conf);
+      connPool = new NoPoolConnectionPool(conf);
     } else {
       throw new RuntimeException("Unknown JDBC connection pooling " + connectionPooler);
     }
@@ -3458,7 +3427,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
       try {
         String sqlStmt = sqlGenerator.addForUpdateClause("select MT_COMMENT from AUX_TABLE where MT_KEY1=" + quoteString(key) + " and MT_KEY2=0");
         lockInternal();
-        dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED, connPoolMutex);
+        dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
         stmt = dbConn.createStatement();
         if(LOG.isDebugEnabled()) {
           LOG.debug("About to execute SQL: " + sqlStmt);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
index 6e0070b..517eec3 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
@@ -32,9 +32,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.util.Arrays;
-import java.util.BitSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 public class TxnUtils {
   private static final Logger LOG = LoggerFactory.getLogger(TxnUtils.class);
@@ -50,13 +50,8 @@ public class TxnUtils {
    * @return a valid txn list.
    */
   public static ValidTxnList createValidReadTxnList(GetOpenTxnsResponse txns, long currentTxn) {
-    /*todo: should highWater be min(currentTxn,txns.getTxn_high_water_mark()) assuming currentTxn>0
-     * otherwise if currentTxn=7 and 8 commits before 7, then 7 will see result of 8 which
-     * doesn't make sense for Snapshot Isolation.  Of course for Read Committed, the list should
-     * inlude the latest committed set.*/
     long highWater = txns.getTxn_high_water_mark();
-    List<Long> open = txns.getOpen_txns();
-    BitSet abortedBits = BitSet.valueOf(txns.getAbortedBits());
+    Set<Long> open = txns.getOpen_txns();
     long[] exceptions = new long[open.size() - (currentTxn > 0 ? 1 : 0)];
     int i = 0;
     for(long txn: open) {
@@ -64,10 +59,10 @@ public class TxnUtils {
       exceptions[i++] = txn;
     }
     if(txns.isSetMin_open_txn()) {
-      return new ValidReadTxnList(exceptions, abortedBits, highWater, txns.getMin_open_txn());
+      return new ValidReadTxnList(exceptions, highWater, txns.getMin_open_txn());
     }
     else {
-      return new ValidReadTxnList(exceptions, abortedBits, highWater);
+      return new ValidReadTxnList(exceptions, highWater);
     }
   }
 
@@ -98,9 +93,7 @@ public class TxnUtils {
       exceptions = Arrays.copyOf(exceptions, i);
     }
     highWater = minOpenTxn == Long.MAX_VALUE ? highWater : minOpenTxn - 1;
-    BitSet bitSet = new BitSet(exceptions.length);
-    bitSet.set(0, bitSet.length()); // for ValidCompactorTxnList, everything in exceptions are aborted
-    return new ValidCompactorTxnList(exceptions, bitSet, highWater);
+    return new ValidCompactorTxnList(exceptions, highWater);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/model/package.jdo
----------------------------------------------------------------------
diff --git a/metastore/src/model/package.jdo b/metastore/src/model/package.jdo
index 67e2c20..844bc46 100644
--- a/metastore/src/model/package.jdo
+++ b/metastore/src/model/package.jdo
@@ -63,10 +63,10 @@
 
     <class name="MFieldSchema" embedded-only="true" table="TYPE_FIELDS" detachable="true">
       <field name="name">
-        <column name="FNAME" length="767" jdbc-type="VARCHAR"/>
+        <column name="FNAME" length="128" jdbc-type="VARCHAR"/>
       </field>
       <field name="type" >
-        <column name="FTYPE" jdbc-type="CLOB" allows-null="false"/>
+        <column name="FTYPE" length="4000" jdbc-type="VARCHAR" allows-null="false"/>
       </field>
       <field name="comment" >
         <column name="FCOMMENT" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
@@ -118,7 +118,7 @@
         <column name="DB_ID"/>
       </index>
       <field name="tableName">
-        <column name="TBL_NAME" length="256" jdbc-type="VARCHAR"/>
+        <column name="TBL_NAME" length="128" jdbc-type="VARCHAR"/>
       </field>
       <field name="database">
         <column name="DB_ID"/>
@@ -170,7 +170,7 @@
            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
         </key>
         <value>
-           <column name="PARAM_VALUE" jdbc-type="CLOB"/>
+           <column name="PARAM_VALUE" length="4000" jdbc-type="VARCHAR"/>
         </value>
       </field>
       <field name="viewOriginalText" default-fetch-group="false">
@@ -251,14 +251,14 @@
            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
         </key>
         <value>
-           <column name="PARAM_VALUE" jdbc-type="CLOB"/>
+           <column name="PARAM_VALUE" length="4000" jdbc-type="VARCHAR"/>
         </value>
       </field>
     </class>
 
     <class name="MOrder" embedded-only="true" table="SORT_ORDER" detachable="true">
       <field name="col">
-        <column name="COL_NAME" length="767" jdbc-type="VARCHAR"/>
+        <column name="COL_NAME" length="128" jdbc-type="VARCHAR"/>
       </field>
       <field name="order">
         <column name="ORDER" jdbc-type="INTEGER"  allows-null="false"/>
@@ -280,10 +280,10 @@
         <element>
           <embedded>
             <field name="name">
-              <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR"/>
+              <column name="COLUMN_NAME" length="128" jdbc-type="VARCHAR"/>
               </field>
             <field name="type">
-              <column name="TYPE_NAME" jdbc-type="CLOB"  allows-null="false"/>
+              <column name="TYPE_NAME" length="4000" jdbc-type="VARCHAR"  allows-null="false"/>
             </field>
             <field name="comment">
               <column name="COMMENT" length="256" jdbc-type="VARCHAR" allows-null="true"/>
@@ -349,7 +349,7 @@
         <element>
           <embedded>
             <field name="col">
-              <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR"/>
+              <column name="COLUMN_NAME" length="128" jdbc-type="VARCHAR"/>
               </field>
             <field name="order">
               <column name="ORDER" jdbc-type="INTEGER"  allows-null="false"/>
@@ -366,7 +366,7 @@
            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
         </key>
         <value>
-           <column name="PARAM_VALUE" jdbc-type="CLOB"/>
+           <column name="PARAM_VALUE" length="4000" jdbc-type="VARCHAR"/>
         </value>
       </field>
       <field name="skewedColNames" table="SKEWED_COL_NAMES">
@@ -725,7 +725,7 @@
         <column name="TBL_ID" />
       </field>
       <field name="columnName">
-        <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR"/>
+        <column name="COLUMN_NAME" length="128" jdbc-type="VARCHAR"/>
       </field>
       <field name="privilege">
         <column name="TBL_COL_PRIV" length="128" jdbc-type="VARCHAR"/>
@@ -770,7 +770,7 @@
         <column name="PART_ID" />
       </field>
       <field name="columnName">
-        <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR"/>
+        <column name="COLUMN_NAME" length="128" jdbc-type="VARCHAR"/>
       </field>
       <field name="privilege">
         <column name="PART_COL_PRIV" length="128" jdbc-type="VARCHAR"/>
@@ -803,7 +803,7 @@
         <column name="DB_NAME" length="128" jdbc-type="VARCHAR"/>
       </field>
       <field name="tblName">
-        <column name="TBL_NAME" length="256" jdbc-type="VARCHAR"/>
+        <column name="TBL_NAME" length="128" jdbc-type="VARCHAR"/>
       </field>
        <field name="partName">
         <column name="PARTITION_NAME" length="767" jdbc-type="VARCHAR"/>
@@ -850,13 +850,13 @@
         <column name="DB_NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
       </field>
       <field name="tableName">
-        <column name="TABLE_NAME" length="256" jdbc-type="VARCHAR" allows-null="false"/>
+        <column name="TABLE_NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
       </field>
       <field name="table">
         <column name="TBL_ID"/>
       </field>
       <field name="colName">
-        <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR" allows-null="false"/>
+        <column name="COLUMN_NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
       </field>
       <field name="colType">
         <column name="COLUMN_TYPE" length="128" jdbc-type="VARCHAR" allows-null="false"/>
@@ -911,7 +911,7 @@
         <column name="DB_NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
       </field>
       <field name="tableName">
-        <column name="TABLE_NAME" length="256" jdbc-type="VARCHAR" allows-null="false"/>
+        <column name="TABLE_NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
       </field>
       <field name="partitionName">
         <column name="PARTITION_NAME" length="767" jdbc-type="VARCHAR" allows-null="false"/>
@@ -920,7 +920,7 @@
         <column name="PART_ID"/>
       </field>
       <field name="colName">
-        <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR" allows-null="false"/>
+        <column name="COLUMN_NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
       </field>
       <field name="colType">
         <column name="COLUMN_TYPE" length="128" jdbc-type="VARCHAR" allows-null="false"/>
@@ -1050,7 +1050,7 @@
         <column name="DB_NAME" length="128" jdbc-type="VARCHAR" allows-null="true"/>
       </field>
       <field name="tableName">
-        <column name="TBL_NAME" length="256" jdbc-type="VARCHAR" allows-null="true"/>
+        <column name="TBL_NAME" length="128" jdbc-type="VARCHAR" allows-null="true"/>
       </field>
       <field name="message">
         <column name="MESSAGE" jdbc-type="LONGVARCHAR"/>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 7760bc7..64da9b4 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
@@ -874,13 +873,6 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
   }
 
   @Override
-  public Map<String, ColumnStatisticsObj> getAggrColStatsForTablePartitions(String dbName,
-      String tableName) throws MetaException, NoSuchObjectException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
   public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) {
   }
 


[43/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/docs/xdocs/language_manual/data-manipulation-statements.xml
----------------------------------------------------------------------
diff --git a/docs/xdocs/language_manual/data-manipulation-statements.xml b/docs/xdocs/language_manual/data-manipulation-statements.xml
new file mode 100644
index 0000000..214a0dc
--- /dev/null
+++ b/docs/xdocs/language_manual/data-manipulation-statements.xml
@@ -0,0 +1,234 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.    
+-->
+
+<document>
+
+  <properties>
+    <title>Hadoop Hive- Data Manipulation Statements</title>
+    <author email="hive-user@hadoop.apache.org">Hadoop Hive Documentation Team</author>
+  </properties>
+
+  <body>
+
+    <section name="Create Table Syntax" href="create_table_syntax">
+
+    <source><![CDATA[
+CREATE [EXTERNAL] TABLE [IF NOT EXISTS] table_name
+  [(col_name data_type [COMMENT col_comment], ...)]
+  [COMMENT table_comment]
+  [PARTITIONED BY (col_name data_type [COMMENT col_comment], ...)]
+  [CLUSTERED BY (col_name, col_name, ...) [SORTED BY (col_name [ASC|DESC], ...)] INTO num_buckets BUCKETS]
+  [ROW FORMAT row_format]
+  [STORED AS file_format]
+  [LOCATION hdfs_path]
+  [TBLPROPERTIES (property_name=property_value, ...)]  
+  [AS select_statement]  
+
+CREATE [EXTERNAL] TABLE [IF NOT EXISTS] table_name
+  LIKE existing_table_name
+  [LOCATION hdfs_path]
+
+data_type
+  : primitive_type
+  | array_type
+  | map_type
+  | struct_type
+
+primitive_type
+  : TINYINT
+  | SMALLINT
+  | INT
+  | BIGINT
+  | BOOLEAN
+  | FLOAT
+  | DOUBLE
+  | STRING
+
+array_type
+  : ARRAY < data_type >
+
+map_type
+  : MAP < primitive_type, data_type >
+
+struct_type
+  : STRUCT < col_name : data_type [COMMENT col_comment], ...>
+
+row_format
+  : DELIMITED [FIELDS TERMINATED BY char] [COLLECTION ITEMS TERMINATED BY char]
+        [MAP KEYS TERMINATED BY char] [LINES TERMINATED BY char]
+  | SERDE serde_name [WITH SERDEPROPERTIES (property_name=property_value, property_name=property_value, ...)]
+
+file_format:
+  : SEQUENCEFILE
+  | TEXTFILE
+  | INPUTFORMAT input_format_classname OUTPUTFORMAT output_format_classname
+]]></source>
+ 
+<p>
+CREATE TABLE creates a table with the given name. An error is thrown if a table or view with the same name already exists. You can use IF NOT EXISTS to skip the error.
+</p>
+
+<p>
+The EXTERNAL keyword lets you create a table and provide a LOCATION so that Hive does not use a default location for this table. This comes in handy if you already have data generated. When dropping an EXTERNAL table, data in the table is NOT deleted from the file system.
+</p>
+<p>The LIKE form of CREATE TABLE allows you to copy an existing table definition exactly (without copying its data).</p>
+
+<p>
+You can create tables with a custom SerDe or using a native SerDe. A native SerDe is used if ROW FORMAT is not specified or ROW FORMAT DELIMITED is specified. You can use the DELIMITED clause to read delimited files. Use the SERDE clause to create a table with a custom SerDe. Refer to the SerDe section of the User Guide for more information on SerDes.
+</p>
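+
+<p>For example, a minimal sketch of both forms (the table names and the delimiter choice here are
+only illustrative and not part of the original examples):</p>
+
+<source><![CDATA[-- native SerDe, reading tab-delimited text files
+CREATE TABLE delimited_example(id INT, name STRING)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
+STORED AS TEXTFILE;
+
+-- custom SerDe specified explicitly
+CREATE TABLE serde_example(id INT, name STRING)
+ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
+STORED AS RCFile;
+]]></source>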
+
+<p>
+You must specify a list of columns for tables that use a native SerDe. Refer to the Types part of the User Guide for the allowable column types. A list of columns for tables that use a custom SerDe may be specified, but Hive will query the SerDe to determine the actual list of columns for this table.
+</p>
+
+<p>
+Use STORED AS TEXTFILE if the data needs to be stored as plain text files. Use STORED AS SEQUENCEFILE if the data needs to be compressed. Please read more about Hive/CompressedStorage if you are planning to keep data compressed in your Hive tables. Use INPUTFORMAT and OUTPUTFORMAT to specify the name of a corresponding InputFormat and OutputFormat class as a string literal, e.g. 'org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextInputFormat'.
+</p>
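+
+<p>For example, a sketch of the three storage clauses (the Base64 output format class name below is
+an assumption, chosen to mirror the input format class mentioned above):</p>
+
+<source><![CDATA[CREATE TABLE plain_text_table(line STRING) STORED AS TEXTFILE;
+
+CREATE TABLE compressed_table(key STRING, value STRING) STORED AS SEQUENCEFILE;
+
+CREATE TABLE base64_table(line STRING)
+STORED AS
+  INPUTFORMAT 'org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextInputFormat'
+  OUTPUTFORMAT 'org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextOutputFormat';
+]]></source>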
+
+<p>
+Partitioned tables can be created using the PARTITIONED BY clause. A table can have one or more partition columns and a separate data directory is created for each distinct value combination in the partition columns. Further, tables or partitions can be bucketed using CLUSTERED BY columns, and data can be sorted within that bucket via SORT BY columns. This can improve performance on certain kinds of queries.
+</p>
+
+<p>
+Table names and column names are case insensitive but SerDe and property names are case sensitive. Table and column comments are string literals (single-quoted). The TBLPROPERTIES clause allows you to tag the table definition with your own metadata key/value pairs.
+</p>
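+
+<p>As a small illustrative sketch, comments and TBLPROPERTIES look like this (the key/value pairs
+are arbitrary):</p>
+
+<source><![CDATA[CREATE TABLE commented_table(id INT COMMENT 'numeric id', name STRING)
+COMMENT 'A table used only to illustrate comments and table properties'
+TBLPROPERTIES ('creator'='documentation team', 'purpose'='example');
+]]></source>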
+
+<p>A create table example:</p>
+  <source><![CDATA[CREATE TABLE page_view(viewTime INT, userid BIGINT,
+     page_url STRING, referrer_url STRING,
+     ip STRING COMMENT 'IP Address of the User')
+ COMMENT 'This is the page view table'
+ PARTITIONED BY(dt STRING, country STRING)
+ STORED AS SEQUENCEFILE;]]></source>  
+
+ <p>The statement above creates the page_view table with viewTime, userid, page_url, referrer_url, and ip columns (including comments). The table is also partitioned and data is stored in sequence files. The data format in the files is assumed to be field-delimited by ctrl-A and row-delimited by newline.
+  </p>
+
+</section>
+
+<section name="Create Table as Select (CTAS)" href="ctas?">
+
+  <p>
+  Tables can also be created and populated by the results of a query in one create-table-as-select (CTAS) statement. The table created by CTAS is atomic, meaning that the table is not seen by other users until all the query results are populated. So other users will either see the table with the complete results of the query or will not see the table at all.
+  </p>
+
+  <p>
+  There are two parts in CTAS: the SELECT part can be any SELECT statement supported by HiveQL. The CREATE part of the CTAS takes the resulting schema from the SELECT part and creates the target table with other table properties such as the SerDe and storage format. The only restriction in CTAS is that the target table cannot be a partitioned table (nor can it be an external table).
+  </p> 
+
+  <source><![CDATA[CREATE TABLE page_view(viewTime INT, userid BIGINT,
+     page_url STRING, referrer_url STRING,
+     ip STRING COMMENT 'IP Address of the User')
+ COMMENT 'This is the page view table'
+ PARTITIONED BY(dt STRING, country STRING)
+ STORED AS SEQUENCEFILE;
+]]></source>
+
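+<p>A minimal CTAS sketch (the table names here are only illustrative); a fuller example using an
+explicit SerDe and storage format follows in the next section:</p>
+
+<source><![CDATA[CREATE TABLE page_view_summary AS
+SELECT dt, country, count(1) AS view_count
+FROM page_view
+GROUP BY dt, country;
+]]></source>
+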
+</section>
+
+<section name="Using SerDes" href="SerDes">
+
+<p>
+This example CTAS statement creates the target table new_key_value_store with the 
+schema (new_key DOUBLE, key_value_pair STRING) derived from the results of the 
+SELECT statement. If the SELECT statement does not specify column aliases, the 
+column names will be automatically assigned to _col0, _col1, and _col2 etc. 
+In addition, the new target table is created using a specific SerDe and a storage 
+format independent of the source tables in the SELECT statement. 
+</p>
+
+<source><![CDATA[CREATE TABLE new_key_value_store
+   ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"
+   STORED AS RCFile AS
+SELECT (key % 1024) new_key, concat(key, value) key_value_pair
+FROM key_value_store
+SORT BY new_key, key_value_pair;
+]]></source>
+
+<p>
+<b>Being able to select data from one table to another is one of the most
+powerful features of Hive. Hive handles the conversion of the data from the source
+format to the destination format as the query is being executed!</b>
+</p>
+
+</section>
+
+<section name="Bucketed Sorted Table" href="bucketed_sorted_table">
+
+<source><![CDATA[CREATE TABLE page_view(viewTime INT, userid BIGINT,
+     page_url STRING, referrer_url STRING,
+     ip STRING COMMENT 'IP Address of the User')
+ COMMENT 'This is the page view table'
+ PARTITIONED BY(dt STRING, country STRING)
+ CLUSTERED BY(userid) SORTED BY(viewTime) INTO 32 BUCKETS
+ ROW FORMAT DELIMITED
+   FIELDS TERMINATED BY '\001'
+   COLLECTION ITEMS TERMINATED BY '\002'
+   MAP KEYS TERMINATED BY '\003'
+ STORED AS SEQUENCEFILE;
+]]></source>
+
+<p>In the example above, the page_view table is bucketed (clustered by) userid and within each bucket the data is sorted in increasing order of viewTime. Such an organization allows the user to do efficient sampling on the clustered column - in this case userid. The sorting property allows internal operators to take advantage of the better-known data structure while evaluating queries, also increasing efficiency. MAP KEYS and COLLECTION ITEMS keywords can be used if any of the columns are lists or maps.
+</p>
+
+<p>
+The CLUSTERED BY and SORTED BY creation commands do not affect how data is inserted into a table -- only how it is read. This means that users must be careful to insert data correctly by specifying the number of reducers to be equal to the number of buckets, and using CLUSTER BY and SORT BY commands in their query. See
+<a href="working_with_bucketed_tables.html">Working with Bucketed tables</a> to see how these
+are used. 
+</p>
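+
+<p>A sketch of such an insert for the page_view table above, assuming a non-bucketed staging table
+named page_view_stg with the same non-partition columns (that staging table is not part of the
+original example):</p>
+
+<source><![CDATA[set mapred.reduce.tasks = 32;
+
+FROM page_view_stg
+INSERT OVERWRITE TABLE page_view PARTITION(dt='2009-02-25', country='US')
+SELECT viewTime, userid, page_url, referrer_url, ip
+DISTRIBUTE BY userid
+SORT BY viewTime;
+]]></source>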
+
+</section>
+
+<section name="External Tables" href="external_table?">
+
+<p>
+Unless a table is specified as EXTERNAL it will be stored inside a folder specified by the
+configuration property hive.metastore.warehouse.dir.
+EXTERNAL tables point to any HDFS location for their storage. You still have to make sure that the data format specified matches the actual data in that location.
+ 
+</p>
+<source><![CDATA[CREATE EXTERNAL TABLE page_view(viewTime INT, userid BIGINT,
+     page_url STRING, referrer_url STRING,
+     ip STRING COMMENT 'IP Address of the User',
+     country STRING COMMENT 'country of origination')
+ COMMENT 'This is the staging page view table'
+ ROW FORMAT DELIMITED FIELDS TERMINATED BY '\054'
+ STORED AS TEXTFILE
+ LOCATION '<hdfs_location>';
+ ]]></source>
+
+</section>
+
+<section name="Create Table ... Like" href="create_table_like?">
+
+<p>The statement below creates a new empty_key_value_store table whose definition exactly matches the existing key_value_store in all particulars other than table name. The new table contains no rows.
+</p>
+
+<source><![CDATA[CREATE TABLE empty_key_value_store
+LIKE key_value_store;
+]]></source>
+
+</section>
+
+<section name="drop" href="drop">
+<p>Drop it like it is hot: DROP TABLE removes the table from the metastore, and for managed (non-EXTERNAL) tables the underlying data is removed as well.</p>
+</section>
+  </body>
+</document>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/docs/xdocs/language_manual/joins.xml
----------------------------------------------------------------------
diff --git a/docs/xdocs/language_manual/joins.xml b/docs/xdocs/language_manual/joins.xml
new file mode 100644
index 0000000..190ecd4
--- /dev/null
+++ b/docs/xdocs/language_manual/joins.xml
@@ -0,0 +1,212 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.    
+-->
+
+<document>
+
+  <properties>
+    <title>Hadoop Hive- Joins</title>
+    <author email="hive-user@hadoop.apache.org">Hadoop Hive Documentation Team</author>
+  </properties>
+
+  <body>
+
+    <section name="Join Syntax" href="join_syntax">
+
+    <source><![CDATA[join_table:
+    table_reference [INNER] JOIN table_factor [join_condition]
+  | table_reference {LEFT|RIGHT|FULL} [OUTER] JOIN table_reference join_condition
+  | table_reference LEFT SEMI JOIN table_reference join_condition
+
+table_reference:
+    table_factor
+  | join_table
+
+table_factor:
+    tbl_name [alias]
+  | table_subquery alias
+  | ( table_references )
+
+join_condition:
+    ON equality_expression ( AND equality_expression )*
+
+equality_expression: 
+    expression = expression
+]]></source>
+
+<p>
+Only equality joins, outer joins, and left semi joins are supported in Hive. Hive does not support join conditions that are not equality conditions as it is very difficult to express such conditions as a map/reduce job. Also, more than two tables can be joined in Hive. 
+</p>
+
+<b>Allowed Equality Joins</b>
+
+<source><![CDATA[SELECT a.* FROM a JOIN b ON (a.id = b.id) 
+]]></source>
+
+<source><![CDATA[SELECT a.* FROM a JOIN b ON (a.id = b.id AND a.department = b.department)
+]]></source>
+
+<b>Disallowed Joins</b>
+
+<source><![CDATA[SELECT a.* FROM a JOIN b ON (a.id <> b.id)
+]]></source>
+
+<p>Multiple Tables can be joined in the same query</p>
+
+<source><![CDATA[SELECT a.val, b.val, c.val FROM a JOIN b ON (a.key = b.key1) JOIN c ON (c.key = b.key2)
+]]></source>
+
+
+
+ 
+</section>
+
+<section name="Join implementation with Map Reduce" href="join_map_reduce">
+
+<p>Hive converts joins over multiple tables into a single map/reduce job if for every table the same column is used in the join clauses. The query below is
+converted into a single map/reduce job as only the key1 column of b is involved in the join.</p>
+
+<source><![CDATA[SELECT a.val, b.val, c.val FROM a JOIN b ON (a.key = b.key1) JOIN c ON (c.key = b.key1)]]></source>
+<i>Note that any number of tables can be joined in a single map/reduce job as long as they meet the above criterion.</i>
+
+<p>However, if the join columns are not the same for all tables, the query is converted into multiple map/reduce jobs:</p>
+
+<source><![CDATA[SELECT a.val, b.val, c.val FROM a JOIN b ON (a.key = b.key1) JOIN c ON (c.key = b.key2)
+]]></source>
+
+<p>In this case the first map/reduce job joins a with b and the results are then joined with c in the second map/reduce job. </p>
+</section>
+
+<section name="Largest Table LAST" href="lagest_table_last">
+
+<p>In every map/reduce stage of the join, the last table in the sequence is streamed through the reducers whereas the others are buffered. Therefore, it helps to reduce the memory needed in the reducer for buffering the rows for a particular value of the join key by organizing the tables such that the largest tables appear last in the sequence. For example, in</p>
+
+<source><![CDATA[SELECT a.val, b.val, c.val FROM a JOIN b ON (a.key = b.key1) JOIN c ON (c.key = b.key1)]]></source>
+
+<p>all the three tables are joined in a single map/reduce job and the values for a particular value of the key for tables a and b are buffered in the memory in the reducers. Then for each row retrieved from c, the join is computed with the buffered rows.</p>
+
+<p>For the query:</p>
+
+<source><![CDATA[SELECT a.val, b.val, c.val FROM a JOIN b ON (a.key = b.key1) JOIN c ON (c.key = b.key2)]]></source>
+
+<p>There are two map/reduce jobs involved in computing the join. The first of these joins a with b and buffers the values of a while streaming the values of b through the reducers. The second job buffers the results of the first join while streaming the values of c through the reducers.</p>
+
+</section>
+
+<section name="Streamtable hint" href="stream_table_hint">
+
+<p>In every map/reduce stage of the join, the table to be streamed can be specified via a hint:</p>
+
+<source><![CDATA[SELECT /*+ STREAMTABLE(a) */ a.val, b.val, c.val 
+FROM a JOIN b ON (a.key = b.key1) JOIN c ON (c.key = b.key1)]]></source>
+
+<p>All the three tables are joined in a single map/reduce job and the values for a particular value of the key for tables b and c are buffered in the memory in the reducers. Then for each row retrieved from a, the join is computed with the buffered rows.
+</p>
+
+</section>
+
+<section name="Outer Joins" href="outer_joins">
+
+<p>LEFT, RIGHT, and FULL OUTER joins exist in order to provide more control over ON clauses for which there is no match. For example:</p>
+
+<source><![CDATA[SELECT a.val, b.val FROM a LEFT OUTER JOIN b ON (a.key=b.key)
+]]></source>
+
+<p>The above query will return a row for every row in a. This output row will be a.val,b.val when there is a b.key that equals a.key, and the output row will be a.val,NULL when there is no corresponding b.key. Rows from b which have no corresponding a.key will be dropped. The syntax "FROM a LEFT OUTER JOIN b" must be written on one line in order to understand how it works--a is to the LEFT of b in this query, and so all rows from a are kept; a RIGHT OUTER JOIN will keep all rows from b, and a FULL OUTER JOIN will keep all rows from a and all rows from b. OUTER JOIN semantics should conform to standard SQL specs.
+</p>
+
+<p>Joins occur BEFORE WHERE clauses. So, if you want to restrict the OUTPUT of a join, the restriction should be in the WHERE clause; otherwise it should be in the JOIN clause. A big point of confusion for this issue is partitioned tables:</p>
+
+<source><![CDATA[SELECT a.val, b.val FROM a LEFT OUTER JOIN b ON (a.key=b.key)
+  WHERE a.ds='2009-07-07' AND b.ds='2009-07-07']]></source>
+
+<p>will join a on b, producing a list of a.val and b.val. The WHERE clause, however, can also reference other columns of a and b that are in the output of the join, and then filter them out. However, whenever a row from the JOIN has found a key for a and no key for b, all of the columns of b will be NULL, including the ds column. This is to say, you will filter out all rows of join output for which there was no valid b.key, and thus you have outsmarted your LEFT OUTER requirement. In other words, the LEFT OUTER part of the join is irrelevant if you reference any column of b in the WHERE clause. Instead, when OUTER JOINing, use this syntax:</p>
+
+<source><![CDATA[SELECT a.val, b.val FROM a LEFT OUTER JOIN b
+  ON (a.key=b.key AND b.ds='2009-07-07' AND a.ds='2009-07-07')]]></source>
+
+<p>Joins are NOT commutative! Joins are left-associative regardless of whether they are LEFT or RIGHT joins. </p>
+
+<source><![CDATA[SELECT a.val1, a.val2, b.val, c.val
+FROM a
+JOIN b ON (a.key = b.key)
+LEFT OUTER JOIN c ON (a.key = c.key)]]></source>
+
+<p>The above query first joins a on b, throwing away everything in a or b that does not have a corresponding key in the other table. The reduced table is then joined on c. This provides unintuitive results if there is a key that exists in both a and c, but not b: The whole row (including a.val1, a.val2, and a.key) is dropped in the "a JOIN b" step, so when the result of that is joined with c, any row with a c.key that had a corresponding a.key or b.key (but not both) will show up as NULL, NULL, NULL, c.val.</p>
+</section>
+
+<section name="Left Semi Join" href="left_semi_join">
+
+<p>LEFT SEMI JOIN implements the correlated IN/EXISTS subquery semantics in an efficient way. Since Hive currently does not support IN/EXISTS subqueries, you can rewrite your queries using LEFT SEMI JOIN. The restriction of using LEFT SEMI JOIN is that the right-hand-side table should only be referenced in the join condition (ON-clause), but not in WHERE- or SELECT-clauses etc.</p>
+
+<p>This type of query</p>
+<source><![CDATA[SELECT a.key, a.value
+FROM a 
+WHERE a.key in 
+(SELECT b.key
+FROM B);]]></source>
+
+<p>Can be written as:</p>
+
+<source><![CDATA[SELECT a.key, a.val
+FROM a LEFT SEMI JOIN b on (a.key = b.key)]]></source>
+
+</section>
+
+<section name="Map Side Join" href="map_side_join">
+
+<p>If all but one of the tables being joined are small, the join can be performed as a map-only job. The query
+does not need a reducer. For every mapper of a, b is read completely. A restriction is that a <b>FULL/RIGHT OUTER JOIN b</b> cannot be performed. </p>
+
+<source><![CDATA[SELECT /*+ MAPJOIN(b) */ a.key, a.value
+FROM a join b on a.key = b.key]]></source>
+
+</section>
+
+<section name="Bucketed Map Join" href="bucket_map_join">
+
+<p>If the tables being joined are bucketized, and the buckets are a multiple of each other, the buckets can be joined with each other. If table A has 8 buckets and table B has 4 buckets, the following join:</p>
+
+<source><![CDATA[SELECT /*+ MAPJOIN(b) */ a.key, a.value
+FROM a join b on a.key = b.key]]></source>
+
+<p>can be done on the mapper only. Instead of fetching B completely for each mapper of A, only the required buckets are fetched. For the query above, the mapper processing bucket 1 for A will only fetch bucket 1 of B. It is not the default behavior, and is governed by the following parameter </p>
+
+<i>set hive.optimize.bucketmapjoin = true</i>
+
+<p>If the tables being joined are sorted and bucketized, and the number of buckets is the same, a sort-merge join can be performed. The corresponding buckets are joined with each other at the mapper. If both A and B have 4 buckets,</p>
+
+<source><![CDATA[ SELECT /*+ MAPJOIN(b) */ a.key, a.value
+FROM A a join B b on a.key = b.key]]></source>
+
+<p>can be done on the mapper only. The mapper for the bucket for A will traverse the corresponding bucket for B. This is not the default behavior, and the following parameters need to be set:</p>
+
+<source><![CDATA[set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
+set hive.optimize.bucketmapjoin = true;
+set hive.optimize.bucketmapjoin.sortedmerge = true;]]></source>
+
+</section>
+
+</body>
+
+
+
+</document>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/docs/xdocs/language_manual/var_substitution.xml
----------------------------------------------------------------------
diff --git a/docs/xdocs/language_manual/var_substitution.xml b/docs/xdocs/language_manual/var_substitution.xml
new file mode 100644
index 0000000..3f3b04b
--- /dev/null
+++ b/docs/xdocs/language_manual/var_substitution.xml
@@ -0,0 +1,130 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+<document>
+
+  <properties>
+    <title>Hadoop Hive- Variable Substitution</title>
+    <author email="hive-user@hadoop.apache.org">Hadoop Hive Documentation Team</author>
+  </properties>
+
+  <body>
+<h3>Hive Variable Substitution</h3>
+<section name="Introduction" href="Introduction">
+
+<p>Hive is used both for interactive queries and as part of larger scripted workflows. The Hive variable substitution mechanism was
+designed to avoid some of the code that was getting baked into the scripting language on top of Hive. For example, invocations like:</p>
+
+<source><![CDATA[$ a=b
+$ hive -e " describe $a "
+]]></source>
+
+<p>
+are becoming commonplace. This is frustrating as Hive becomes closely coupled with scripting languages. The Hive
+startup time of a couple of seconds is non-trivial when doing thousands of manipulations across multiple hive -e invocations.</p>
+
+<p>
+Hive Variables combine the set capability you know and love with some limited yet powerful (evil laugh) substitution 
+ability. For example:</p>
+
+<source><![CDATA[$ bin/hive -hiveconf a=b -e 'set a; set hiveconf:a; \
+create table if not exists b (col int); describe ${hiveconf:a}'
+]]></source>
+
+<p>Results in:</p>
+<source><![CDATA[Hive history file=/tmp/edward/hive_job_log_edward_201011240906_1463048967.txt
+a=b
+hiveconf:a=b
+OK
+Time taken: 5.913 seconds
+OK
+col	int	
+Time taken: 0.754 seconds
+]]></source>
+
+</section>
+
+<section name="Using variables" href="using_variables">
+
+<p>There are three namespaces for variables: hiveconf, system, and env. hiveconf variables are set as normal:</p>
+
+<source><![CDATA[set x=myvalue
+]]></source>
+
+<p>However they are retrieved using</p>
+
+<source><![CDATA[${hiveconf:x}
+]]></source>
+ 
+<p>Annotated examples of usage from the test case ql/src/test/queries/clientpositive/set_processor_namespaces.q</p>
+
+<source><![CDATA[set zzz=5;
+--  sets zzz=5
+set zzz;
+
+set system:xxx=5;
+set system:xxx;
+-- sets a system property xxx to 5
+
+set system:yyy=${system:xxx};
+set system:yyy;
+-- sets yyy with value of xxx
+
+set go=${hiveconf:zzz};
+set go;
+-- sets go based on the value of zzz
+
+set hive.variable.substitute=false;
+set raw=${hiveconf:zzz};
+set raw;
+-- with substitution disabled, raw is set to the literal text
+
+set hive.variable.substitute=true;
+
+EXPLAIN SELECT * FROM src where key=${hiveconf:zzz};
+SELECT * FROM src where key=${hiveconf:zzz};
+--use a variable in a query
+
+set a=1;
+set b=a;
+set c=${hiveconf:${hiveconf:b}};
+set c;
+--uses nested variables. 
+
+
+set jar=../lib/derby.jar;
+
+add file ${hiveconf:jar};
+list file;
+delete file ${hiveconf:jar};
+list file;
+]]></source>
+</section>
+
+<section name="Disabling" href="disable">
+  <p>Variable substitution is on by default. If this causes an issue with an already existing script, disable it.</p>
+
+<source><![CDATA[set hive.variable.substitute=false;
+]]></source>
+
+</section>
+
+</body>
+</document>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/docs/xdocs/language_manual/working_with_bucketed_tables.xml
----------------------------------------------------------------------
diff --git a/docs/xdocs/language_manual/working_with_bucketed_tables.xml b/docs/xdocs/language_manual/working_with_bucketed_tables.xml
new file mode 100644
index 0000000..de4b599
--- /dev/null
+++ b/docs/xdocs/language_manual/working_with_bucketed_tables.xml
@@ -0,0 +1,87 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+<document>
+
+  <properties>
+    <title>Hadoop Hive- Working with Bucketed Tables</title>
+    <author email="hive-user@hadoop.apache.org">Hadoop Hive Documentation Team</author>
+  </properties>
+
+  <body>
+
+ <section name="Defining Bucketed Tables" href="defining_bucketed_tables?">
+
+<p>
+This is a brief example of creating and populating bucketed tables. Bucketed tables 
+are fantastic in that they allow much more efficient sampling than do non-bucketed 
+tables, and they may later allow for time-saving operations such as map-side joins. 
+However, the bucketing specified at table creation is not enforced when the table 
+is written to, and so it is possible for the table's metadata to advertise 
+properties which are not upheld by the table's actual layout. This should obviously 
+be avoided. Here's how to do it right.
+</p>
+<p>First there’s table creation:</p>
+
+ <source><![CDATA[CREATE TABLE user_info_bucketed(user_id BIGINT, firstname STRING, lastname STRING)
+COMMENT 'A bucketed copy of user_info'
+PARTITIONED BY(ds STRING)
+CLUSTERED BY(user_id) INTO 256 BUCKETS;
+ ]]></source>
+
+<p>Notice that we define user_id as the bucketing column.</p>
+</section>
+
+<section name="Populating Bucketed Tables" href="populating_bucketed_tables?">
+
+ <source><![CDATA[set hive.enforce.bucketing = true;  
+FROM user_info
+INSERT OVERWRITE TABLE user_info_bucketed
+PARTITION (ds='2009-02-25')
+SELECT userid, firstname, lastname WHERE ds='2009-02-25';
+ ]]></source>
+
+<p>The command <strong>set hive.enforce.bucketing = true;</strong>  allows the 
+correct number of reducers and the cluster by column to be automatically selected 
+based on the table. Otherwise, you would need to set the number of reducers to be 
+the same as the number of buckets with 
+<strong>set mapred.reduce.tasks = 256;</strong> and have a 
+<strong>CLUSTER BY ...</strong> clause in the select.</p>
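+
+<p>A sketch of that manual alternative, assuming the same source table and columns as above:</p>
+
+<source><![CDATA[set mapred.reduce.tasks = 256;
+FROM user_info
+INSERT OVERWRITE TABLE user_info_bucketed
+PARTITION (ds='2009-02-25')
+SELECT userid, firstname, lastname WHERE ds='2009-02-25'
+CLUSTER BY userid;
+]]></source>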
+
+</section>
+
+<section name="Bucketing Explained" href="bucketing_explained?">
+<p>
+How does Hive distribute the rows across the buckets? In general, the bucket number is determined by the expression hash_function(bucketing_column) mod num_buckets. (There's a 0x7FFFFFFF mask in there too, but that's not that important). The hash_function depends on the type of the bucketing column. For an int, it's easy, hash_int(i) == i. For example, if user_id were an int, and there were 10 buckets, we would expect all user_id's that end in 0 to be in bucket 1, all user_id's that end in a 1 to be in bucket 2, etc. For other datatypes, it's a little tricky. In particular, the hash of a BIGINT is not the same as the BIGINT. And the hash of a string or a complex datatype will be some number that's derived from the value, but not anything humanly recognizable. For example, if user_id were a STRING, then the user_id's in bucket 1 would probably not end in 0. In general, distributing rows based on the hash will give you an even distribution in the buckets.
+</p>
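+
+<p>As a rough illustration, Hive's built-in hash() and pmod() functions can be used to peek at the
+bucket a row would map to (this only approximates the internal hash_function and ignores the
+0x7FFFFFFF masking mentioned above, so treat it as a sanity check rather than an exact rule):</p>
+
+<source><![CDATA[SELECT user_id, pmod(hash(user_id), 256) AS expected_bucket
+FROM user_info_bucketed
+WHERE ds='2009-02-25'
+LIMIT 10;
+]]></source>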
+
+</section>
+
+<section name="What can go wrong?" href="bucketing_gone_wrong?">
+<p>
+So, what can go wrong? As long as you 
+<strong>set hive.enforce.bucketing = true</strong>, and use the syntax above, 
+the tables should be populated properly. Things can go wrong if the bucketing 
+column type is different during the insert and on read, or if you manually 
+cluster by a value that's different from the table definition. 
+</p>
+</section>
+</body>
+</document>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/docs/xdocs/udf/reflect.xml
----------------------------------------------------------------------
diff --git a/docs/xdocs/udf/reflect.xml b/docs/xdocs/udf/reflect.xml
new file mode 100644
index 0000000..435f025
--- /dev/null
+++ b/docs/xdocs/udf/reflect.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+<document>
+
+  <properties>
+    <title>Hadoop Hive- Reflect User Defined Function</title>
+    <author email="hive-user@hadoop.apache.org">Hadoop Hive Documentation Team</author>
+  </properties>
+
+  <body>
+<section name="Reflect (Generic) UDF" href="reflect">
+
+<p>A Java class and method often exist to handle the exact function a user would like to use in Hive. Rather
+than having to write a wrapper UDF to call this method, the majority of these methods can be called using the reflect UDF. Reflect uses 
+Java reflection to instantiate and call methods of objects; it can also call static functions. The method must return a primitive type
+or a type that Hive knows how to serialize.
+</p>
+
+<source><![CDATA[SELECT reflect("java.lang.String", "valueOf", 1),
+       reflect("java.lang.String", "isEmpty"),
+       reflect("java.lang.Math", "max", 2, 3),
+       reflect("java.lang.Math", "min", 2, 3),
+       reflect("java.lang.Math", "round", 2.5),
+       reflect("java.lang.Math", "exp", 1.0),
+       reflect("java.lang.Math", "floor", 1.9)
+FROM src LIMIT 1;
+
+
+1	true	3	2	3	2.7182818284590455	1.0]]></source>
+
+</section>
+</body>
+</document>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/druid-handler/pom.xml
----------------------------------------------------------------------
diff --git a/druid-handler/pom.xml b/druid-handler/pom.xml
index 3f8a74f..fc70185 100644
--- a/druid-handler/pom.xml
+++ b/druid-handler/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 
@@ -107,10 +107,6 @@
           <groupId>com.fasterxml.jackson.core</groupId>
           <artifactId>jackson-databind</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>io.druid</groupId>
-          <artifactId>druid-aws-common</artifactId>
-        </exclusion>
       </exclusions>
     </dependency>
     <dependency>
@@ -222,17 +218,6 @@
       <groupId>org.apache.calcite</groupId>
       <artifactId>calcite-druid</artifactId>
       <version>${calcite.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>org.apache.calcite.avatica</groupId>
-          <artifactId>avatica-core</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.calcite.avatica</groupId>
-      <artifactId>avatica</artifactId>
-      <version>${avatica.version}</version>
     </dependency>
     <!-- test inter-project -->
     <dependency>
@@ -340,7 +325,6 @@
                   <include>org.jdbi:*</include>
                   <include>net.jpountz.lz4:*</include>
                   <include>org.apache.commons:*</include>
-                  <include>org.roaringbitmap:*</include>
                 </includes>
               </artifactSet>
               <filters>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
index daee2fe..d4f6865 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
@@ -33,6 +33,7 @@ import com.metamx.common.lifecycle.Lifecycle;
 import com.metamx.http.client.HttpClient;
 import com.metamx.http.client.HttpClientConfig;
 import com.metamx.http.client.HttpClientInit;
+import io.druid.indexer.SQLMetadataStorageUpdaterJobHandler;
 import io.druid.metadata.MetadataStorageConnectorConfig;
 import io.druid.metadata.MetadataStorageTablesConfig;
 import io.druid.metadata.SQLMetadataConnector;
@@ -55,6 +56,7 @@ import org.apache.hadoop.hive.metastore.HiveMetaHook;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
@@ -94,6 +96,8 @@ public class DruidStorageHandler extends DefaultHiveMetaHook implements HiveStor
 
   private final SQLMetadataConnector connector;
 
+  private final SQLMetadataStorageUpdaterJobHandler druidSqlMetadataStorageUpdaterJobHandler;
+
   private final MetadataStorageTablesConfig druidMetadataStorageTablesConfig;
 
   private HttpClient httpClient;
@@ -147,14 +151,17 @@ public class DruidStorageHandler extends DefaultHiveMetaHook implements HiveStor
     } else {
       throw new IllegalStateException(String.format("Unknown metadata storage type [%s]", dbType));
     }
+    druidSqlMetadataStorageUpdaterJobHandler = new SQLMetadataStorageUpdaterJobHandler(connector);
   }
 
   @VisibleForTesting
   public DruidStorageHandler(SQLMetadataConnector connector,
+          SQLMetadataStorageUpdaterJobHandler druidSqlMetadataStorageUpdaterJobHandler,
           MetadataStorageTablesConfig druidMetadataStorageTablesConfig,
           HttpClient httpClient
   ) {
     this.connector = connector;
+    this.druidSqlMetadataStorageUpdaterJobHandler = druidSqlMetadataStorageUpdaterJobHandler;
     this.druidMetadataStorageTablesConfig = druidMetadataStorageTablesConfig;
     this.httpClient = httpClient;
   }
@@ -249,12 +256,6 @@ public class DruidStorageHandler extends DefaultHiveMetaHook implements HiveStor
 
   @Override
   public void commitCreateTable(Table table) throws MetaException {
-    LOG.debug(String.format("commit create table [%s]", table.getTableName()));
-    publishSegments(table, true);
-  }
-
-
-  public void publishSegments(Table table, boolean overwrite) throws MetaException {
     if (MetaStoreUtils.isExternalTable(table)) {
       return;
     }
@@ -265,19 +266,15 @@ public class DruidStorageHandler extends DefaultHiveMetaHook implements HiveStor
       List<DataSegment> segmentList = DruidStorageHandlerUtils
               .getPublishedSegments(tableDir, getConf());
       LOG.info(String.format("Found [%d] segments under path [%s]", segmentList.size(), tableDir));
-      final String dataSourceName = table.getParameters().get(Constants.DRUID_DATA_SOURCE);
-
-      DruidStorageHandlerUtils.publishSegments(
-              connector,
-              druidMetadataStorageTablesConfig,
-              dataSourceName,
+      druidSqlMetadataStorageUpdaterJobHandler.publishSegments(
+              druidMetadataStorageTablesConfig.getSegmentsTable(),
               segmentList,
-              DruidStorageHandlerUtils.JSON_MAPPER,
-              overwrite
+              DruidStorageHandlerUtils.JSON_MAPPER
       );
       final String coordinatorAddress = HiveConf
               .getVar(getConf(), HiveConf.ConfVars.HIVE_DRUID_COORDINATOR_DEFAULT_ADDRESS);
       int maxTries = HiveConf.getIntVar(getConf(), HiveConf.ConfVars.HIVE_DRUID_MAX_TRIES);
+      final String dataSourceName = table.getParameters().get(Constants.DRUID_DATA_SOURCE);
       LOG.info(String.format("checking load status from coordinator [%s]", coordinatorAddress));
 
       // check if the coordinator is up
@@ -491,7 +488,7 @@ public class DruidStorageHandler extends DefaultHiveMetaHook implements HiveStor
   public void commitInsertTable(Table table, boolean overwrite) throws MetaException {
     if (overwrite) {
       LOG.debug(String.format("commit insert overwrite into table [%s]", table.getTableName()));
-      this.publishSegments(table, overwrite);
+      this.commitCreateTable(table);
     } else {
       throw new MetaException("Insert into is not supported yet");
     }
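
For reference, the publish-then-poll flow in commitCreateTable can be sketched roughly as: publish the segment metadata, then poll the coordinator a bounded number of times per segment. The MetadataPublisher and Coordinator interfaces below are hypothetical stand-ins for the SQL metadata updater and the HTTP client, not the handler's actual types.

    import java.util.List;
    import java.util.concurrent.TimeUnit;

    final class SegmentCommitSketch {
      // Hypothetical stand-in for the SQL metadata updater used by the handler.
      interface MetadataPublisher {
        void publish(List<String> segmentIds);
      }

      // Hypothetical stand-in for the coordinator HTTP client.
      interface Coordinator {
        boolean isLoaded(String segmentId);
      }

      // Publish segment metadata, then wait (bounded by maxTries) for the
      // coordinator to report each segment as loaded.
      static void commit(MetadataPublisher publisher, Coordinator coordinator,
          List<String> segmentIds, int maxTries) throws InterruptedException {
        publisher.publish(segmentIds);
        for (String id : segmentIds) {
          int tries = 0;
          while (!coordinator.isLoaded(id) && tries++ < maxTries) {
            TimeUnit.SECONDS.sleep(1); // back off between coordinator checks
          }
        }
      }

      private SegmentCommitSketch() {}
    }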

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
index adf013b..8d48e14 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
@@ -22,7 +22,6 @@ import com.fasterxml.jackson.databind.jsontype.NamedType;
 import com.fasterxml.jackson.dataformat.smile.SmileFactory;
 import com.google.common.base.Throwables;
 import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Interner;
 import com.google.common.collect.Interners;
 import com.google.common.collect.Lists;
@@ -44,8 +43,6 @@ import io.druid.segment.IndexMergerV9;
 import io.druid.segment.column.ColumnConfig;
 import io.druid.timeline.DataSegment;
 import io.druid.timeline.partition.LinearShardSpec;
-import io.druid.timeline.partition.NoneShardSpec;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -57,11 +54,9 @@ import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.util.StringUtils;
 import org.jboss.netty.handler.codec.http.HttpHeaders;
 import org.jboss.netty.handler.codec.http.HttpMethod;
-import org.joda.time.DateTime;
 import org.skife.jdbi.v2.FoldController;
 import org.skife.jdbi.v2.Folder3;
 import org.skife.jdbi.v2.Handle;
-import org.skife.jdbi.v2.PreparedBatch;
 import org.skife.jdbi.v2.StatementContext;
 import org.skife.jdbi.v2.TransactionCallback;
 import org.skife.jdbi.v2.TransactionStatus;
@@ -340,7 +335,14 @@ public final class DruidStorageHandlerUtils {
               new HandleCallback<Void>() {
                 @Override
                 public Void withHandle(Handle handle) throws Exception {
-                  disableDataSourceWithHandle(handle, metadataStorageTablesConfig, dataSource);
+                  handle.createStatement(
+                          String.format("UPDATE %s SET used=false WHERE dataSource = :dataSource",
+                                  metadataStorageTablesConfig.getSegmentsTable()
+                          )
+                  )
+                          .bind("dataSource", dataSource)
+                          .execute();
+
                   return null;
                 }
               }
@@ -353,64 +355,6 @@ public final class DruidStorageHandlerUtils {
     return true;
   }
 
-  public static void publishSegments(final SQLMetadataConnector connector,
-      final MetadataStorageTablesConfig metadataStorageTablesConfig,
-      final String dataSource,
-      final List<DataSegment> segments, final ObjectMapper mapper, boolean overwrite)
-  {
-    connector.getDBI().inTransaction(
-        new TransactionCallback<Void>()
-        {
-          @Override
-          public Void inTransaction(Handle handle, TransactionStatus transactionStatus) throws Exception
-          {
-            if(overwrite){
-              disableDataSourceWithHandle(handle, metadataStorageTablesConfig, dataSource);
-            }
-            final PreparedBatch batch = handle.prepareBatch(
-                String.format(
-                    "INSERT INTO %1$s (id, dataSource, created_date, start, \"end\", partitioned, version, used, payload) "
-                        + "VALUES (:id, :dataSource, :created_date, :start, :end, :partitioned, :version, :used, :payload)",
-                    metadataStorageTablesConfig.getSegmentsTable()
-                )
-            );
-            for (final DataSegment segment : segments) {
-
-              batch.add(
-                  new ImmutableMap.Builder<String, Object>()
-                      .put("id", segment.getIdentifier())
-                      .put("dataSource", segment.getDataSource())
-                      .put("created_date", new DateTime().toString())
-                      .put("start", segment.getInterval().getStart().toString())
-                      .put("end", segment.getInterval().getEnd().toString())
-                      .put("partitioned", (segment.getShardSpec() instanceof NoneShardSpec) ? false : true)
-                      .put("version", segment.getVersion())
-                      .put("used", true)
-                      .put("payload", mapper.writeValueAsBytes(segment))
-                      .build()
-              );
-
-              LOG.info("Published %s", segment.getIdentifier());
-
-            }
-            batch.execute();
-
-            return null;
-          }
-        }
-    );
-  }
-
-  public static void disableDataSourceWithHandle(Handle handle, MetadataStorageTablesConfig metadataStorageTablesConfig, String dataSource){
-    handle.createStatement(
-        String.format("UPDATE %s SET used=false WHERE dataSource = :dataSource",
-            metadataStorageTablesConfig.getSegmentsTable()
-        )
-    )
-        .bind("dataSource", dataSource)
-        .execute();
-  }
-
   /**
    * @param connector                   SQL connector to metadata
    * @param metadataStorageTablesConfig Tables configuration
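
A rough JDBC equivalent of the inline JDBI UPDATE restored above, assuming a java.sql.Connection to the Druid metadata store and a caller-supplied segments table name (both assumptions, not part of this class):

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    // Mark every segment of a data source as unused in the metadata store's
    // segments table; returns the number of segment rows disabled.
    final class DisableDataSourceSketch {
      static int disable(Connection connection, String segmentsTable, String dataSource)
          throws SQLException {
        String sql = "UPDATE " + segmentsTable + " SET used = false WHERE dataSource = ?";
        try (PreparedStatement statement = connection.prepareStatement(sql)) {
          statement.setString(1, dataSource);
          return statement.executeUpdate();
        }
      }

      private DisableDataSourceSketch() {}
    }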

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
index fbdd4c9..40a2022 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
@@ -33,9 +33,6 @@ import io.druid.granularity.QueryGranularity;
 import io.druid.query.aggregation.AggregatorFactory;
 import io.druid.query.aggregation.DoubleSumAggregatorFactory;
 import io.druid.query.aggregation.LongSumAggregatorFactory;
-import io.druid.segment.IndexSpec;
-import io.druid.segment.data.ConciseBitmapSerdeFactory;
-import io.druid.segment.data.RoaringBitmapSerdeFactory;
 import io.druid.segment.indexing.DataSchema;
 import io.druid.segment.indexing.RealtimeTuningConfig;
 import io.druid.segment.indexing.granularity.GranularitySpec;
@@ -203,12 +200,6 @@ public class DruidOutputFormat<K, V> implements HiveOutputFormat<K, DruidWritabl
     }
     Integer maxRowInMemory = HiveConf.getIntVar(jc, HiveConf.ConfVars.HIVE_DRUID_MAX_ROW_IN_MEMORY);
 
-    IndexSpec indexSpec;
-    if ("concise".equals(HiveConf.getVar(jc, HiveConf.ConfVars.HIVE_DRUID_BITMAP_FACTORY_TYPE))) {
-      indexSpec = new IndexSpec(new ConciseBitmapSerdeFactory(), null, null, null);
-    } else {
-      indexSpec = new IndexSpec(new RoaringBitmapSerdeFactory(true), null, null, null);
-    }
     RealtimeTuningConfig realtimeTuningConfig = new RealtimeTuningConfig(maxRowInMemory,
             null,
             null,
@@ -217,7 +208,7 @@ public class DruidOutputFormat<K, V> implements HiveOutputFormat<K, DruidWritabl
             null,
             null,
             null,
-            indexSpec,
+            null,
             true,
             0,
             0,
@@ -241,6 +232,6 @@ public class DruidOutputFormat<K, V> implements HiveOutputFormat<K, DruidWritabl
 
   @Override
   public void checkOutputSpecs(FileSystem ignored, JobConf job) throws IOException {
-    // NOOP
+    throw new UnsupportedOperationException("not implemented yet");
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
index 53624e1..0b35428 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.druid.io;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.URL;
-import java.net.URLEncoder;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -171,8 +170,7 @@ public class DruidQueryBasedInputFormat extends InputFormat<NullWritable, DruidW
     // Create Select query
     SelectQueryBuilder builder = new Druids.SelectQueryBuilder();
     builder.dataSource(dataSource);
-    final List<Interval> intervals = Arrays.asList();
-    builder.intervals(intervals);
+    builder.intervals(Arrays.asList(DruidTable.DEFAULT_INTERVAL));
     builder.pagingSpec(PagingSpec.newSpec(1));
     Map<String, Object> context = new HashMap<>();
     context.put(Constants.DRUID_QUERY_FETCH, false);
@@ -214,7 +212,7 @@ public class DruidQueryBasedInputFormat extends InputFormat<NullWritable, DruidW
             StringUtils.join(query.getIntervals(), ","); // Comma-separated intervals without brackets
     final String request = String.format(
             "http://%s/druid/v2/datasources/%s/candidates?intervals=%s",
-            address, query.getDataSource().getNames().get(0), URLEncoder.encode(intervals, "UTF-8"));
+            address, query.getDataSource().getNames().get(0), intervals);
     final InputStream response;
     try {
       response = DruidStorageHandlerUtils.submitRequest(client, new Request(HttpMethod.GET, new URL(request)));
@@ -415,15 +413,11 @@ public class DruidQueryBasedInputFormat extends InputFormat<NullWritable, DruidW
 
   private static List<List<Interval>> createSplitsIntervals(List<Interval> intervals, int numSplits
   ) {
-
+    final long totalTime = DruidDateTimeUtils.extractTotalTime(intervals);
     long startTime = intervals.get(0).getStartMillis();
     long endTime = startTime;
     long currTime = 0;
     List<List<Interval>> newIntervals = new ArrayList<>();
-    long totalTime = 0;
-    for (Interval interval: intervals) {
-      totalTime += interval.getEndMillis() - interval.getStartMillis();
-    }
     for (int i = 0, posIntervals = 0; i < numSplits; i++) {
       final long rangeSize = Math.round((double) (totalTime * (i + 1)) / numSplits) -
               Math.round((double) (totalTime * i) / numSplits);
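
The arithmetic restored here divides the total covered time into numSplits near-equal ranges; a self-contained sketch of just that calculation (range sizes differ by at most one millisecond and always sum to totalTime):

    // Sketch of the even-split arithmetic used by createSplitsIntervals.
    final class SplitRangeSketch {
      static long[] rangeSizes(long totalTime, int numSplits) {
        long[] sizes = new long[numSplits];
        for (int i = 0; i < numSplits; i++) {
          sizes[i] = Math.round((double) (totalTime * (i + 1)) / numSplits)
              - Math.round((double) (totalTime * i) / numSplits);
        }
        return sizes;
      }

      public static void main(String[] args) {
        // 10 ms of data over 3 splits -> sizes 3, 4, 3.
        for (long size : rangeSizes(10L, 3)) {
          System.out.println(size);
        }
      }

      private SplitRangeSketch() {}
    }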

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
index f0bdb9e..9e8b439 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
@@ -99,7 +99,7 @@ public class DruidGroupByQueryRecordReader
         indexes[i]--;
         for (int j = i + 1; j < indexes.length; j++) {
           indexes[j] = current.getDimension(
-                  query.getDimensions().get(j).getOutputName()).size() - 1;
+                  query.getDimensions().get(j).getDimension()).size() - 1;
         }
         return true;
       }
@@ -110,7 +110,7 @@ public class DruidGroupByQueryRecordReader
       indexes = new int[query.getDimensions().size()];
       for (int i = 0; i < query.getDimensions().size(); i++) {
         DimensionSpec ds = query.getDimensions().get(i);
-        indexes[i] = current.getDimension(ds.getOutputName()).size() - 1;
+        indexes[i] = current.getDimension(ds.getDimension()).size() - 1;
       }
       return true;
     }
@@ -131,7 +131,7 @@ public class DruidGroupByQueryRecordReader
     // 2) The dimension columns
     for (int i = 0; i < query.getDimensions().size(); i++) {
       DimensionSpec ds = query.getDimensions().get(i);
-      List<String> dims = current.getDimension(ds.getOutputName());
+      List<String> dims = current.getDimension(ds.getDimension());
       if (dims.size() == 0) {
         // NULL value for dimension
         value.getValue().put(ds.getOutputName(), null);
@@ -170,7 +170,7 @@ public class DruidGroupByQueryRecordReader
       // 2) The dimension columns
       for (int i = 0; i < query.getDimensions().size(); i++) {
         DimensionSpec ds = query.getDimensions().get(i);
-        List<String> dims = current.getDimension(ds.getOutputName());
+        List<String> dims = current.getDimension(ds.getDimension());
         if (dims.size() == 0) {
           // NULL value for dimension
           value.getValue().put(ds.getOutputName(), null);
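
The change in these hunks is which key is used to read dimension values from a group-by row: the underlying dimension name versus the query's output alias. A simplified model of that lookup, using illustrative stand-in types rather than Druid's DimensionSpec and Row classes:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // The row stores values under the underlying dimension name, while the
    // reader emits them under the query's output alias.
    final class DimensionLookupSketch {
      static final class DimSpec {
        final String dimension;   // column name in the Druid row
        final String outputName;  // alias requested by the query
        DimSpec(String dimension, String outputName) {
          this.dimension = dimension;
          this.outputName = outputName;
        }
      }

      static Map<String, String> project(Map<String, List<String>> row, List<DimSpec> specs) {
        Map<String, String> value = new HashMap<>();
        for (DimSpec ds : specs) {
          List<String> dims = row.get(ds.dimension);               // read by dimension name
          value.put(ds.outputName, dims == null || dims.isEmpty()  // emit under output alias
              ? null : dims.get(0));
        }
        return value;
      }

      private DimensionLookupSketch() {}
    }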

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
index 05e3ec5..da6610a 100644
--- a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
+++ b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
@@ -18,13 +18,10 @@
 
 package org.apache.hadoop.hive.druid;
 
-import com.google.common.base.Throwables;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import io.druid.indexer.JobHelper;
 import io.druid.indexer.SQLMetadataStorageUpdaterJobHandler;
-import io.druid.metadata.MetadataStorageTablesConfig;
-import io.druid.metadata.SQLMetadataSegmentManager;
 import io.druid.segment.loading.SegmentLoadingException;
 import io.druid.timeline.DataSegment;
 import io.druid.timeline.partition.NoneShardSpec;
@@ -45,16 +42,10 @@ import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 import org.mockito.Mockito;
 import org.skife.jdbi.v2.Handle;
-import org.skife.jdbi.v2.StatementContext;
-import org.skife.jdbi.v2.tweak.HandleCallback;
-import org.skife.jdbi.v2.tweak.ResultSetMapper;
 
 import java.io.IOException;
 import java.io.OutputStream;
-import java.sql.ResultSet;
-import java.sql.SQLException;
 import java.util.Arrays;
-import java.util.List;
 import java.util.Map;
 import java.util.UUID;
 
@@ -94,6 +85,7 @@ public class TestDruidStorageHandler {
   public void testPreCreateTableWillCreateSegmentsTable() throws MetaException {
     DruidStorageHandler druidStorageHandler = new DruidStorageHandler(
             derbyConnectorRule.getConnector(),
+            new SQLMetadataStorageUpdaterJobHandler(derbyConnectorRule.getConnector()),
             derbyConnectorRule.metadataTablesConfigSupplier().get(),
             null
     );
@@ -122,6 +114,7 @@ public class TestDruidStorageHandler {
     );
     DruidStorageHandler druidStorageHandler = new DruidStorageHandler(
             derbyConnectorRule.getConnector(),
+            new SQLMetadataStorageUpdaterJobHandler(derbyConnectorRule.getConnector()),
             derbyConnectorRule.metadataTablesConfigSupplier().get(),
             null
     );
@@ -133,6 +126,7 @@ public class TestDruidStorageHandler {
           throws MetaException, IOException {
     DruidStorageHandler druidStorageHandler = new DruidStorageHandler(
             derbyConnectorRule.getConnector(),
+            new SQLMetadataStorageUpdaterJobHandler(derbyConnectorRule.getConnector()),
             derbyConnectorRule.metadataTablesConfigSupplier().get(),
             null
     );
@@ -164,6 +158,7 @@ public class TestDruidStorageHandler {
   public void testCommitInsertTable() throws MetaException, IOException {
     DruidStorageHandler druidStorageHandler = new DruidStorageHandler(
             derbyConnectorRule.getConnector(),
+            new SQLMetadataStorageUpdaterJobHandler(derbyConnectorRule.getConnector()),
             derbyConnectorRule.metadataTablesConfigSupplier().get(),
             null
     );
@@ -189,6 +184,7 @@ public class TestDruidStorageHandler {
   public void testDeleteSegment() throws IOException, SegmentLoadingException {
     DruidStorageHandler druidStorageHandler = new DruidStorageHandler(
             derbyConnectorRule.getConnector(),
+            new SQLMetadataStorageUpdaterJobHandler(derbyConnectorRule.getConnector()),
             derbyConnectorRule.metadataTablesConfigSupplier().get(),
             null
     );
@@ -225,68 +221,4 @@ public class TestDruidStorageHandler {
             localFileSystem.exists(segmentOutputPath.getParent().getParent().getParent())
     );
   }
-
-  @Test
-  public void testCommitInsertOverwriteTable() throws MetaException, IOException {
-    DerbyConnectorTestUtility connector = derbyConnectorRule.getConnector();
-    MetadataStorageTablesConfig metadataStorageTablesConfig = derbyConnectorRule
-        .metadataTablesConfigSupplier().get();
-
-    DruidStorageHandler druidStorageHandler = new DruidStorageHandler(
-        connector,
-        metadataStorageTablesConfig,
-        null
-    );
-    druidStorageHandler.preCreateTable(tableMock);
-    Configuration config = new Configuration();
-    config.set(String.valueOf(HiveConf.ConfVars.HIVEQUERYID), UUID.randomUUID().toString());
-    config.set(String.valueOf(HiveConf.ConfVars.DRUID_WORKING_DIR), tableWorkingPath);
-    druidStorageHandler.setConf(config);
-    LocalFileSystem localFileSystem = FileSystem.getLocal(config);
-    Path taskDirPath = new Path(tableWorkingPath, druidStorageHandler.makeStagingName());
-    Path descriptorPath = DruidStorageHandlerUtils.makeSegmentDescriptorOutputPath(dataSegment,
-        new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME)
-    );
-    List<DataSegment> existingSegments = Arrays.asList(DataSegment.builder().dataSource(DATA_SOURCE_NAME).version("v0")
-        .interval(new Interval(1, 10)).shardSpec(NoneShardSpec.instance()).build());
-    DruidStorageHandlerUtils.publishSegments(connector, metadataStorageTablesConfig, DATA_SOURCE_NAME,
-        existingSegments,
-        DruidStorageHandlerUtils.JSON_MAPPER,
-        true
-        );
-    DruidStorageHandlerUtils.writeSegmentDescriptor(localFileSystem, dataSegment, descriptorPath);
-    druidStorageHandler.commitInsertTable(tableMock, true);
-    Assert.assertArrayEquals(Lists.newArrayList(DATA_SOURCE_NAME).toArray(), Lists.newArrayList(
-        DruidStorageHandlerUtils.getAllDataSourceNames(connector,
-            metadataStorageTablesConfig
-        )).toArray());
-
-    final List<DataSegment> dataSegmentList = connector.getDBI()
-        .withHandle(new HandleCallback<List<DataSegment>>() {
-          @Override
-          public List<DataSegment> withHandle(Handle handle) throws Exception {
-            return handle
-                .createQuery(String.format("SELECT payload FROM %s WHERE used=true",
-                    metadataStorageTablesConfig.getSegmentsTable()))
-                .map(new ResultSetMapper<DataSegment>() {
-
-                  @Override
-                  public DataSegment map(int i, ResultSet resultSet,
-                      StatementContext statementContext)
-                      throws SQLException {
-                    try {
-                      return DruidStorageHandlerUtils.JSON_MAPPER.readValue(
-                          resultSet.getBytes("payload"),
-                          DataSegment.class
-                      );
-                    } catch (IOException e) {
-                      throw Throwables.propagate(e);
-                    }
-                  }
-                }).list();
-          }
-        });
-    Assert.assertEquals(1, dataSegmentList.size());
-
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java b/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
index d9e01fe..9ec82c0 100644
--- a/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
+++ b/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
@@ -37,10 +37,8 @@ import io.druid.granularity.QueryGranularities;
 import io.druid.query.aggregation.AggregatorFactory;
 import io.druid.query.aggregation.LongSumAggregatorFactory;
 import io.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory;
-import io.druid.segment.IndexSpec;
 import io.druid.segment.QueryableIndex;
 import io.druid.segment.QueryableIndexStorageAdapter;
-import io.druid.segment.data.RoaringBitmapSerdeFactory;
 import io.druid.segment.indexing.DataSchema;
 import io.druid.segment.indexing.RealtimeTuningConfig;
 import io.druid.segment.indexing.granularity.UniformGranularitySpec;
@@ -141,10 +139,8 @@ public class TestDruidRecordWriter {
             objectMapper
     );
 
-    IndexSpec indexSpec = new IndexSpec(new RoaringBitmapSerdeFactory(true), null, null, null);
-    RealtimeTuningConfig tuningConfig = new RealtimeTuningConfig(null, null, null,
-            temporaryFolder.newFolder(), null, null, null, null, indexSpec, null, 0, 0, null, null
-    );
+    RealtimeTuningConfig tuningConfig = RealtimeTuningConfig
+            .makeDefaultTuningConfig(temporaryFolder.newFolder());
     LocalFileSystem localFileSystem = FileSystem.getLocal(config);
     DataSegmentPusher dataSegmentPusher = new LocalDataSegmentPusher(
             new LocalDataSegmentPusherConfig() {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/errata.txt
----------------------------------------------------------------------
diff --git a/errata.txt b/errata.txt
index 949ed9a..6f464cf 100644
--- a/errata.txt
+++ b/errata.txt
@@ -1,9 +1,6 @@
 Commits with the wrong or no JIRA referenced:
 
 git commit                               branch     jira       url
-f1aae85f197de09d4b86143f7f13d5aa21d2eb85 master     HIVE-16431 https://issues.apache.org/jira/browse/HIVE-16431
-cbab5b29f26ceb3d4633ade9647ce8bcb2f020a0 master     HIVE-16422 https://issues.apache.org/jira/browse/HIVE-16422
-e6143de2b0c3f53d32db8a743119e3a8080d4f85 master     HIVE-16425 https://issues.apache.org/jira/browse/HIVE-16425
 3f90794d872e90c29a068f16cdf3f45b1cf52c74 master     HIVE-15579 https://issues.apache.org/jira/browse/HIVE-15579
 5a576b6fbf1680ab4dd8f275cad484a2614ef2c1 master     HIVE-10391 https://issues.apache.org/jira/browse/HIVE-10391
 582f4e1bc39b9605d11f762480b29561a44688ae llap       HIVE-10217 https://issues.apache.org/jira/browse/HIVE-10217
@@ -89,4 +86,3 @@ d8298e1c85a515150562b0df68af89c18c468638 llap       HIVE-9418  https://issues.ap
 d16d4f1bcc43d6ebcab0eaf5bc635fb88b60be5f master     HIVE-9423  https://issues.apache.org/jira/browse/HIVE-9423
 130617443bb05d79c18420c0c4e903a76da3651c master     HIVE-14909 https://issues.apache.org/jira/browse/HIVE-14909
 6dace60af4b6ab4d5200310a0ad94c4530c2bec3 master     HIVE-13335 https://issues.apache.org/jira/browse/HIVE-13335
-5facfbb863366d7a661c21c57011b8dbe43f52e0 master     HIVE-16307 https://issues.apache.org/jira/browse/HIVE-16307
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hbase-handler/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-handler/pom.xml b/hbase-handler/pom.xml
index 7f57b77..8dc47e9 100644
--- a/hbase-handler/pom.xml
+++ b/hbase-handler/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hbase-handler/src/test/queries/negative/hbase_ddl.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/negative/hbase_ddl.q b/hbase-handler/src/test/queries/negative/hbase_ddl.q
deleted file mode 100644
index 2913bcd..0000000
--- a/hbase-handler/src/test/queries/negative/hbase_ddl.q
+++ /dev/null
@@ -1,9 +0,0 @@
-DROP TABLE hbase_table_1;
-CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
-STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0");
-
-DESCRIBE EXTENDED hbase_table_1;
-
-alter table hbase_table_1 change column key newkey string;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hbase-handler/src/test/queries/positive/hbase_ddl.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_ddl.q b/hbase-handler/src/test/queries/positive/hbase_ddl.q
deleted file mode 100644
index a8bae75..0000000
--- a/hbase-handler/src/test/queries/positive/hbase_ddl.q
+++ /dev/null
@@ -1,20 +0,0 @@
-DROP TABLE hbase_table_1;
-CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
-STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0");
-
-DESCRIBE EXTENDED hbase_table_1;
-
-select * from hbase_table_1;
-
-EXPLAIN FROM src INSERT OVERWRITE TABLE hbase_table_1 SELECT * WHERE (key%2)=0;
-FROM src INSERT OVERWRITE TABLE hbase_table_1 SELECT * WHERE (key%2)=0;
-
-ALTER TABLE hbase_table_1 SET TBLPROPERTIES('hbase.mapred.output.outputtable'='kkk');
-
-desc formatted hbase_table_1;
-
-ALTER TABLE hbase_table_1 unset TBLPROPERTIES('hbase.mapred.output.outputtable');
-
-desc formatted hbase_table_1;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hbase-handler/src/test/queries/positive/hbase_queries.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_queries.q b/hbase-handler/src/test/queries/positive/hbase_queries.q
index 43efd6c..49fa829 100644
--- a/hbase-handler/src/test/queries/positive/hbase_queries.q
+++ b/hbase-handler/src/test/queries/positive/hbase_queries.q
@@ -180,7 +180,6 @@ DROP TABLE IF EXISTS hbase_table_10;
 CREATE TABLE hbase_table_10 (id bigint, data map<int, int>, str string)
 stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 with serdeproperties ("hbase.columns.mapping" = ":key,cf:map_col2,cf:str2_col");
-set hive.cbo.enable=false;
 insert overwrite table hbase_table_10 select 1 as id, map(10, cast(null as int)) as data , null as str from src limit 1;
 insert into table hbase_table_10 select 2 as id, map(20, cast(null as int)) as data , '1234' as str from src limit 1;
 insert into table hbase_table_10 select 3 as id, map(30, 31) as data , '1234' as str from src limit 1;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hbase-handler/src/test/results/negative/hbase_ddl.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/negative/hbase_ddl.q.out b/hbase-handler/src/test/results/negative/hbase_ddl.q.out
deleted file mode 100644
index b5aad70..0000000
--- a/hbase-handler/src/test/results/negative/hbase_ddl.q.out
+++ /dev/null
@@ -1,29 +0,0 @@
-PREHOOK: query: DROP TABLE hbase_table_1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE hbase_table_1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
-STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@hbase_table_1
-POSTHOOK: query: CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
-STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@hbase_table_1
-PREHOOK: query: DESCRIBE EXTENDED hbase_table_1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@hbase_table_1
-POSTHOOK: query: DESCRIBE EXTENDED hbase_table_1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@hbase_table_1
-key                 	int                 	It is a column key  
-value               	string              	It is the column string value
-	 	 
-#### A masked pattern was here ####
-FAILED: SemanticException [Error 10134]: ALTER TABLE can only be used for [ADDPROPS, DROPPROPS] to a non-native table  hbase_table_1

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hbase-handler/src/test/results/positive/hbase_ddl.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_ddl.q.out b/hbase-handler/src/test/results/positive/hbase_ddl.q.out
deleted file mode 100644
index 8cb88ed..0000000
--- a/hbase-handler/src/test/results/positive/hbase_ddl.q.out
+++ /dev/null
@@ -1,186 +0,0 @@
-PREHOOK: query: DROP TABLE hbase_table_1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE hbase_table_1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
-STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@hbase_table_1
-POSTHOOK: query: CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
-STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@hbase_table_1
-PREHOOK: query: DESCRIBE EXTENDED hbase_table_1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@hbase_table_1
-POSTHOOK: query: DESCRIBE EXTENDED hbase_table_1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@hbase_table_1
-key                 	int                 	It is a column key  
-value               	string              	It is the column string value
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: select * from hbase_table_1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@hbase_table_1
-#### A masked pattern was here ####
-POSTHOOK: query: select * from hbase_table_1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@hbase_table_1
-#### A masked pattern was here ####
-PREHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE hbase_table_1 SELECT * WHERE (key%2)=0
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE hbase_table_1 SELECT * WHERE (key%2)=0
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-  Stage-2
-  Stage-1 is a root stage
-  Stage-3 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-      Alter Table Operator:
-        Alter Table
-          type: drop props
-          old name: default.hbase_table_1
-          properties:
-            COLUMN_STATS_ACCURATE 
-
-  Stage: Stage-2
-      Insert operator:
-        Insert
-
-  Stage: Stage-1
-      Pre Insert operator:
-        Pre-Insert task
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((UDFToDouble(key) % 2.0) = 0.0) (type: boolean)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: UDFToInteger(key) (type: int), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.hive.hbase.HiveHBaseTableInputFormat
-                      output format: org.apache.hadoop.hive.hbase.HiveHBaseTableOutputFormat
-                      serde: org.apache.hadoop.hive.hbase.HBaseSerDe
-                      name: default.hbase_table_1
-
-PREHOOK: query: FROM src INSERT OVERWRITE TABLE hbase_table_1 SELECT * WHERE (key%2)=0
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@hbase_table_1
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE hbase_table_1 SELECT * WHERE (key%2)=0
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@hbase_table_1
-PREHOOK: query: ALTER TABLE hbase_table_1 SET TBLPROPERTIES('hbase.mapred.output.outputtable'='kkk')
-PREHOOK: type: ALTERTABLE_PROPERTIES
-PREHOOK: Input: default@hbase_table_1
-PREHOOK: Output: default@hbase_table_1
-POSTHOOK: query: ALTER TABLE hbase_table_1 SET TBLPROPERTIES('hbase.mapred.output.outputtable'='kkk')
-POSTHOOK: type: ALTERTABLE_PROPERTIES
-POSTHOOK: Input: default@hbase_table_1
-POSTHOOK: Output: default@hbase_table_1
-PREHOOK: query: desc formatted hbase_table_1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@hbase_table_1
-POSTHOOK: query: desc formatted hbase_table_1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@hbase_table_1
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	It is a column key  
-value               	string              	It is the column string value
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	hbase.mapred.output.outputtable	kkk                 
-	hbase.table.name    	hbase_table_0       
-#### A masked pattern was here ####
-	numFiles            	0                   
-	numRows             	0                   
-	rawDataSize         	0                   
-	storage_handler     	org.apache.hadoop.hive.hbase.HBaseStorageHandler
-	totalSize           	0                   
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.hbase.HBaseSerDe	 
-InputFormat:        	null                	 
-OutputFormat:       	null                	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	hbase.columns.mapping	cf:string           
-	serialization.format	1                   
-PREHOOK: query: ALTER TABLE hbase_table_1 unset TBLPROPERTIES('hbase.mapred.output.outputtable')
-PREHOOK: type: ALTERTABLE_PROPERTIES
-PREHOOK: Input: default@hbase_table_1
-PREHOOK: Output: default@hbase_table_1
-POSTHOOK: query: ALTER TABLE hbase_table_1 unset TBLPROPERTIES('hbase.mapred.output.outputtable')
-POSTHOOK: type: ALTERTABLE_PROPERTIES
-POSTHOOK: Input: default@hbase_table_1
-POSTHOOK: Output: default@hbase_table_1
-PREHOOK: query: desc formatted hbase_table_1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@hbase_table_1
-POSTHOOK: query: desc formatted hbase_table_1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@hbase_table_1
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	It is a column key  
-value               	string              	It is the column string value
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	hbase.table.name    	hbase_table_0       
-#### A masked pattern was here ####
-	numFiles            	0                   
-	numRows             	0                   
-	rawDataSize         	0                   
-	storage_handler     	org.apache.hadoop.hive.hbase.HBaseStorageHandler
-	totalSize           	0                   
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.hbase.HBaseSerDe	 
-InputFormat:        	null                	 
-OutputFormat:       	null                	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	hbase.columns.mapping	cf:string           
-	serialization.format	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/build.properties
----------------------------------------------------------------------
diff --git a/hcatalog/build.properties b/hcatalog/build.properties
index 3767cf3..dea1a44 100644
--- a/hcatalog/build.properties
+++ b/hcatalog/build.properties
@@ -49,7 +49,7 @@ clover.report.dir=${build.dir}/test/clover/reports
 clover.pdf.report.dir=${build.dir}/test/clover/pdf/reports
 
 # junit jvm args
-junit.jvm.args=-XX:-UseSplitVerifier -XX:+CMSClassUnloadingEnabled
+junit.jvm.args=-XX:-UseSplitVerifier -XX:+CMSClassUnloadingEnabled -XX:MaxPermSize=128M
 
 apache-rat.version=0.8
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/core/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/core/pom.xml b/hcatalog/core/pom.xml
index d151374..506bf22 100644
--- a/hcatalog/core/pom.xml
+++ b/hcatalog/core/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <groupId>org.apache.hive.hcatalog</groupId>
     <artifactId>hive-hcatalog</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 
@@ -191,7 +191,7 @@
             <artifactId>commons-logging</artifactId>
           </exclusion>
          </exclusions>
-    </dependency>
+   </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
@@ -208,19 +208,13 @@
             <artifactId>commons-logging</artifactId>
           </exclusion>
          </exclusions>
-    </dependency>
+   </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-yarn-server-tests</artifactId>
       <version>${hadoop.version}</version>
       <classifier>tests</classifier>
       <scope>test</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-log4j12</artifactId>
-        </exclusion>
-      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.pig</groupId>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
index 66a5dd4..8aa510f 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
@@ -116,7 +116,7 @@ public class TestPermsGrp extends TestCase {
       Table tbl = getTable(dbName, tblName, typeName);
       msc.createTable(tbl);
       Database db = Hive.get(hcatConf).getDatabase(dbName);
-      Path dfsPath = clientWH.getDefaultTablePath(db, tblName);
+      Path dfsPath = clientWH.getTablePath(db, tblName);
       cleanupTbl(dbName, tblName, typeName);
 
       // Next user did specify perms.
@@ -126,7 +126,7 @@ public class TestPermsGrp extends TestCase {
         assertTrue(e instanceof ExitException);
         assertEquals(((ExitException) e).getStatus(), 0);
       }
-      dfsPath = clientWH.getDefaultTablePath(db, tblName);
+      dfsPath = clientWH.getTablePath(db, tblName);
       assertTrue(dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath).getPermission().equals(FsPermission.valueOf("drwx-wx---")));
 
       cleanupTbl(dbName, tblName, typeName);
@@ -141,7 +141,7 @@ public class TestPermsGrp extends TestCase {
         assertTrue(me instanceof ExitException);
       }
       // No physical dir gets created.
-      dfsPath = clientWH.getDefaultTablePath(db, tblName);
+      dfsPath = clientWH.getTablePath(db, tblName);
       try {
         dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath);
         assert false;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/hcatalog-pig-adapter/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/hcatalog-pig-adapter/pom.xml b/hcatalog/hcatalog-pig-adapter/pom.xml
index c50a4d5..3a72260 100644
--- a/hcatalog/hcatalog-pig-adapter/pom.xml
+++ b/hcatalog/hcatalog-pig-adapter/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <groupId>org.apache.hive.hcatalog</groupId>
     <artifactId>hive-hcatalog</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/pom.xml b/hcatalog/pom.xml
index 9a73c84..34de177 100644
--- a/hcatalog/pom.xml
+++ b/hcatalog/pom.xml
@@ -24,7 +24,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 
@@ -71,28 +71,6 @@
       <version>${pig.version}</version>
       <classifier>h2</classifier>
       <scope>test</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-	  <artifactId>jetty-util</artifactId>
-	</exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-	  <artifactId>jetty</artifactId>
-	</exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-	  <artifactId>jsp-api-2.1</artifactId>
-	</exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-	  <artifactId>jsp-2.1</artifactId>
-	</exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-	  <artifactId>servlet-api-2.5</artifactId>
-	</exclusion>
-      </exclusions>
     </dependency>
   </dependencies>
 </project>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/server-extensions/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/server-extensions/pom.xml b/hcatalog/server-extensions/pom.xml
index 797341c..7d34543 100644
--- a/hcatalog/server-extensions/pom.xml
+++ b/hcatalog/server-extensions/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <groupId>org.apache.hive.hcatalog</groupId>
     <artifactId>hive-hcatalog</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 



http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
index 03bc3ce..ce0ffe6 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
@@ -135,7 +135,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
   private final OrcMetadataCache metadataCache;
   private final LowLevelCache lowLevelCache;
   private final BufferUsageManager bufferManager;
-  private final Configuration daemonConf, jobConf;
+  private final Configuration conf;
   private final FileSplit split;
   private List<Integer> includedColumnIds;
   private final SearchArgument sarg;
@@ -166,14 +166,13 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
   boolean[] globalIncludes = null;
 
   public OrcEncodedDataReader(LowLevelCache lowLevelCache, BufferUsageManager bufferManager,
-      OrcMetadataCache metadataCache, Configuration daemonConf, Configuration jobConf,
-      FileSplit split, List<Integer> columnIds, SearchArgument sarg, String[] columnNames,
-      OrcEncodedDataConsumer consumer, QueryFragmentCounters counters,
-      TypeDescription readerSchema) throws IOException {
+      OrcMetadataCache metadataCache, Configuration conf, FileSplit split, List<Integer> columnIds,
+      SearchArgument sarg, String[] columnNames, OrcEncodedDataConsumer consumer,
+      QueryFragmentCounters counters, TypeDescription readerSchema) throws IOException {
     this.lowLevelCache = lowLevelCache;
     this.metadataCache = metadataCache;
     this.bufferManager = bufferManager;
-    this.daemonConf = daemonConf;
+    this.conf = conf;
     this.split = split;
     this.includedColumnIds = columnIds;
     if (this.includedColumnIds != null) {
@@ -194,22 +193,15 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
     orcReader = null;
     // 1. Get file metadata from cache, or create the reader and read it.
     // Don't cache the filesystem object for now; Tez closes it and FS cache will fix all that
-    fs = split.getPath().getFileSystem(jobConf);
+    fs = split.getPath().getFileSystem(conf);
     fileKey = determineFileId(fs, split,
-        HiveConf.getBoolVar(daemonConf, ConfVars.LLAP_CACHE_ALLOW_SYNTHETIC_FILEID));
+        HiveConf.getBoolVar(conf, ConfVars.LLAP_CACHE_ALLOW_SYNTHETIC_FILEID));
     fileMetadata = getOrReadFileMetadata();
     if (readerSchema == null) {
       readerSchema = fileMetadata.getSchema();
     }
     globalIncludes = OrcInputFormat.genIncludedColumns(readerSchema, includedColumnIds);
-    // Do not allow users to override zero-copy setting. The rest can be taken from user config.
-    boolean useZeroCopy = OrcConf.USE_ZEROCOPY.getBoolean(daemonConf);
-    if (useZeroCopy != OrcConf.USE_ZEROCOPY.getBoolean(jobConf)) {
-      jobConf = new Configuration(jobConf);
-      jobConf.setBoolean(OrcConf.USE_ZEROCOPY.getAttribute(), useZeroCopy);
-    }
-    this.jobConf = jobConf;
-    Reader.Options options = new Reader.Options(jobConf).include(globalIncludes);
+    Reader.Options options = new Reader.Options(conf).include(globalIncludes);
     evolution = new SchemaEvolution(fileMetadata.getSchema(), readerSchema, options);
     consumer.setFileMetadata(fileMetadata);
     consumer.setIncludedColumns(globalIncludes);
@@ -489,7 +481,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
   private void validateFileMetadata() throws IOException {
     if (fileMetadata.getCompressionKind() == CompressionKind.NONE) return;
     int bufferSize = fileMetadata.getCompressionBufferSize();
-    long minAllocSize = HiveConf.getSizeVar(daemonConf, ConfVars.LLAP_ALLOCATOR_MIN_ALLOC);
+    long minAllocSize = HiveConf.getSizeVar(conf, ConfVars.LLAP_ALLOCATOR_MIN_ALLOC);
     if (bufferSize < minAllocSize) {
       LOG.warn("ORC compression buffer size (" + bufferSize + ") is smaller than LLAP low-level "
             + "cache minimum allocation size (" + minAllocSize + "). Decrease the value for "
@@ -571,13 +563,12 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
   private void ensureOrcReader() throws IOException {
     if (orcReader != null) return;
     path = split.getPath();
-    if (fileKey instanceof Long && HiveConf.getBoolVar(
-        daemonConf, ConfVars.LLAP_IO_USE_FILEID_PATH)) {
+    if (fileKey instanceof Long && HiveConf.getBoolVar(conf, ConfVars.LLAP_IO_USE_FILEID_PATH)) {
       path = HdfsUtils.getFileIdPath(fs, path, (long)fileKey);
     }
     LlapIoImpl.ORC_LOGGER.trace("Creating reader for {} ({})", path, split.getPath());
     long startTime = counters.startTimeCounter();
-    ReaderOptions opts = OrcFile.readerOptions(jobConf).filesystem(fs).fileMetadata(fileMetadata);
+    ReaderOptions opts = OrcFile.readerOptions(conf).filesystem(fs).fileMetadata(fileMetadata);
     if (split instanceof OrcSplit) {
       OrcTail orcTail = ((OrcSplit) split).getOrcTail();
       if (orcTail != null) {
@@ -664,7 +655,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
     ensureOrcReader();
     if (metadataReader != null) return;
     long startTime = counters.startTimeCounter();
-    boolean useZeroCopy = (daemonConf != null) && OrcConf.USE_ZEROCOPY.getBoolean(daemonConf);
+    boolean useZeroCopy = (conf != null) && OrcConf.USE_ZEROCOPY.getBoolean(conf);
     metadataReader = RecordReaderUtils.createDefaultDataReader(
         DataReaderProperties.builder()
         .withBufferSize(orcReader.getCompressionSize())
@@ -705,7 +696,10 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
       ArrayList<OrcStripeMetadata> metadata) throws IOException {
     RecordReaderImpl.SargApplier sargApp = null;
     if (sarg != null && rowIndexStride != 0) {
-      sargApp = new RecordReaderImpl.SargApplier(sarg,
+      List<OrcProto.Type> types = fileMetadata.getTypes();
+      String[] colNamesForSarg = OrcInputFormat.getSargColumnNames(
+          columnNames, types, globalIncludes, fileMetadata.isOriginalFormat());
+      sargApp = new RecordReaderImpl.SargApplier(sarg, colNamesForSarg,
           rowIndexStride, evolution,
           OrcFile.WriterVersion.from(fileMetadata.getWriterVersionNum()));
     }
@@ -720,7 +714,6 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
         OrcStripeMetadata stripeMetadata = metadata.get(stripeIxMod);
         rgsToRead = sargApp.pickRowGroups(stripe, stripeMetadata.getRowIndexes(),
             stripeMetadata.getBloomFilterKinds(),
-            stripeMetadata.getEncodings(),
             stripeMetadata.getBloomFilterIndexes(), true);
       }
       boolean isNone = rgsToRead == RecordReaderImpl.SargApplier.READ_NO_RGS,
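
The lines removed above pinned the daemon's zero-copy setting over the job configuration before building reader options. That general pattern, sketched with plain Hadoop Configuration calls and an illustrative key name:

    import org.apache.hadoop.conf.Configuration;

    // If the job config disagrees with the daemon config on a flag, copy the
    // job config and force the daemon's value so callers cannot override it.
    final class PinnedSettingSketch {
      static Configuration pinBoolean(Configuration daemonConf, Configuration jobConf, String key) {
        boolean daemonValue = daemonConf.getBoolean(key, false);
        if (jobConf.getBoolean(key, false) != daemonValue) {
          jobConf = new Configuration(jobConf); // copy before mutating the caller's config
          jobConf.setBoolean(key, daemonValue);
        }
        return jobConf;
      }

      private PinnedSettingSketch() {}
    }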

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java
index 907200a..6aab6de 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hive.llap.io.encoded;
 
-import org.apache.orc.impl.MemoryManager;
-
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
@@ -220,7 +218,7 @@ public class SerDeEncodedDataReader extends CallableWithNdc<Void>
     this.jobConf = jobConf;
     this.schema = schema;
     this.writerIncludes = OrcInputFormat.genIncludedColumns(schema, columnIds);
-    SchemaEvolution evolution = new SchemaEvolution(schema, null,
+    SchemaEvolution evolution = new SchemaEvolution(schema,
         new Reader.Options(jobConf).include(writerIncludes));
     consumer.setSchemaEvolution(evolution);
   }
@@ -1534,29 +1532,13 @@ public class SerDeEncodedDataReader extends CallableWithNdc<Void>
     }
   }
 
-  private static final class NoopMemoryManager extends MemoryManager {
-    public NoopMemoryManager() {
-      super(null);
-    }
-
-    @Override
-    public void addedRow(int rows) {}
-    @Override
-    public void addWriter(Path path, long requestedAllocation, Callback callback) {}
-    @Override
-    public void notifyWriters() {}
-    @Override
-    public void removeWriter(Path path) throws IOException {}
-  }
-  private static final NoopMemoryManager MEMORY_MANAGER = new NoopMemoryManager();
-
   static WriterOptions createOrcWriterOptions(ObjectInspector sourceOi,
       Configuration conf, CacheWriter cacheWriter, int allocSize) throws IOException {
     return OrcFile.writerOptions(conf).stripeSize(Long.MAX_VALUE).blockSize(Long.MAX_VALUE)
         .rowIndexStride(Integer.MAX_VALUE) // For now, do not limit this - one RG per split
         .blockPadding(false).compress(CompressionKind.NONE).version(Version.CURRENT)
         .encodingStrategy(EncodingStrategy.SPEED).bloomFilterColumns(null).inspector(sourceOi)
-        .physicalWriter(cacheWriter).memory(MEMORY_MANAGER).bufferSize(allocSize);
+        .physicalWriter(cacheWriter).bufferSize(allocSize);
   }
 
   private ObjectInspector getOiFromSerDe() throws IOException {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcMetadataCache.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcMetadataCache.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcMetadataCache.java
index 601b622..73a1721 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcMetadataCache.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcMetadataCache.java
@@ -27,13 +27,13 @@ import org.apache.hadoop.hive.common.io.DiskRange;
 import org.apache.hadoop.hive.common.io.DiskRangeList;
 import org.apache.hadoop.hive.common.io.DataCache.BooleanRef;
 import org.apache.hadoop.hive.common.io.DataCache.DiskRangeListFactory;
-import org.apache.hadoop.hive.llap.cache.LlapOomDebugDump;
 import org.apache.hadoop.hive.llap.cache.LowLevelCachePolicy;
 import org.apache.hadoop.hive.llap.cache.MemoryManager;
 import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority;
 import org.apache.hadoop.hive.ql.io.orc.encoded.OrcBatchKey;
+import org.apache.hadoop.hive.ql.util.JavaDataModel;
 
-public class OrcMetadataCache implements LlapOomDebugDump {
+public class OrcMetadataCache {
   private final ConcurrentHashMap<Object, OrcFileMetadata> metadata = new ConcurrentHashMap<>();
   private final ConcurrentHashMap<OrcBatchKey, OrcStripeMetadata> stripeMetadata =
       new ConcurrentHashMap<>();
@@ -51,7 +51,7 @@ public class OrcMetadataCache implements LlapOomDebugDump {
 
   public OrcFileMetadata putFileMetadata(OrcFileMetadata metaData) {
     long memUsage = metaData.getMemoryUsage();
-    memoryManager.reserveMemory(memUsage);
+    memoryManager.reserveMemory(memUsage, false);
     OrcFileMetadata val = metadata.putIfAbsent(metaData.getFileKey(), metaData);
     // See OrcFileMetadata; it is always unlocked, so we just "touch" it here to simulate use.
     return touchOnPut(metaData, val, memUsage);
@@ -59,7 +59,7 @@ public class OrcMetadataCache implements LlapOomDebugDump {
 
   public OrcStripeMetadata putStripeMetadata(OrcStripeMetadata metaData) {
     long memUsage = metaData.getMemoryUsage();
-    memoryManager.reserveMemory(memUsage);
+    memoryManager.reserveMemory(memUsage, false);
     OrcStripeMetadata val = stripeMetadata.putIfAbsent(metaData.getKey(), metaData);
     // See OrcStripeMetadata; it is always unlocked, so we just "touch" it here to simulate use.
     return touchOnPut(metaData, val, memUsage);
@@ -90,7 +90,7 @@ public class OrcMetadataCache implements LlapOomDebugDump {
         errorData.addError(range.getOffset(), range.getLength(), baseOffset);
       }
       long memUsage = errorData.estimateMemoryUsage();
-      memoryManager.reserveMemory(memUsage);
+      memoryManager.reserveMemory(memUsage, false);
       OrcFileEstimateErrors old = estimateErrors.putIfAbsent(fileKey, errorData);
       if (old != null) {
         errorData = old;
@@ -146,18 +146,4 @@ public class OrcMetadataCache implements LlapOomDebugDump {
   public void notifyEvicted(OrcFileEstimateErrors buffer) {
     estimateErrors.remove(buffer.getFileKey());
   }
-
-  @Override
-  public String debugDumpForOom() {
-    StringBuilder sb = new StringBuilder();
-    debugDumpShort(sb);
-    return sb.toString();
-  }
-
-  @Override
-  public void debugDumpShort(StringBuilder sb) {
-    sb.append("\nORC metadata cache state: ").append(metadata.size()).append(" files, ")
-      .append(stripeMetadata.size()).append(" stripes, ").append(estimateErrors.size())
-      .append(" files w/ORC estimate");
-  }
 }
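
A note for readers skimming this hunk: the restored call sites above pass a second
argument because the pre-merge LLAP MemoryManager contract (visible in the
DummyMemoryManager test hunks further down in this message) is
boolean reserveMemory(long, boolean waitForEviction); the metadata cache calls it
with waitForEviction=false and ignores the result. A minimal sketch against a
reduced, hypothetical version of that interface:

  // Sketch only, not part of the patch; SketchMemoryManager is a hypothetical
  // stand-in mirroring the reverted signature shown in the test hunks below.
  interface SketchMemoryManager {
    boolean reserveMemory(long memoryToReserve, boolean waitForEviction);
  }

  class SketchMetadataCache {
    private final SketchMemoryManager memoryManager;

    SketchMetadataCache(SketchMemoryManager memoryManager) {
      this.memoryManager = memoryManager;
    }

    void put(long memUsage) {
      // waitForEviction=false: reserve without blocking; the return value is
      // ignored here, matching the putFileMetadata/putStripeMetadata call sites above.
      memoryManager.reserveMemory(memUsage, false);
    }
  }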

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorMetrics.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorMetrics.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorMetrics.java
index 435b4de..7a0ecc9 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorMetrics.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorMetrics.java
@@ -58,7 +58,6 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
 import com.google.common.collect.Maps;
-import org.apache.hadoop.hive.common.JvmMetrics;
 import org.apache.hadoop.hive.llap.daemon.impl.ContainerRunnerImpl;
 import org.apache.hadoop.metrics2.MetricsCollector;
 import org.apache.hadoop.metrics2.MetricsInfo;
@@ -72,6 +71,7 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
 import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
 import org.apache.hadoop.metrics2.lib.MutableQuantiles;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
 
 /**
  * Metrics about the llap daemon executors.

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java b/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
index 085c977..dce0c56 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.tez.common.security.JobTokenIdentifier;
@@ -87,7 +88,6 @@ import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.ChannelFactory;
 import org.jboss.netty.channel.ChannelFuture;
 import org.jboss.netty.channel.ChannelFutureListener;
-import org.jboss.netty.channel.ChannelHandler;
 import org.jboss.netty.channel.ChannelHandlerContext;
 import org.jboss.netty.channel.ChannelPipeline;
 import org.jboss.netty.channel.ChannelPipelineFactory;
@@ -109,15 +109,9 @@ import org.jboss.netty.handler.codec.http.HttpResponse;
 import org.jboss.netty.handler.codec.http.HttpResponseEncoder;
 import org.jboss.netty.handler.codec.http.HttpResponseStatus;
 import org.jboss.netty.handler.codec.http.QueryStringDecoder;
-import org.jboss.netty.handler.timeout.IdleState;
-import org.jboss.netty.handler.timeout.IdleStateAwareChannelHandler;
-import org.jboss.netty.handler.timeout.IdleStateEvent;
-import org.jboss.netty.handler.timeout.IdleStateHandler;
 import org.jboss.netty.handler.ssl.SslHandler;
 import org.jboss.netty.handler.stream.ChunkedWriteHandler;
 import org.jboss.netty.util.CharsetUtil;
-import org.jboss.netty.util.HashedWheelTimer;
-import org.jboss.netty.util.Timer;
 
 public class ShuffleHandler implements AttemptRegistrationListener {
 
@@ -214,7 +208,6 @@ public class ShuffleHandler implements AttemptRegistrationListener {
   final boolean connectionKeepAliveEnabled;
   final int connectionKeepAliveTimeOut;
   final int mapOutputMetaInfoCacheSize;
-  Timer timer;
   private final LocalDirAllocator lDirAlloc =
       new LocalDirAllocator(SHUFFLE_HANDLER_LOCAL_DIRS);
   private final Shuffle shuffle;
@@ -318,10 +311,8 @@ public class ShuffleHandler implements AttemptRegistrationListener {
 
   public void start() throws Exception {
     ServerBootstrap bootstrap = new ServerBootstrap(selector);
-    // Timer is shared across entire factory and must be released separately
-    timer = new HashedWheelTimer();
     try {
-      pipelineFact = new HttpPipelineFactory(conf, timer);
+      pipelineFact = new HttpPipelineFactory(conf);
     } catch (Exception ex) {
       throw new RuntimeException(ex);
     }
@@ -485,10 +476,6 @@ public class ShuffleHandler implements AttemptRegistrationListener {
     if (pipelineFact != null) {
       pipelineFact.destroy();
     }
-    if (timer != null) {
-      // Release this shared timer resource
-      timer.stop();
-    }
     if (dirWatcher != null) {
       dirWatcher.stop();
     }
@@ -519,22 +506,12 @@ public class ShuffleHandler implements AttemptRegistrationListener {
     userRsrc.remove(appIdString);
   }
 
-  private static class TimeoutHandler extends IdleStateAwareChannelHandler {
-    @Override
-    public void channelIdle(ChannelHandlerContext ctx, IdleStateEvent e) {
-      if (e.getState() == IdleState.WRITER_IDLE) {
-        e.getChannel().close();
-      }
-    }
-  }
-
   class HttpPipelineFactory implements ChannelPipelineFactory {
 
     final Shuffle SHUFFLE;
     private SSLFactory sslFactory;
-    private final ChannelHandler idleStateHandler;
 
-    public HttpPipelineFactory(Configuration conf, Timer timer) throws Exception {
+    public HttpPipelineFactory(Configuration conf) throws Exception {
       SHUFFLE = getShuffle(conf);
       // TODO Setup SSL Shuffle
 //      if (conf.getBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY,
@@ -543,7 +520,6 @@ public class ShuffleHandler implements AttemptRegistrationListener {
 //        sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
 //        sslFactory.init();
 //      }
-      this.idleStateHandler = new IdleStateHandler(timer, 0, connectionKeepAliveTimeOut, 0);
     }
 
     public void destroy() {
@@ -563,8 +539,6 @@ public class ShuffleHandler implements AttemptRegistrationListener {
       pipeline.addLast("encoder", new HttpResponseEncoder());
       pipeline.addLast("chunking", new ChunkedWriteHandler());
       pipeline.addLast("shuffle", SHUFFLE);
-      pipeline.addLast("idle", idleStateHandler);
-      pipeline.addLast("timeout", new TimeoutHandler());
       return pipeline;
       // TODO factor security manager into pipeline
       // TODO factor out encode/decode to permit binary shuffle
@@ -778,10 +752,7 @@ public class ShuffleHandler implements AttemptRegistrationListener {
           return;
         }
       }
-      // If Keep alive is enabled, do not close the connection.
-      if (!keepAliveParam && !connectionKeepAliveEnabled) {
-        lastMap.addListener(ChannelFutureListener.CLOSE);
-      }
+      lastMap.addListener(ChannelFutureListener.CLOSE);
     }
 
     private String getErrorMessage(Throwable t) {
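
A note on the keep-alive hunks above: with the idle-state handler and keep-alive
checks reverted out, the shuffle response path unconditionally closes the channel
once the last map output has been written, via Netty 3's CLOSE listener. A small,
self-contained sketch of that pattern (class and method names here are hypothetical,
not part of the patch):

  import org.jboss.netty.buffer.ChannelBuffers;
  import org.jboss.netty.channel.Channel;
  import org.jboss.netty.channel.ChannelFuture;
  import org.jboss.netty.channel.ChannelFutureListener;

  // Sketch only, not part of the patch.
  class CloseAfterLastWrite {
    static void finishResponse(Channel ch, byte[] lastChunk) {
      ChannelFuture lastWrite = ch.write(ChannelBuffers.wrappedBuffer(lastChunk));
      // Close the connection as soon as the final write completes, mirroring
      // lastMap.addListener(ChannelFutureListener.CLOSE) in the hunk above.
      lastWrite.addListener(ChannelFutureListener.CLOSE);
    }
  }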

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/main/resources/hive-webapps/llap/js/metrics.js
----------------------------------------------------------------------
diff --git a/llap-server/src/main/resources/hive-webapps/llap/js/metrics.js b/llap-server/src/main/resources/hive-webapps/llap/js/metrics.js
index 0db9a05..4c41c34 100644
--- a/llap-server/src/main/resources/hive-webapps/llap/js/metrics.js
+++ b/llap-server/src/main/resources/hive-webapps/llap/js/metrics.js
@@ -103,8 +103,8 @@ llap.model.LlapDaemonInfo = new function() {
    this.push = function(jmx) {
       var bean = jmxbean(jmx, this.name); 
       this.executors = bean["NumExecutors"];
-      this.active = bean["NumActive"];
-      this.active_rate.add(this.active);
+      this.active = bean["ExecutorsStatus"];
+      this.active_rate.add(this.active.length);
    }
 }
 
@@ -175,7 +175,7 @@ llap.view.Cache = new function () {
 llap.view.Executors = new function () {
    this.refresh = function() {
       var model = llap.model.LlapDaemonInfo;
-      $("#executors-used").text(model.active);
+      $("#executors-used").text(model.active.length);
       $("#executors-max").text(model.executors);
       $("#executors-rate").text(((model.active_rate.peek() * 100.0)/model.executors).toFixed(0));
       $("#executors-trend").sparkline(model.active_rate);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/main/resources/llap-cli-log4j2.properties
----------------------------------------------------------------------
diff --git a/llap-server/src/main/resources/llap-cli-log4j2.properties b/llap-server/src/main/resources/llap-cli-log4j2.properties
index 687c973..483c81f 100644
--- a/llap-server/src/main/resources/llap-cli-log4j2.properties
+++ b/llap-server/src/main/resources/llap-cli-log4j2.properties
@@ -19,14 +19,13 @@ name = LlapCliLog4j2
 packages = org.apache.hadoop.hive.ql.log
 
 # list of properties
-property.hive.log.level = WARN
+property.hive.log.level = INFO
 property.hive.root.logger = console
 property.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}
 property.hive.log.file = llap-cli.log
-property.hive.llapstatus.consolelogger.level = INFO
 
 # list of all appenders
-appenders = console, DRFA, llapstatusconsole
+appenders = console, DRFA
 
 # console appender
 appender.console.type = Console
@@ -35,18 +34,11 @@ appender.console.target = SYSTEM_ERR
 appender.console.layout.type = PatternLayout
 appender.console.layout.pattern = %p %c{2}: %m%n
 
-# llapstatusconsole appender
-appender.llapstatusconsole.type = Console
-appender.llapstatusconsole.name = llapstatusconsole
-appender.llapstatusconsole.target = SYSTEM_ERR
-appender.llapstatusconsole.layout.type = PatternLayout
-appender.llapstatusconsole.layout.pattern = %m%n
-
 # daily rolling file appender
 appender.DRFA.type = RollingRandomAccessFile
 appender.DRFA.name = DRFA
 appender.DRFA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}
-# Use %pidn in the filePattern to append <process-id>@<host-name> to the filename if you want separate log files for different CLI session
+# Use %pid in the filePattern to append <process-id>@<host-name> to the filename if you want separate log files for different CLI session
 appender.DRFA.filePattern = ${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}
 appender.DRFA.layout.type = PatternLayout
 appender.DRFA.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n
@@ -58,7 +50,7 @@ appender.DRFA.strategy.type = DefaultRolloverStrategy
 appender.DRFA.strategy.max = 30
 
 # list of all loggers
-loggers = ZooKeeper, DataNucleus, Datastore, JPOX, HadoopConf, LlapStatusServiceDriverConsole
+loggers = ZooKeeper, DataNucleus, Datastore, JPOX, HadoopConf
 
 logger.ZooKeeper.name = org.apache.zookeeper
 logger.ZooKeeper.level = WARN
@@ -75,17 +67,8 @@ logger.JPOX.level = ERROR
 logger.HadoopConf.name = org.apache.hadoop.conf.Configuration
 logger.HadoopConf.level = ERROR
 
-logger.LlapStatusServiceDriverConsole.name = LlapStatusServiceDriverConsole
-logger.LlapStatusServiceDriverConsole.additivity = false
-logger.LlapStatusServiceDriverConsole.level = ${sys:hive.llapstatus.consolelogger.level}
-
-
 # root logger
 rootLogger.level = ${sys:hive.log.level}
 rootLogger.appenderRefs = root, DRFA
 rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
 rootLogger.appenderRef.DRFA.ref = DRFA
-logger.LlapStatusServiceDriverConsole.appenderRefs = llapstatusconsole, DRFA
-logger.LlapStatusServiceDriverConsole.appenderRef.llapstatusconsole.ref = llapstatusconsole
-logger.LlapStatusServiceDriverConsole.appenderRef.DRFA.ref = DRFA
-

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/main/resources/llap-daemon-log4j2.properties
----------------------------------------------------------------------
diff --git a/llap-server/src/main/resources/llap-daemon-log4j2.properties b/llap-server/src/main/resources/llap-daemon-log4j2.properties
index c61b901..1c797dc 100644
--- a/llap-server/src/main/resources/llap-daemon-log4j2.properties
+++ b/llap-server/src/main/resources/llap-daemon-log4j2.properties
@@ -64,7 +64,7 @@ appender.RFA.strategy.max = ${sys:llap.daemon.log.maxbackupindex}
 appender.HISTORYAPPENDER.type = RollingRandomAccessFile
 appender.HISTORYAPPENDER.name = HISTORYAPPENDER
 appender.HISTORYAPPENDER.fileName = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}
-appender.HISTORYAPPENDER.filePattern = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}_%d{yyyy-MM-dd-HH}_%i.done
+appender.HISTORYAPPENDER.filePattern = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}_%d{yyyy-MM-dd}_%i.done
 appender.HISTORYAPPENDER.layout.type = PatternLayout
 appender.HISTORYAPPENDER.layout.pattern = %m%n
 appender.HISTORYAPPENDER.policies.type = Policies
@@ -100,10 +100,8 @@ appender.query-routing.routes.route-mdc.file-mdc.app.layout.type = PatternLayout
 appender.query-routing.routes.route-mdc.file-mdc.app.layout.pattern = %d{ISO8601} %5p [%t (%X{fragmentId})] %c{2}: %m%n
 
 # list of all loggers
-loggers = PerfLogger, EncodedReader, NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX, HistoryLogger, LlapIoImpl, LlapIoOrc, LlapIoCache, LlapIoLocking, TezSM, TezSS, TezHC, LlapDaemon
+loggers = PerfLogger, EncodedReader, NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX, HistoryLogger, LlapIoImpl, LlapIoOrc, LlapIoCache, LlapIoLocking, TezSM, TezSS, TezHC
 
-logger.LlapDaemon.name = org.apache.hadoop.hive.llap.daemon.impl.LlapDaemon
-logger.LlapDaemon.level = INFO
 
 # shut up the Tez logs that log debug-level stuff on INFO
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
----------------------------------------------------------------------
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
index a6080e6..04ba273 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
@@ -58,7 +58,8 @@ public class TestBuddyAllocator {
 
   private static class DummyMemoryManager implements MemoryManager {
     @Override
-    public void reserveMemory(long memoryToReserve) {
+    public boolean reserveMemory(long memoryToReserve, boolean waitForEviction) {
+      return true;
     }
 
     @Override
@@ -75,12 +76,7 @@ public class TestBuddyAllocator {
     }
 
     @Override
-    public long forceReservedMemory(int allocationSize, int count) {
-      return allocationSize * count;
-    }
-
-    @Override
-    public void debugDumpShort(StringBuilder sb) {
+    public void forceReservedMemory(int allocationSize, int count) {
     }
   }
 
@@ -252,7 +248,7 @@ public class TestBuddyAllocator {
     try {
       a.allocateMultiple(allocs[index], size);
     } catch (AllocatorOutOfMemoryException ex) {
-      LOG.error("Failed to allocate " + allocCount + " of " + size + "; " + a.debugDumpForOomInternal());
+      LOG.error("Failed to allocate " + allocCount + " of " + size + "; " + a.debugDump());
       throw ex;
     }
     // LOG.info("Allocated " + allocCount + " of " + size + "; " + a.debugDump());

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java
----------------------------------------------------------------------
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java
index e95f807..6c3ec03 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelCacheImpl.java
@@ -116,12 +116,8 @@ public class TestLowLevelCacheImpl {
     }
 
     @Override
-    public long tryEvictContiguousData(int allocationSize, int count) {
-      return count * allocationSize;
-    }
-
-    @Override
-    public void debugDumpShort(StringBuilder sb) {
+    public int tryEvictContiguousData(int allocationSize, int count) {
+      return count;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelLrfuCachePolicy.java
----------------------------------------------------------------------
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelLrfuCachePolicy.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelLrfuCachePolicy.java
index 0cce624..f0de7c4 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelLrfuCachePolicy.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelLrfuCachePolicy.java
@@ -230,25 +230,18 @@ public class TestLowLevelLrfuCachePolicy {
     // Now we should have two in the heap and two in the list, which is an implementation detail.
     // Evict only big blocks.
     et.evicted.clear();
-    assertEquals(8, lrfu.tryEvictContiguousData(2, 4));
+    assertEquals(4, lrfu.tryEvictContiguousData(2, 4));
     for (int i = 0; i < sizeTwo.size(); ++i) {
       LlapDataBuffer block = et.evicted.get(i);
       assertTrue(block.isInvalid());
       assertSame(sizeTwo.get(i), block);
     }
     et.evicted.clear();
-    // Evict small blocks when no big ones are available.
-    assertEquals(2, lrfu.tryEvictContiguousData(2, 1));
-    for (int i = 0; i < 2; ++i) {
-	  LlapDataBuffer block = et.evicted.get(i);
-	  assertTrue(block.isInvalid());
-	  assertSame(sizeOne.get(i), block);
-	}
-    et.evicted.clear();
-    // Evict the rest.
-    assertEquals(2, lrfu.evictSomeBlocks(3));
-    for (int i = 2; i < sizeOne.size(); ++i) {
-      LlapDataBuffer block = et.evicted.get(i - 2);
+    // Cannot evict any more size 2.
+    assertEquals(0, lrfu.tryEvictContiguousData(2, 1));
+    assertEquals(4, lrfu.evictSomeBlocks(4));
+    for (int i = 0; i < sizeOne.size(); ++i) {
+      LlapDataBuffer block = et.evicted.get(i);
       assertTrue(block.isInvalid());
       assertSame(sizeOne.get(i), block);
     }
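
On the two cache-test hunks above: the revert also changes the unit of
tryEvictContiguousData's return value. The removed long-returning variant reported
bytes evicted (allocationSize * count, hence the expected 8 for four size-2 buffers),
while the restored int-returning variant appears to report the number of buffers
evicted (hence 4, and 0 when no more contiguous blocks of that size can be freed).
A small arithmetic sketch of the difference, using the tests' numbers:

  // Sketch only, not part of the patch: the same eviction expressed under
  // both contracts, with allocationSize = 2 and count = 4 as in the tests.
  class EvictionContractSketch {
    public static void main(String[] args) {
      int allocationSize = 2, count = 4;
      long bytesEvicted = (long) allocationSize * count; // 8 -- removed long variant
      int buffersEvicted = count;                        // 4 -- restored int variant
      System.out.println(bytesEvicted + " bytes vs " + buffersEvicted + " buffers");
    }
  }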

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
----------------------------------------------------------------------
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
index 3059382..3408dff 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
@@ -63,21 +63,18 @@ public class TestOrcMetadataCache {
     }
 
     @Override
-    public long tryEvictContiguousData(int allocationSize, int count) {
+    public int tryEvictContiguousData(int allocationSize, int count) {
       return 0;
     }
-
-    @Override
-    public void debugDumpShort(StringBuilder sb) {
-    }
   }
 
   private static class DummyMemoryManager implements MemoryManager {
     int allocs = 0;
 
     @Override
-    public void reserveMemory(long memoryToReserve) {
+    public boolean reserveMemory(long memoryToReserve, boolean waitForEviction) {
       ++allocs;
+      return true;
     }
 
     @Override
@@ -95,12 +92,7 @@ public class TestOrcMetadataCache {
     }
 
     @Override
-    public long forceReservedMemory(int allocationSize, int count) {
-      return allocationSize * count;
-    }
-
-    @Override
-    public void debugDumpShort(StringBuilder sb) {
+    public void forceReservedMemory(int allocationSize, int count) {
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java
----------------------------------------------------------------------
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java
index 6f1305e..06f6dac 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java
@@ -15,7 +15,6 @@
 package org.apache.hadoop.hive.llap.daemon;
 
 import javax.annotation.Nullable;
-
 import java.io.File;
 import java.io.IOException;
 
@@ -27,7 +26,6 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.llap.LlapDaemonInfo;
 import org.apache.hadoop.hive.llap.daemon.impl.LlapDaemon;
 import org.apache.hadoop.hive.llap.shufflehandler.ShuffleHandler;
 import org.apache.hadoop.service.AbstractService;
@@ -120,9 +118,6 @@ public class MiniLlapCluster extends AbstractService {
     this.llapIoEnabled = llapIoEnabled;
     this.ioBytesPerService = ioBytesPerService;
 
-    LlapDaemonInfo.initialize("mini-llap-cluster", numExecutorsPerService, execMemoryPerService,
-        ioBytesPerService, ioIsDirect, llapIoEnabled);
-
     // Setup Local Dirs
     localDirs = new String[numLocalDirs];
     for (int i = 0 ; i < numLocalDirs ; i++) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-tez/pom.xml
----------------------------------------------------------------------
diff --git a/llap-tez/pom.xml b/llap-tez/pom.xml
index 1e5b235..c0fbe08 100644
--- a/llap-tez/pom.xml
+++ b/llap-tez/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapTaskSchedulerMetrics.java
----------------------------------------------------------------------
diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapTaskSchedulerMetrics.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapTaskSchedulerMetrics.java
index 478f949..04fd815 100644
--- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapTaskSchedulerMetrics.java
+++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapTaskSchedulerMetrics.java
@@ -31,7 +31,6 @@ import static org.apache.hadoop.hive.llap.tezplugins.metrics.LlapTaskSchedulerIn
 import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName;
 import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
 
-import org.apache.hadoop.hive.common.JvmMetrics;
 import org.apache.hadoop.hive.llap.metrics.LlapMetricsSystem;
 import org.apache.hadoop.hive.llap.metrics.MetricsUtils;
 import org.apache.hadoop.metrics2.MetricsCollector;
@@ -44,6 +43,7 @@ import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterInt;
 import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
 import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
 
 /**
  * Metrics about the llap task scheduler.

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/if/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift
index 2800e23..a0c6e59 100755
--- a/metastore/if/hive_metastore.thrift
+++ b/metastore/if/hive_metastore.thrift
@@ -638,9 +638,8 @@ struct GetOpenTxnsInfoResponse {
 
 struct GetOpenTxnsResponse {
     1: required i64 txn_high_water_mark,
-    2: required list<i64> open_txns,  // set<i64> changed to list<i64> since 3.0
+    2: required set<i64> open_txns,
     3: optional i64 min_open_txn, //since 1.3,2.2
-    4: required binary abortedBits,   // since 3.0
 }
 
 struct OpenTxnRequest {
@@ -815,10 +814,9 @@ struct CurrentNotificationEventId {
 }
 
 struct InsertEventRequestData {
-    1: optional bool replace,
-    2: required list<string> filesAdded,
+    1: required list<string> filesAdded,
     // Checksum of files (hex string of checksum byte payload)
-    3: optional list<string> filesAddedChecksum,
+    2: optional list<string> filesAddedChecksum,
 }
 
 union FireEventRequestData {
@@ -1114,8 +1112,6 @@ service ThriftHiveMetastore extends fb303.FacebookService
   void drop_table_with_environment_context(1:string dbname, 2:string name, 3:bool deleteData,
       4:EnvironmentContext environment_context)
                        throws(1:NoSuchObjectException o1, 2:MetaException o3)
-  void truncate_table(1:string dbName, 2:string tableName, 3:list<string> partNames)
-                          throws(1:MetaException o1)
   list<string> get_tables(1: string db_name, 2: string pattern) throws (1: MetaException o1)
   list<string> get_tables_by_type(1: string db_name, 2: string pattern, 3: string tableType) throws (1: MetaException o1)
   list<TableMeta> get_table_meta(1: string db_patterns, 2: string tbl_patterns, 3: list<string> tbl_types)
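
On the GetOpenTxnsResponse hunk above: the revert restores open_txns as a Thrift
set<i64> and drops the abortedBits field, so under standard Thrift Java codegen the
open transaction ids come back as a java.util.Set<Long> rather than a List<Long>
plus a bitset of aborted flags. A rough, hypothetical sketch of the resulting value
object's shape (not the actual generated class):

  // Sketch only, not the generated code: approximate field shape after the revert.
  class GetOpenTxnsResponseSketch {
    long txnHighWaterMark;          // 1: required i64
    java.util.Set<Long> openTxns;   // 2: required set<i64> again
    Long minOpenTxn;                // 3: optional i64, since 1.3/2.2
  }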

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/pom.xml
----------------------------------------------------------------------
diff --git a/metastore/pom.xml b/metastore/pom.xml
index 733f891..35752ff 100644
--- a/metastore/pom.xml
+++ b/metastore/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 
@@ -143,22 +143,6 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-auth</artifactId>
-      <version>${hadoop.version}</version>
-      <optional>true</optional>
-      <exclusions>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-log4j12</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>commmons-logging</groupId>
-          <artifactId>commons-logging</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <version>${hadoop.version}</version>
       <optional>true</optional>
@@ -271,12 +255,6 @@
       <version>${disruptor.version}</version>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.skyscreamer</groupId>
-      <artifactId>jsonassert</artifactId>
-      <version>1.4.0</version>
-      <scope>test</scope>
-    </dependency>
   </dependencies>
 
   <profiles>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/derby/022-HIVE-11107.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/022-HIVE-11107.derby.sql b/metastore/scripts/upgrade/derby/022-HIVE-11107.derby.sql
index 4a13ea9..ef08acb 100644
--- a/metastore/scripts/upgrade/derby/022-HIVE-11107.derby.sql
+++ b/metastore/scripts/upgrade/derby/022-HIVE-11107.derby.sql
@@ -1,2 +1,2 @@
-CREATE TABLE "APP"."TAB_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(256) NOT NULL, "COLUMN_NAME" VARCHAR(767) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000),"BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, "NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL);
-CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+CREATE TABLE "APP"."TAB_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(128) NOT NULL, "COLUMN_NAME" VARCHAR(1000) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000),"BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, "NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL);
+CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/derby/039-HIVE-12274.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/039-HIVE-12274.derby.sql b/metastore/scripts/upgrade/derby/039-HIVE-12274.derby.sql
deleted file mode 100644
index c6637c1..0000000
--- a/metastore/scripts/upgrade/derby/039-HIVE-12274.derby.sql
+++ /dev/null
@@ -1,32 +0,0 @@
--- add a new temp column,type clob, drop the old column and rename temp to old col
--- change COLUMNS_V2.TYPE_NAME to CLOB
-ALTER TABLE "APP"."COLUMNS_V2" ADD COLUMN "TYPE_NAME_CLOB" CLOB;
-UPDATE "APP"."COLUMNS_V2" SET TYPE_NAME_CLOB=CAST(TYPE_NAME AS CLOB);
-ALTER TABLE "APP"."COLUMNS_V2" DROP COLUMN TYPE_NAME;
-RENAME COLUMN "APP"."COLUMNS_V2"."TYPE_NAME_CLOB" TO "TYPE_NAME";
-
--- change TABLE_PARAMS.PARAM_VALUE to CLOB
-ALTER TABLE "APP"."TABLE_PARAMS" ADD COLUMN "PARAM_VALUE_CLOB" CLOB;
-UPDATE "APP"."TABLE_PARAMS" SET PARAM_VALUE_CLOB=CAST(PARAM_VALUE AS CLOB);
-ALTER TABLE "APP"."TABLE_PARAMS" DROP COLUMN PARAM_VALUE;
-RENAME COLUMN "APP"."TABLE_PARAMS"."PARAM_VALUE_CLOB" TO "PARAM_VALUE";
-
--- change SERDE_PARAMS.PARAM_VALUE to CLOB
-ALTER TABLE "APP"."SERDE_PARAMS" ADD COLUMN "SERDE_PV_CLOB" CLOB;
-UPDATE "APP"."SERDE_PARAMS" SET SERDE_PV_CLOB=CAST(PARAM_VALUE AS CLOB);
-ALTER TABLE "APP"."SERDE_PARAMS" DROP COLUMN PARAM_VALUE;
-RENAME COLUMN "APP"."SERDE_PARAMS"."SERDE_PV_CLOB" TO "PARAM_VALUE";
-
--- change SD_PARAMS.PARAM_VALUE to CLOB
-ALTER TABLE "APP"."SD_PARAMS" ADD COLUMN "SD_PV_CLOB" CLOB;
-UPDATE "APP"."SD_PARAMS" SET SD_PV_CLOB=CAST(PARAM_VALUE AS CLOB);
-ALTER TABLE "APP"."SD_PARAMS" DROP COLUMN PARAM_VALUE;
-RENAME COLUMN "APP"."SD_PARAMS"."SD_PV_CLOB" TO "PARAM_VALUE";
-
--- expand a hive table name length to 256 chars
-ALTER TABLE "APP"."TBLS" ALTER COLUMN "TBL_NAME" SET DATA TYPE VARCHAR(256);
-ALTER TABLE "APP"."NOTIFICATION_LOG" ALTER COLUMN "TBL_NAME" SET DATA TYPE VARCHAR(256);
-ALTER TABLE "APP"."PARTITION_EVENTS" ALTER COLUMN "TBL_NAME" SET DATA TYPE VARCHAR(256);
-ALTER TABLE "APP"."TAB_COL_STATS" ALTER COLUMN "TABLE_NAME" SET DATA TYPE VARCHAR(256);
-ALTER TABLE "APP"."PART_COL_STATS" ALTER COLUMN "TABLE_NAME" SET DATA TYPE VARCHAR(256);
-ALTER TABLE "APP"."COMPLETED_TXN_COMPONENTS" ALTER COLUMN "CTC_TABLE" SET DATA TYPE VARCHAR(256);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/derby/040-HIVE-16399.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/040-HIVE-16399.derby.sql b/metastore/scripts/upgrade/derby/040-HIVE-16399.derby.sql
deleted file mode 100644
index f6cc31f..0000000
--- a/metastore/scripts/upgrade/derby/040-HIVE-16399.derby.sql
+++ /dev/null
@@ -1 +0,0 @@
-CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql b/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
index a4977b6..7f1a64b 100644
--- a/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
+++ b/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
@@ -22,13 +22,13 @@ CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" IN
 
 CREATE TABLE "APP"."DATABASE_PARAMS" ("DB_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
 
-CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" VARCHAR(128), "TBL_ID" BIGINT);
+CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(1000), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" VARCHAR(128), "TBL_ID" BIGINT);
 
-CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
 
-CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(767) NOT NULL, "TYPE_NAME" CLOB, "INTEGER_IDX" INTEGER NOT NULL);
+CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(1000) NOT NULL, "TYPE_NAME" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL);
 
-CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(1000), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
 
 CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL);
 
@@ -54,17 +54,17 @@ CREATE TABLE "APP"."GLOBAL_PRIVS" ("USER_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME
 
 CREATE TABLE "APP"."PARTITION_PARAMS" ("PART_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
 
-CREATE TABLE "APP"."PARTITION_EVENTS" ("PART_NAME_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_TIME" BIGINT NOT NULL, "EVENT_TYPE" INTEGER NOT NULL, "PARTITION_NAME" VARCHAR(767), "TBL_NAME" VARCHAR(256));
+CREATE TABLE "APP"."PARTITION_EVENTS" ("PART_NAME_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_TIME" BIGINT NOT NULL, "EVENT_TYPE" INTEGER NOT NULL, "PARTITION_NAME" VARCHAR(767), "TBL_NAME" VARCHAR(128));
 
 CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
 
 CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
 
-CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL, "MM_WATERMARK_WRITE_ID" BIGINT DEFAULT -1, "MM_NEXT_WRITE_ID" BIGINT DEFAULT 0);
+CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(128), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL, "MM_WATERMARK_WRITE_ID" BIGINT DEFAULT -1, "MM_NEXT_WRITE_ID" BIGINT DEFAULT 0);
 
 CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
 
-CREATE TABLE "APP"."PART_COL_PRIVS" ("PART_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_COL_PRIV" VARCHAR(128));
+CREATE TABLE "APP"."PART_COL_PRIVS" ("PART_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(1000), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_COL_PRIV" VARCHAR(128));
 
 CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, "INPUT_FORMAT" VARCHAR(4000), "IS_COMPRESSED" CHAR(1) NOT NULL, "LOCATION" VARCHAR(4000), "NUM_BUCKETS" INTEGER NOT NULL, "OUTPUT_FORMAT" VARCHAR(4000), "SERDE_ID" BIGINT, "CD_ID" BIGINT, "IS_STOREDASSUBDIRECTORIES" CHAR(1) NOT NULL);
 
@@ -78,7 +78,7 @@ CREATE TABLE "APP"."TYPE_FIELDS" ("TYPE_NAME" BIGINT NOT NULL, "COMMENT" VARCHAR
 
 CREATE TABLE "APP"."NUCLEUS_TABLES" ("CLASS_NAME" VARCHAR(128) NOT NULL, "TABLE_NAME" VARCHAR(128) NOT NULL, "TYPE" VARCHAR(4) NOT NULL, "OWNER" VARCHAR(2) NOT NULL, "VERSION" VARCHAR(20) NOT NULL, "INTERFACE_NAME" VARCHAR(256) DEFAULT NULL);
 
-CREATE TABLE "APP"."SD_PARAMS" ("SD_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+CREATE TABLE "APP"."SD_PARAMS" ("SD_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
 
 CREATE TABLE "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID" BIGINT NOT NULL);
 
@@ -94,7 +94,7 @@ CREATE TABLE "APP"."MASTER_KEYS" ("KEY_ID" INTEGER NOT NULL generated always as
 
 CREATE TABLE "APP"."DELEGATION_TOKENS" ( "TOKEN_IDENT" VARCHAR(767) NOT NULL, "TOKEN" VARCHAR(767));
 
-CREATE TABLE "APP"."PART_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(256) NOT NULL, "PARTITION_NAME" VARCHAR(767) NOT NULL, "COLUMN_NAME" VARCHAR(767) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, "NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "PART_ID" BIGINT NOT NULL);
+CREATE TABLE "APP"."PART_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(128) NOT NULL, "PARTITION_NAME" VARCHAR(767) NOT NULL, "COLUMN_NAME" VARCHAR(1000) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, "NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "PART_ID" BIGINT NOT NULL);
 
 CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255));
 
@@ -102,7 +102,7 @@ CREATE TABLE "APP"."FUNCS" ("FUNC_ID" BIGINT NOT NULL, "CLASS_NAME" VARCHAR(4000
 
 CREATE TABLE "APP"."FUNC_RU" ("FUNC_ID" BIGINT NOT NULL, "RESOURCE_TYPE" INTEGER NOT NULL, "RESOURCE_URI" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL);
 
-CREATE TABLE "APP"."NOTIFICATION_LOG" ("NL_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_ID" BIGINT NOT NULL, "EVENT_TIME" INTEGER NOT NULL, "EVENT_TYPE" VARCHAR(32) NOT NULL, "MESSAGE" CLOB, "TBL_NAME" VARCHAR(256), "MESSAGE_FORMAT" VARCHAR(16));
+CREATE TABLE "APP"."NOTIFICATION_LOG" ("NL_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_ID" BIGINT NOT NULL, "EVENT_TIME" INTEGER NOT NULL, "EVENT_TYPE" VARCHAR(32) NOT NULL, "MESSAGE" CLOB, "TBL_NAME" VARCHAR(128), "MESSAGE_FORMAT" VARCHAR(16));
 
 CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/derby/hive-schema-2.3.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/hive-schema-2.3.0.derby.sql b/metastore/scripts/upgrade/derby/hive-schema-2.3.0.derby.sql
deleted file mode 100644
index b2916c8..0000000
--- a/metastore/scripts/upgrade/derby/hive-schema-2.3.0.derby.sql
+++ /dev/null
@@ -1,340 +0,0 @@
--- Timestamp: 2011-09-22 15:32:02.024
--- Source database is: /home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
--- Connection URL is: jdbc:derby:/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
--- Specified schema is: APP
--- appendLogs: false
-
--- ----------------------------------------------
--- DDL Statements for functions
--- ----------------------------------------------
-
-CREATE FUNCTION "APP"."NUCLEUS_ASCII" (C CHAR(1)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.ascii' ;
-
-CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN VARCHAR(8000)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.matches' ;
-
--- ----------------------------------------------
--- DDL Statements for tables
--- ----------------------------------------------
-
-CREATE TABLE "APP"."DBS" ("DB_ID" BIGINT NOT NULL, "DESC" VARCHAR(4000), "DB_LOCATION_URI" VARCHAR(4000) NOT NULL, "NAME" VARCHAR(128), "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10));
-
-CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT);
-
-CREATE TABLE "APP"."DATABASE_PARAMS" ("DB_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
-
-CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" VARCHAR(128), "TBL_ID" BIGINT);
-
-CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
-
-CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(767) NOT NULL, "TYPE_NAME" CLOB, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL);
-
-CREATE TABLE "APP"."PARTITION_KEY_VALS" ("PART_ID" BIGINT NOT NULL, "PART_KEY_VAL" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."DB_PRIVS" ("DB_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "DB_PRIV" VARCHAR(128));
-
-CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DEFERRED_REBUILD" CHAR(1) NOT NULL, "INDEX_HANDLER_CLASS" VARCHAR(4000), "INDEX_NAME" VARCHAR(128), "INDEX_TBL_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "ORIG_TBL_ID" BIGINT, "SD_ID" BIGINT);
-
-CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
-
-CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
-
-CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000));
-
-CREATE TABLE "APP"."PART_PRIVS" ("PART_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_PRIV" VARCHAR(128));
-
-CREATE TABLE "APP"."ROLE_MAP" ("ROLE_GRANT_ID" BIGINT NOT NULL, "ADD_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "ROLE_ID" BIGINT);
-
-CREATE TABLE "APP"."TYPES" ("TYPES_ID" BIGINT NOT NULL, "TYPE_NAME" VARCHAR(128), "TYPE1" VARCHAR(767), "TYPE2" VARCHAR(767));
-
-CREATE TABLE "APP"."GLOBAL_PRIVS" ("USER_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "USER_PRIV" VARCHAR(128));
-
-CREATE TABLE "APP"."PARTITION_PARAMS" ("PART_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
-
-CREATE TABLE "APP"."PARTITION_EVENTS" ("PART_NAME_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_TIME" BIGINT NOT NULL, "EVENT_TYPE" INTEGER NOT NULL, "PARTITION_NAME" VARCHAR(767), "TBL_NAME" VARCHAR(256));
-
-CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
-
-CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL);
-
-CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."PART_COL_PRIVS" ("PART_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_COL_PRIV" VARCHAR(128));
-
-CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, "INPUT_FORMAT" VARCHAR(4000), "IS_COMPRESSED" CHAR(1) NOT NULL, "LOCATION" VARCHAR(4000), "NUM_BUCKETS" INTEGER NOT NULL, "OUTPUT_FORMAT" VARCHAR(4000), "SERDE_ID" BIGINT, "CD_ID" BIGINT, "IS_STOREDASSUBDIRECTORIES" CHAR(1) NOT NULL);
-
-CREATE TABLE "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME" VARCHAR(256) NOT NULL, "NEXT_VAL" BIGINT NOT NULL);
-
-RUN '022-HIVE-11107.derby.sql';
-
-CREATE TABLE "APP"."BUCKETING_COLS" ("SD_ID" BIGINT NOT NULL, "BUCKET_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."TYPE_FIELDS" ("TYPE_NAME" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "FIELD_NAME" VARCHAR(128) NOT NULL, "FIELD_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."NUCLEUS_TABLES" ("CLASS_NAME" VARCHAR(128) NOT NULL, "TABLE_NAME" VARCHAR(128) NOT NULL, "TYPE" VARCHAR(4) NOT NULL, "OWNER" VARCHAR(2) NOT NULL, "VERSION" VARCHAR(20) NOT NULL, "INTERFACE_NAME" VARCHAR(256) DEFAULT NULL);
-
-CREATE TABLE "APP"."SD_PARAMS" ("SD_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
-
-CREATE TABLE "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID" BIGINT NOT NULL);
-
-CREATE TABLE "APP"."SKEWED_STRING_LIST_VALUES" ("STRING_LIST_ID" BIGINT NOT NULL, "STRING_LIST_VALUE" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."SKEWED_COL_NAMES" ("SD_ID" BIGINT NOT NULL, "SKEWED_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ("SD_ID" BIGINT NOT NULL, "STRING_LIST_ID_KID" BIGINT NOT NULL, "LOCATION" VARCHAR(4000));
-
-CREATE TABLE "APP"."SKEWED_VALUES" ("SD_ID_OID" BIGINT NOT NULL, "STRING_LIST_ID_EID" BIGINT NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."MASTER_KEYS" ("KEY_ID" INTEGER NOT NULL generated always as identity (start with 1), "MASTER_KEY" VARCHAR(767));
-
-CREATE TABLE "APP"."DELEGATION_TOKENS" ( "TOKEN_IDENT" VARCHAR(767) NOT NULL, "TOKEN" VARCHAR(767));
-
-CREATE TABLE "APP"."PART_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(256) NOT NULL, "PARTITION_NAME" VARCHAR(767) NOT NULL, "COLUMN_NAME" VARCHAR(767) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, "NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "PART_ID" BIGINT NOT NULL);
-
-CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255));
-
-CREATE TABLE "APP"."FUNCS" ("FUNC_ID" BIGINT NOT NULL, "CLASS_NAME" VARCHAR(4000), "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "FUNC_NAME" VARCHAR(128), "FUNC_TYPE" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10));
-
-CREATE TABLE "APP"."FUNC_RU" ("FUNC_ID" BIGINT NOT NULL, "RESOURCE_TYPE" INTEGER NOT NULL, "RESOURCE_URI" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."NOTIFICATION_LOG" ("NL_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_ID" BIGINT NOT NULL, "EVENT_TIME" INTEGER NOT NULL, "EVENT_TYPE" VARCHAR(32) NOT NULL, "MESSAGE" CLOB, "TBL_NAME" VARCHAR(256), "MESSAGE_FORMAT" VARCHAR(16));
-
-CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL);
-
-CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX" INTEGER NOT NULL, "CHILD_TBL_ID" BIGINT, "PARENT_CD_ID" BIGINT NOT NULL, "PARENT_INTEGER_IDX" INTEGER, "PARENT_TBL_ID" BIGINT NOT NULL,  "POSITION" BIGINT NOT NULL, "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, "CONSTRAINT_TYPE" SMALLINT NOT NULL, "UPDATE_RULE" SMALLINT, "DELETE_RULE" SMALLINT, "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL);
-
-ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY ("CONSTRAINT_NAME", "POSITION");
-
-CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"("PARENT_TBL_ID");
-
--- ----------------------------------------------
--- DDL Statements for indexes
--- ----------------------------------------------
-
-CREATE UNIQUE INDEX "APP"."UNIQUEINDEX" ON "APP"."IDXS" ("INDEX_NAME", "ORIG_TBL_ID");
-
-CREATE INDEX "APP"."TABLECOLUMNPRIVILEGEINDEX" ON "APP"."TBL_COL_PRIVS" ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
-
-CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."ROLEENTITYINDEX" ON "APP"."ROLES" ("ROLE_NAME");
-
-CREATE INDEX "APP"."TABLEPRIVILEGEINDEX" ON "APP"."TBL_PRIVS" ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."UNIQUETABLE" ON "APP"."TBLS" ("TBL_NAME", "DB_ID");
-
-CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME");
-
-CREATE UNIQUE INDEX "APP"."USERROLEMAPINDEX" ON "APP"."ROLE_MAP" ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."GLOBALPRIVILEGEINDEX" ON "APP"."GLOBAL_PRIVS" ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."UNIQUE_TYPE" ON "APP"."TYPES" ("TYPE_NAME");
-
-CREATE INDEX "APP"."PARTITIONCOLUMNPRIVILEGEINDEX" ON "APP"."PART_COL_PRIVS" ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("PART_NAME", "TBL_ID");
-
-CREATE UNIQUE INDEX "APP"."UNIQUEFUNCTION" ON "APP"."FUNCS" ("FUNC_NAME", "DB_ID");
-
-CREATE INDEX "APP"."FUNCS_N49" ON "APP"."FUNCS" ("DB_ID");
-
-CREATE INDEX "APP"."FUNC_RU_N49" ON "APP"."FUNC_RU" ("FUNC_ID");
-
--- ----------------------------------------------
--- DDL Statements for keys
--- ----------------------------------------------
-
--- primary/unique
-ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_PK" PRIMARY KEY ("INDEX_ID");
-
-ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_PK" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
-
-ALTER TABLE "APP"."CDS" ADD CONSTRAINT "SQL110922153006460" PRIMARY KEY ("CD_ID");
-
-ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_PK" PRIMARY KEY ("DB_GRANT_ID");
-
-ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_PK" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEY_PK" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
-
-ALTER TABLE "APP"."SEQUENCE_TABLE" ADD CONSTRAINT "SEQUENCE_TABLE_PK" PRIMARY KEY ("SEQUENCE_NAME");
-
-ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_PK" PRIMARY KEY ("PART_GRANT_ID");
-
-ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_PK" PRIMARY KEY ("SD_ID");
-
-ALTER TABLE "APP"."SERDES" ADD CONSTRAINT "SERDES_PK" PRIMARY KEY ("SERDE_ID");
-
-ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_PK" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
-
-ALTER TABLE "APP"."PARTITION_EVENTS" ADD CONSTRAINT "PARTITION_EVENTS_PK" PRIMARY KEY ("PART_NAME_ID");
-
-ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_PK" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
-
-ALTER TABLE "APP"."ROLES" ADD CONSTRAINT "ROLES_PK" PRIMARY KEY ("ROLE_ID");
-
-ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_PK" PRIMARY KEY ("TBL_GRANT_ID");
-
-ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_PK" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."NUCLEUS_TABLES" ADD CONSTRAINT "NUCLEUS_TABLES_PK" PRIMARY KEY ("CLASS_NAME");
-
-ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_PK" PRIMARY KEY ("TBL_ID");
-
-ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_PK" PRIMARY KEY ("SD_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_PK" PRIMARY KEY ("DB_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_PK" PRIMARY KEY ("DB_ID");
-
-ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_PK" PRIMARY KEY ("ROLE_GRANT_ID");
-
-ALTER TABLE "APP"."GLOBAL_PRIVS" ADD CONSTRAINT "GLOBAL_PRIVS_PK" PRIMARY KEY ("USER_GRANT_ID");
-
-ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_PK" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."TYPES" ADD CONSTRAINT "TYPES_PK" PRIMARY KEY ("TYPES_ID");
-
-ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "SQL110922153006740" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
-
-ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_PK" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
-
-ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_PK" PRIMARY KEY ("PART_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_PK" PRIMARY KEY ("PART_ID");
-
-ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_PK" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."SKEWED_STRING_LIST" ADD CONSTRAINT "SKEWED_STRING_LIST_PK" PRIMARY KEY ("STRING_LIST_ID");
-
-ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_PK" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_PK" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
-
-ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_PK" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_PK" PRIMARY KEY ("CS_ID");
-
-ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_PK" PRIMARY KEY ("CS_ID");
-
-ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_PK" PRIMARY KEY ("FUNC_ID");
-
-ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_PK" PRIMARY KEY ("FUNC_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."NOTIFICATION_LOG" ADD CONSTRAINT "NOTIFICATION_LOG_PK" PRIMARY KEY ("NL_ID");
-
-ALTER TABLE "APP"."NOTIFICATION_SEQUENCE" ADD CONSTRAINT "NOTIFICATION_SEQUENCE_PK" PRIMARY KEY ("NNI_ID");
-
--- foreign
-ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK3" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_FK1" FOREIGN KEY ("INDEX_ID") REFERENCES "APP"."IDXS" ("INDEX_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEYS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK2" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_FK1" FOREIGN KEY ("TYPE_NAME") REFERENCES "APP"."TYPES" ("TYPES_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_FK1" FOREIGN KEY ("ROLE_ID") REFERENCES "APP"."ROLES" ("ROLE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "COLUMNS_V2_FK1" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_FK1" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK1" FOREIGN KEY ("SD_ID_OID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK2" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_FK" FOREIGN KEY ("TBL_ID") REFERENCES TBLS("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_FK" FOREIGN KEY ("PART_ID") REFERENCES PARTITIONS("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."VERSION" ADD CONSTRAINT "VERSION_PK" PRIMARY KEY ("VER_ID");
-
-ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "APP"."FUNCS" ("FUNC_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
--- ----------------------------------------------
--- DDL Statements for checks
--- ----------------------------------------------
-
-ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "SQL110318025504980" CHECK (DEFERRED_REBUILD IN ('Y','N'));
-
-ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SQL110318025505550" CHECK (IS_COMPRESSED IN ('Y','N'));
-
--- ----------------------------
--- Transaction and Lock Tables
--- ----------------------------
-RUN 'hive-txn-schema-2.3.0.derby.sql';
-
--- -----------------------------------------------------------------
--- Record schema version. Should be the last step in the init script
--- -----------------------------------------------------------------
-INSERT INTO "APP"."VERSION" (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '2.3.0', 'Hive release version 2.3.0');
-
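
The INSERT above is the last step of the Derby init script: it stamps the metastore with schema version 2.3.0 in the VERSION table so Hive can verify that the on-disk schema matches the release it is running. A minimal sketch of reading that row back over JDBC follows; the embedded-Derby URL "jdbc:derby:metastore_db" is an assumption, only the table and column names come from the DDL above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class CheckMetastoreSchemaVersion {
  public static void main(String[] args) throws Exception {
    // Assumed embedded Derby location; point this at the actual metastore database.
    try (Connection conn = DriverManager.getConnection("jdbc:derby:metastore_db");
         Statement stmt = conn.createStatement();
         // VERSION holds the single row written as the last step of the init script.
         ResultSet rs = stmt.executeQuery(
             "SELECT SCHEMA_VERSION, VERSION_COMMENT FROM APP.VERSION WHERE VER_ID = 1")) {
      if (rs.next()) {
        System.out.println(rs.getString(1) + " - " + rs.getString(2));  // e.g. "2.3.0 - Hive release version 2.3.0"
      }
    }
  }
}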


[02/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
index d7a83f7..a231543 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hive.ql.exec.UnionOperator;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.lib.NodeProcessor;
 import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
@@ -69,16 +68,10 @@ public class GenMRFileSink1 implements NodeProcessor {
     GenMRProcContext ctx = (GenMRProcContext) opProcCtx;
     ParseContext parseCtx = ctx.getParseCtx();
     boolean chDir = false;
-    // we should look take the parent of fsOp's task as the current task.
-    FileSinkOperator fsOp = (FileSinkOperator) nd;
-    Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx
-        .getMapCurrCtx();
-    GenMapRedCtx mapredCtx = mapCurrCtx.get(fsOp.getParentOperators().get(0));
-    Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
-    
-    ctx.setCurrTask(currTask);
+    Task<? extends Serializable> currTask = ctx.getCurrTask();
     ctx.addRootIfPossible(currTask);
 
+    FileSinkOperator fsOp = (FileSinkOperator) nd;
     boolean isInsertTable = // is INSERT OVERWRITE TABLE
         GenMapRedUtils.isInsertInto(parseCtx, fsOp);
     HiveConf hconf = parseCtx.getConf();

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index 87fff3e..38157a6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -131,7 +131,11 @@ import com.google.common.collect.Interner;
  * map-reduce tasks.
  */
 public final class GenMapRedUtils {
-  private static final Logger LOG = LoggerFactory.getLogger("org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils");
+  private static Logger LOG;
+
+  static {
+    LOG = LoggerFactory.getLogger("org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils");
+  }
 
   public static boolean needsTagging(ReduceWork rWork) {
     return rWork != null && (rWork.getReducer().getClass() == JoinOperator.class ||

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
index 85d46f3..b2893e7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
 import org.apache.hadoop.hive.ql.lib.Rule;
 import org.apache.hadoop.hive.ql.lib.RuleRegExp;
 import org.apache.hadoop.hive.ql.parse.GenMapRedWalker;
+import org.apache.hadoop.hive.ql.parse.OptimizeTezProcContext;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
@@ -79,6 +80,8 @@ import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 
+import com.clearspring.analytics.util.Lists;
+
 /**
  * Implementation of one of the rule-based map join optimization. User passes hints to specify
  * map-joins and during this optimization, all user specified map joins are converted to MapJoins -
@@ -431,8 +434,7 @@ public class MapJoinProcessor extends Transform {
         smbJoinDesc.getValueTblDescs(), smbJoinDesc.getValueTblDescs(),
         smbJoinDesc.getOutputColumnNames(),
         bigTablePos, smbJoinDesc.getConds(),
-        smbJoinDesc.getFilters(), smbJoinDesc.isNoOuterJoin(), smbJoinDesc.getDumpFilePrefix(),
-        smbJoinDesc.getNoConditionalTaskSize());
+        smbJoinDesc.getFilters(), smbJoinDesc.isNoOuterJoin(), smbJoinDesc.getDumpFilePrefix());
 
     mapJoinDesc.setStatistics(smbJoinDesc.getStatistics());
 
@@ -1185,7 +1187,7 @@ public class MapJoinProcessor extends Transform {
     MapJoinDesc mapJoinDescriptor =
         new MapJoinDesc(keyExprMap, keyTableDesc, newValueExprs, valueTableDescs,
             valueFilteredTableDescs, outputColumnNames, mapJoinPos, joinCondns, filters, op
-                .getConf().getNoOuterJoin(), dumpFilePrefix, op.getConf().getNoConditionalTaskSize());
+                .getConf().getNoOuterJoin(), dumpFilePrefix);
     mapJoinDescriptor.setStatistics(op.getConf().getStatistics());
     mapJoinDescriptor.setTagOrder(tagOrder);
     mapJoinDescriptor.setNullSafes(desc.getNullSafes());

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
index 92225ac..a3a19f4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
@@ -210,7 +210,7 @@ public class Optimizer {
     if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCORRELATION) &&
         !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEGROUPBYSKEW) &&
         !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME) &&
-        !isTezExecEngine && !isSparkExecEngine) {
+        !isTezExecEngine) {
       transformations.add(new CorrelationOptimizer());
     }
     if (HiveConf.getFloatVar(hiveConf, HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE) > 0) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java
index b51af55..60a8604 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hive.ql.stats.StatsUtils;
 
 import static org.apache.hadoop.hive.ql.plan.ReduceSinkDesc.ReducerTraits.AUTOPARALLEL;
 import static org.apache.hadoop.hive.ql.plan.ReduceSinkDesc.ReducerTraits.UNIFORM;
-import static org.apache.hadoop.hive.ql.plan.ReduceSinkDesc.ReducerTraits.FIXED;
 
 /**
  * SetReducerParallelism determines how many reducers should
@@ -107,7 +106,6 @@ public class SetReducerParallelism implements NodeProcessor {
       }
     } else {
       LOG.info("Number of reducers determined to be: "+desc.getNumReducers());
-      desc.setReducerTraits(EnumSet.of(FIXED)); // usually controlled by bucketing
     }
 
     return false;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/CalciteSemanticException.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/CalciteSemanticException.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/CalciteSemanticException.java
index 8c43774..5b2c9c0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/CalciteSemanticException.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/CalciteSemanticException.java
@@ -31,11 +31,10 @@ public class CalciteSemanticException extends SemanticException {
 
   public enum UnsupportedFeature {
     Distinct_without_an_aggreggation, Duplicates_in_RR, Filter_expression_with_non_boolean_return_type,
-    Having_clause_without_any_groupby, Invalid_column_reference, Invalid_decimal,
+    Having_clause_without_any_groupby, Hint, Invalid_column_reference, Invalid_decimal,
     Less_than_equal_greater_than, Others, Same_name_in_multiple_expressions,
     Schema_less_table, Select_alias_in_having_clause, Select_transform, Subquery,
-    Table_sample_clauses, UDTF, Union_type, Unique_join,
-    HighPrecissionTimestamp // CALCITE-1690
+    Table_sample_clauses, UDTF, Union_type, Unique_join
   };
 
   private UnsupportedFeature unsupportedFeature;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
index e339d0a..6ccd879 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
@@ -48,7 +48,6 @@ import org.apache.calcite.rex.RexLiteral;
 import org.apache.calcite.rex.RexLocalRef;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexOver;
-import org.apache.calcite.rex.RexPatternFieldRef;
 import org.apache.calcite.rex.RexRangeRef;
 import org.apache.calcite.rex.RexSubQuery;
 import org.apache.calcite.rex.RexUtil;
@@ -1075,11 +1074,6 @@ public class HiveCalciteUtil {
       // it seems that it is not used by anything.
       return false;
     }
-
-    @Override
-    public Boolean visitPatternFieldRef(RexPatternFieldRef fieldRef) {
-      return false;
-    }
   }
 
   public static Set<Integer> getInputRefs(RexNode expr) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
index d0b1757..9a65de3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hive.ql.optimizer.calcite;
 
-import org.apache.calcite.config.CalciteConnectionConfig;
 import org.apache.calcite.plan.Context;
 import org.apache.calcite.rel.RelNode;
 import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveAlgorithmsConf;
@@ -28,14 +27,11 @@ import java.util.Set;
 public class HivePlannerContext implements Context {
   private HiveAlgorithmsConf algoConfig;
   private HiveRulesRegistry registry;
-  private CalciteConnectionConfig calciteConfig;
   private Set<RelNode> corrScalarRexSQWithAgg;
 
-  public HivePlannerContext(HiveAlgorithmsConf algoConfig, HiveRulesRegistry registry,
-      CalciteConnectionConfig calciteConfig, Set<RelNode> corrScalarRexSQWithAgg) {
+  public HivePlannerContext(HiveAlgorithmsConf algoConfig, HiveRulesRegistry registry, Set<RelNode> corrScalarRexSQWithAgg) {
     this.algoConfig = algoConfig;
     this.registry = registry;
-    this.calciteConfig = calciteConfig;
     // this is to keep track if a subquery is correlated and contains aggregate
     // this is computed in CalcitePlanner while planning and is later required by subuery remove rule
     // hence this is passed using HivePlannerContext
@@ -49,9 +45,6 @@ public class HivePlannerContext implements Context {
     if (clazz.isInstance(registry)) {
       return clazz.cast(registry);
     }
-    if (clazz.isInstance(calciteConfig)) {
-      return clazz.cast(calciteConfig);
-    }
     if(clazz.isInstance(corrScalarRexSQWithAgg)) {
       return clazz.cast(corrScalarRexSQWithAgg);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelShuttleImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelShuttleImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelShuttleImpl.java
index 8e52d88..2aadf50 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelShuttleImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelShuttleImpl.java
@@ -27,7 +27,6 @@ import org.apache.calcite.rel.logical.LogicalExchange;
 import org.apache.calcite.rel.logical.LogicalFilter;
 import org.apache.calcite.rel.logical.LogicalIntersect;
 import org.apache.calcite.rel.logical.LogicalJoin;
-import org.apache.calcite.rel.logical.LogicalMatch;
 import org.apache.calcite.rel.logical.LogicalMinus;
 import org.apache.calcite.rel.logical.LogicalProject;
 import org.apache.calcite.rel.logical.LogicalSort;
@@ -141,10 +140,6 @@ public class HiveRelShuttleImpl implements HiveRelShuttle {
     public RelNode visit(RelNode other) {
         return visitChildren(other);
     }
-
-    public RelNode visit(LogicalMatch match) {
-      return visitChildren(match);
-    }
 }
 
 // End RelShuttleImpl.java

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java
index 0b94b8a..4edc4df 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java
@@ -24,7 +24,6 @@ import org.apache.calcite.sql.SqlFunctionCategory;
 import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.type.OperandTypes;
 import org.apache.calcite.sql.type.ReturnTypes;
-import org.apache.calcite.sql.type.SqlTypeTransforms;
 
 import com.google.common.collect.Sets;
 
@@ -43,10 +42,9 @@ public class HiveExtractDate extends SqlFunction {
           Sets.newHashSet(YEAR, QUARTER, MONTH, WEEK, DAY, HOUR, MINUTE, SECOND);
 
   private HiveExtractDate(String name) {
-    super(name, SqlKind.EXTRACT, 
-       ReturnTypes.cascade(ReturnTypes.INTEGER, SqlTypeTransforms.FORCE_NULLABLE), null,
-       OperandTypes.INTERVALINTERVAL_INTERVALDATETIME,
-       SqlFunctionCategory.SYSTEM);
+    super(name, SqlKind.EXTRACT, ReturnTypes.INTEGER_NULLABLE, null,
+            OperandTypes.INTERVALINTERVAL_INTERVALDATETIME,
+            SqlFunctionCategory.SYSTEM);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidProjectFilterTransposeRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidProjectFilterTransposeRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidProjectFilterTransposeRule.java
deleted file mode 100644
index dd39056..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidProjectFilterTransposeRule.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
-
-import org.apache.calcite.adapter.druid.DruidQuery;
-import org.apache.calcite.rel.core.Filter;
-import org.apache.calcite.rel.core.Project;
-import org.apache.calcite.rel.core.RelFactories;
-import org.apache.calcite.rel.rules.ProjectFilterTransposeRule;
-import org.apache.calcite.rel.rules.PushProjector;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
-
-/**
- * Rule to push a {@link org.apache.calcite.rel.core.Project}
- * past a {@link org.apache.calcite.rel.core.Filter}
- * when {@code Filter} is on top of a {@link DruidQuery}.
- * TODO: Replace this class with calcite DruidRules.DruidFilterProjectTransposeRule
- * once we upgrade to calcite 1.13
- */
-public class HiveDruidProjectFilterTransposeRule
-    extends ProjectFilterTransposeRule {
-
-  public static final HiveDruidProjectFilterTransposeRule INSTANCE =
-      new HiveDruidProjectFilterTransposeRule();
-
-  private HiveDruidProjectFilterTransposeRule() {
-    super(
-        operand(Project.class,
-            operand(Filter.class, operand(DruidQuery.class, none()))),
-        PushProjector.ExprCondition.FALSE,
-        HiveRelFactories.HIVE_BUILDER);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterJoinRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterJoinRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterJoinRule.java
index a4da6db..4b8568e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterJoinRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterJoinRule.java
@@ -106,6 +106,57 @@ public abstract class HiveFilterJoinRule extends FilterJoinRule {
     }
   }
 
+  /*
+   * Any predicates pushed down to joinFilters that aren't equality conditions:
+   * put them back as aboveFilters because Hive doesn't support not equi join
+   * conditions.
+   */
+  @Override
+  protected void validateJoinFilters(List<RexNode> aboveFilters, List<RexNode> joinFilters,
+      Join join, JoinRelType joinType) {
+    if (joinType.equals(JoinRelType.INNER)) {
+      ListIterator<RexNode> filterIter = joinFilters.listIterator();
+      while (filterIter.hasNext()) {
+        RexNode exp = filterIter.next();
+
+        if (exp instanceof RexCall) {
+          RexCall c = (RexCall) exp;
+          boolean validHiveJoinFilter = false;
+
+          if ((c.getOperator().getKind() == SqlKind.EQUALS)) {
+            validHiveJoinFilter = true;
+            for (RexNode rn : c.getOperands()) {
+              // NOTE: Hive dis-allows projections from both left & right side
+              // of join condition. Example: Hive disallows
+              // (r1.x +r2.x)=(r1.y+r2.y) on join condition.
+              if (filterRefersToBothSidesOfJoin(rn, join)) {
+                validHiveJoinFilter = false;
+                break;
+              }
+            }
+          } else if ((c.getOperator().getKind() == SqlKind.LESS_THAN)
+              || (c.getOperator().getKind() == SqlKind.GREATER_THAN)
+              || (c.getOperator().getKind() == SqlKind.LESS_THAN_OR_EQUAL)
+              || (c.getOperator().getKind() == SqlKind.GREATER_THAN_OR_EQUAL)) {
+            validHiveJoinFilter = true;
+            // NOTE: Hive dis-allows projections from both left & right side of
+            // join in in equality condition. Example: Hive disallows (r1.x <
+            // r2.x) on join condition.
+            if (filterRefersToBothSidesOfJoin(c, join)) {
+              validHiveJoinFilter = false;
+            }
+          }
+
+          if (validHiveJoinFilter)
+            continue;
+        }
+
+        aboveFilters.add(exp);
+        filterIter.remove();
+      }
+    }
+  }
+
   private boolean filterRefersToBothSidesOfJoin(RexNode filter, Join j) {
     boolean refersToBothSides = false;
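
The validateJoinFilters override restored above walks joinFilters and moves every predicate that Hive cannot evaluate as a join condition (non-comparison operators, or comparisons whose operands mix columns from both join inputs, e.g. (r1.x + r2.x) = (r1.y + r2.y)) back into aboveFilters. A minimal, self-contained sketch of that list surgery follows; the validity test passed in is an illustrative placeholder, not Hive's actual rule.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.function.Predicate;

public class JoinFilterSplitSketch {
  // Moves every predicate that fails the validity test from joinFilters back to
  // aboveFilters, mirroring the list handling in validateJoinFilters above.
  static <T> void pushInvalidFiltersBackAbove(
      List<T> aboveFilters, List<T> joinFilters, Predicate<T> isValidHiveJoinFilter) {
    for (Iterator<T> it = joinFilters.iterator(); it.hasNext(); ) {
      T exp = it.next();
      if (!isValidHiveJoinFilter.test(exp)) {
        aboveFilters.add(exp);
        it.remove();
      }
    }
  }

  public static void main(String[] args) {
    List<String> above = new ArrayList<>();
    List<String> join = new ArrayList<>(List.of("r1.x = r2.x", "r1.x + r2.x = r1.y + r2.y"));
    // Placeholder validity test: accept only plain column-to-column equalities.
    pushInvalidFiltersBackAbove(above, join, s -> s.matches("\\w+\\.\\w+ = \\w+\\.\\w+"));
    System.out.println("join=" + join + " above=" + above);
  }
}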
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java
index f3d7293..4cfe782 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java
@@ -55,89 +55,36 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Multimap;
 import com.google.common.collect.Sets;
-import org.apache.calcite.plan.RelOptRuleOperand;
-import org.apache.calcite.rel.AbstractRelNode;
-import org.apache.calcite.rel.core.Join;
-import org.apache.calcite.rel.core.JoinRelType;
-
-
-public abstract class HivePointLookupOptimizerRule extends RelOptRule {
 
 /**
- * This optimization will take a Filter or expression, and if its predicate contains
+ * This optimization will take a Filter expression, and if its predicate contains
  * an OR operator whose children are constant equality expressions, it will try
  * to generate an IN clause (which is more efficient). If the OR operator contains
  * AND operator children, the optimization might generate an IN clause that uses
  * structs.
  */
-  public static class FilterCondition extends HivePointLookupOptimizerRule {
-    public FilterCondition (int minNumORClauses) {
-      super(operand(Filter.class, any()), minNumORClauses);
-    }
-
-    public void onMatch(RelOptRuleCall call) {
-      final Filter filter = call.rel(0);
-      final RexBuilder rexBuilder = filter.getCluster().getRexBuilder();
-      final RexNode condition = RexUtil.pullFactors(rexBuilder, filter.getCondition());
-      analyzeCondition(call , rexBuilder, filter, condition);
-    }
-
-    @Override protected RelNode copyNode(AbstractRelNode node, RexNode newCondition) {
-      final Filter filter  = (Filter) node;
-      return filter.copy(filter.getTraitSet(), filter.getInput(), newCondition);
-    }
-  }
-
-/**
- * This optimization will take a Join or expression, and if its join condition contains
- * an OR operator whose children are constant equality expressions, it will try
- * to generate an IN clause (which is more efficient). If the OR operator contains
- * AND operator children, the optimization might generate an IN clause that uses
- * structs.
- */  
-  public static class JoinCondition extends HivePointLookupOptimizerRule {
-    public JoinCondition (int minNumORClauses) {
-      super(operand(Join.class, any()), minNumORClauses);
-    }
-    
-    public void onMatch(RelOptRuleCall call) {
-      final Join join = call.rel(0);
-      final RexBuilder rexBuilder = join.getCluster().getRexBuilder();
-      final RexNode condition = RexUtil.pullFactors(rexBuilder, join.getCondition());
-      analyzeCondition(call , rexBuilder, join, condition);
-    }
-
-    @Override protected RelNode copyNode(AbstractRelNode node, RexNode newCondition) {
-      final Join join = (Join) node;
-      return join.copy(join.getTraitSet(),
-              newCondition,
-              join.getLeft(),
-              join.getRight(),
-              join.getJoinType(),
-              join.isSemiJoinDone());
-    }
-  }
+public class HivePointLookupOptimizerRule extends RelOptRule {
 
   protected static final Log LOG = LogFactory.getLog(HivePointLookupOptimizerRule.class);
 
-  // Minimum number of OR clauses needed to transform into IN clauses
-  protected final int minNumORClauses;
 
-  protected abstract RelNode copyNode(AbstractRelNode node, RexNode newCondition);
+  // Minimum number of OR clauses needed to transform into IN clauses
+  private final int minNumORClauses;
 
-  protected HivePointLookupOptimizerRule(
-    RelOptRuleOperand operand, int minNumORClauses) {
-    super(operand);
+  public HivePointLookupOptimizerRule(int minNumORClauses) {
+    super(operand(Filter.class, any()));
     this.minNumORClauses = minNumORClauses;
   }
 
-  public void analyzeCondition(RelOptRuleCall call,
-          RexBuilder rexBuilder,
-          AbstractRelNode node, 
-          RexNode condition) {
+  public void onMatch(RelOptRuleCall call) {
+    final Filter filter = call.rel(0);
+
+    final RexBuilder rexBuilder = filter.getCluster().getRexBuilder();
+
+    final RexNode condition = RexUtil.pullFactors(rexBuilder, filter.getCondition());
 
     // 1. We try to transform possible candidates
-    RexTransformIntoInClause transformIntoInClause = new RexTransformIntoInClause(rexBuilder, node,
+    RexTransformIntoInClause transformIntoInClause = new RexTransformIntoInClause(rexBuilder, filter,
             minNumORClauses);
     RexNode newCondition = transformIntoInClause.apply(condition);
 
@@ -150,10 +97,10 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
       return;
     }
 
-    // 4. We create the Filter/Join with the new condition
-    RelNode newNode = copyNode(node, newCondition);
+    // 4. We create the filter with the new condition
+    RelNode newFilter = filter.copy(filter.getTraitSet(), filter.getInput(), newCondition);
 
-    call.transformTo(newNode);
+    call.transformTo(newFilter);
   }
 
 
@@ -162,11 +109,11 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
    */
   protected static class RexTransformIntoInClause extends RexShuttle {
     private final RexBuilder rexBuilder;
-    private final AbstractRelNode nodeOp;
+    private final Filter filterOp;
     private final int minNumORClauses;
 
-    RexTransformIntoInClause(RexBuilder rexBuilder, AbstractRelNode nodeOp, int minNumORClauses) {
-      this.nodeOp = nodeOp;
+    RexTransformIntoInClause(RexBuilder rexBuilder, Filter filterOp, int minNumORClauses) {
+      this.filterOp = filterOp;
       this.rexBuilder = rexBuilder;
       this.minNumORClauses = minNumORClauses;
     }
@@ -182,7 +129,7 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
             if (operand.getKind() == SqlKind.OR) {
               try {
                 newOperand = transformIntoInClauseCondition(rexBuilder,
-                        nodeOp.getRowType(), operand, minNumORClauses);
+                        filterOp.getRowType(), operand, minNumORClauses);
                 if (newOperand == null) {
                   newOperand = operand;
                 }
@@ -200,7 +147,7 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
         case OR:
           try {
             node = transformIntoInClauseCondition(rexBuilder,
-                    nodeOp.getRowType(), call, minNumORClauses);
+                    filterOp.getRowType(), call, minNumORClauses);
             if (node == null) {
               return call;
             }
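
The class comment above sums up the rewrite this rule performs: an OR whose children are constant equality comparisons on the same expression is folded into a single IN clause once it reaches minNumORClauses disjuncts, which is cheaper to evaluate. The real rule operates on Calcite RexNode trees; the sketch below only illustrates the idea on string-rendered predicates.

import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class OrToInSketch {
  // Rewrites "col = v1 OR col = v2 OR ..." into "col IN (v1, v2, ...)" when there
  // are at least minNumORClauses disjuncts; otherwise leaves the OR form alone.
  static String transform(String column, List<String> equalityValues, int minNumORClauses) {
    if (equalityValues.size() < minNumORClauses) {
      StringBuilder or = new StringBuilder();
      for (String v : equalityValues) {
        if (or.length() > 0) {
          or.append(" OR ");
        }
        or.append(column).append(" = ").append(v);
      }
      return or.toString();
    }
    Set<String> distinct = new LinkedHashSet<>(equalityValues);  // drop duplicate constants
    return column + " IN (" + String.join(", ", distinct) + ")";
  }

  public static void main(String[] args) {
    System.out.println(transform("part_col", List.of("1", "2", "3"), 2));  // part_col IN (1, 2, 3)
  }
}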

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectSortTransposeRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectSortTransposeRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectSortTransposeRule.java
index 1487ed4..fd19d99 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectSortTransposeRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectSortTransposeRule.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
 
-import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.plan.RelOptRuleCall;
 import org.apache.calcite.plan.RelOptRuleOperand;
@@ -26,12 +25,7 @@ import org.apache.calcite.rel.RelCollation;
 import org.apache.calcite.rel.RelCollationTraitDef;
 import org.apache.calcite.rel.RelFieldCollation;
 import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rex.RexCall;
-import org.apache.calcite.rex.RexCallBinding;
-import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexUtil;
-import org.apache.calcite.sql.SqlKind;
-import org.apache.calcite.sql.validate.SqlMonotonicity;
 import org.apache.calcite.util.mapping.Mappings;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
@@ -65,28 +59,16 @@ public class HiveProjectSortTransposeRule extends RelOptRule {
   public void onMatch(RelOptRuleCall call) {
     final HiveProject project = call.rel(0);
     final HiveSortLimit sort = call.rel(1);
-    final RelOptCluster cluster = project.getCluster();
 
     // Determine mapping between project input and output fields. If sort
     // relies on non-trivial expressions, we can't push.
     final Mappings.TargetMapping map =
-        RelOptUtil.permutationIgnoreCast(
+        RelOptUtil.permutation(
             project.getProjects(), project.getInput().getRowType()).inverse();
     for (RelFieldCollation fc : sort.getCollation().getFieldCollations()) {
       if (map.getTarget(fc.getFieldIndex()) < 0) {
         return;
       }
-      final RexNode node = project.getProjects().get(map.getTarget(fc.getFieldIndex()));
-      if (node.isA(SqlKind.CAST)) {
-        // Check whether it is a monotonic preserving cast, otherwise we cannot push
-        final RexCall cast = (RexCall) node;
-        final RexCallBinding binding =
-            RexCallBinding.create(cluster.getTypeFactory(), cast,
-                ImmutableList.of(RexUtil.apply(map, sort.getCollation())));
-        if (cast.getOperator().getMonotonicity(binding) == SqlMonotonicity.NOT_MONOTONIC) {
-          return;
-        }
-      }
     }
 
     // Create new collation
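
The block removed above was a guard for order preservation: when a sort key maps to a CAST in the project, sorting on the underlying input column is only equivalent to sorting on the cast value if the cast is monotonic, so a non-monotonic cast blocks the transpose. A tiny illustration of why, using the same values ordered numerically and as strings:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class CastMonotonicitySketch {
  public static void main(String[] args) {
    List<Integer> numeric = new ArrayList<>(Arrays.asList(2, 10, 9));
    numeric.sort(Comparator.naturalOrder());
    System.out.println(numeric);   // [2, 9, 10]

    // The same values "cast" to strings sort differently, so the cast is not order-preserving.
    List<String> asStrings = new ArrayList<>(Arrays.asList("2", "10", "9"));
    asStrings.sort(Comparator.naturalOrder());
    System.out.println(asStrings); // [10, 2, 9]
  }
}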

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortProjectTransposeRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortProjectTransposeRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortProjectTransposeRule.java
index d1be4bc..fe29850 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortProjectTransposeRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortProjectTransposeRule.java
@@ -17,13 +17,23 @@
  */
 package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
 
+import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.plan.RelOptRuleCall;
-import org.apache.calcite.rel.rules.SortProjectTransposeRule;
+import org.apache.calcite.plan.RelOptRuleOperand;
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.rel.RelCollation;
+import org.apache.calcite.rel.RelCollationTraitDef;
+import org.apache.calcite.rel.RelFieldCollation;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.util.mapping.Mappings;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
 
-public class HiveSortProjectTransposeRule extends SortProjectTransposeRule {
+import com.google.common.collect.ImmutableList;
+
+public class HiveSortProjectTransposeRule extends RelOptRule {
 
   public static final HiveSortProjectTransposeRule INSTANCE =
       new HiveSortProjectTransposeRule();
@@ -40,6 +50,10 @@ public class HiveSortProjectTransposeRule extends SortProjectTransposeRule {
             operand(HiveProject.class, any())));
   }
 
+  protected HiveSortProjectTransposeRule(RelOptRuleOperand operand) {
+    super(operand);
+  }
+
   //~ Methods ----------------------------------------------------------------
 
   @Override
@@ -54,4 +68,34 @@ public class HiveSortProjectTransposeRule extends SortProjectTransposeRule {
     return true;
   }
 
+  // implement RelOptRule
+  public void onMatch(RelOptRuleCall call) {
+    final HiveSortLimit sort = call.rel(0);
+    final HiveProject project = call.rel(1);
+
+    // Determine mapping between project input and output fields. If sort
+    // relies on non-trivial expressions, we can't push.
+    final Mappings.TargetMapping map =
+        RelOptUtil.permutation(
+            project.getProjects(), project.getInput().getRowType());
+    for (RelFieldCollation fc : sort.getCollation().getFieldCollations()) {
+      if (map.getTargetOpt(fc.getFieldIndex()) < 0) {
+        return;
+      }
+    }
+
+    // Create new collation
+    final RelCollation newCollation =
+        RelCollationTraitDef.INSTANCE.canonize(
+            RexUtil.apply(map, sort.getCollation()));
+
+    // New operators
+    final HiveSortLimit newSort = sort.copy(sort.getTraitSet().replace(newCollation),
+            project.getInput(), newCollation, sort.offset, sort.fetch);
+    final RelNode newProject = project.copy(sort.getTraitSet(),
+            ImmutableList.<RelNode>of(newSort));
+
+    call.transformTo(newProject);
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
index c692cc0..76e0780 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
@@ -29,7 +29,6 @@ import org.apache.calcite.rel.core.JoinRelType;
 import org.apache.calcite.rel.core.Project;
 import org.apache.calcite.rex.LogicVisitor;
 import org.apache.calcite.rex.RexInputRef;
-import org.apache.calcite.rex.RexVisitorImpl;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexShuttle;
 import org.apache.calcite.rex.RexSubQuery;
@@ -44,12 +43,10 @@ import org.apache.calcite.sql.type.ReturnTypes;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.tools.RelBuilderFactory;
 import org.apache.calcite.util.Pair;
-import org.apache.calcite.util.Util;
 
-import com.google.common.base.Predicate;
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
 
-import java.math.BigDecimal;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
@@ -75,65 +72,64 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
  */
 public abstract class HiveSubQueryRemoveRule extends RelOptRule{
 
-    public static final HiveSubQueryRemoveRule REL_NODE =
-        new HiveSubQueryRemoveRule(
-            operand(RelNode.class, null, HiveSubQueryFinder.RELNODE_PREDICATE,
-                any()),
-            HiveRelFactories.HIVE_BUILDER, "SubQueryRemoveRule:Filter") {
-            public void onMatch(RelOptRuleCall call) {
-                final RelNode relNode = call.rel(0);
-                //TODO: replace HiveSubQRemoveRelBuilder with calcite's once calcite 1.11.0 is released
-                final HiveSubQRemoveRelBuilder builder = new HiveSubQRemoveRelBuilder(null, call.rel(0).getCluster(), null);
-
-                // if subquery is in FILTER
-                if(relNode instanceof Filter) {
-                    final Filter filter = call.rel(0);
+    public static final HiveSubQueryRemoveRule PROJECT =
+            new HiveSubQueryRemoveRule(
+                    operand(Project.class, null, RexUtil.SubQueryFinder.PROJECT_PREDICATE,
+                            any()),
+                    HiveRelFactories.HIVE_BUILDER, "SubQueryRemoveRule:Project") {
+                public void onMatch(RelOptRuleCall call) {
+                    final Project project = call.rel(0);
+                    //TODO: replace HiveSubQRemoveRelBuilder with calcite's once calcite 1.11.0 is released
+                    final HiveSubQRemoveRelBuilder builder = new HiveSubQRemoveRelBuilder(null, call.rel(0).getCluster(), null);
                     final RexSubQuery e =
-                        RexUtil.SubQueryFinder.find(filter.getCondition());
+                            RexUtil.SubQueryFinder.find(project.getProjects());
                     assert e != null;
-
                     final RelOptUtil.Logic logic =
-                        LogicVisitor.find(RelOptUtil.Logic.TRUE,
-                            ImmutableList.of(filter.getCondition()), e);
-                    builder.push(filter.getInput());
+                            LogicVisitor.find(RelOptUtil.Logic.TRUE_FALSE_UNKNOWN,
+                                    project.getProjects(), e);
+                    builder.push(project.getInput());
                     final int fieldCount = builder.peek().getRowType().getFieldCount();
-
-                    assert(filter instanceof HiveFilter);
-                    Set<RelNode> corrScalarQueries = filter.getCluster().getPlanner().getContext().unwrap(Set.class);
-                    boolean isCorrScalarQuery = corrScalarQueries.contains(e.rel);
-
-                    final RexNode target = apply(e, HiveFilter.getVariablesSet(e), logic,
-                        builder, 1, fieldCount, isCorrScalarQuery);
+                    final RexNode target = apply(e, HiveFilter.getVariablesSet(e),
+                            logic, builder, 1, fieldCount, false);
                     final RexShuttle shuttle = new ReplaceSubQueryShuttle(e, target);
-                    builder.filter(shuttle.apply(filter.getCondition()));
-                    builder.project(fields(builder, filter.getRowType().getFieldCount()));
+                    builder.project(shuttle.apply(project.getProjects()),
+                            project.getRowType().getFieldNames());
                     call.transformTo(builder.build());
                 }
-                // if subquery is in PROJECT
-                else if(relNode instanceof Project) {
-                    final Project project = call.rel(0);
+            };
+
+    public static final HiveSubQueryRemoveRule FILTER =
+            new HiveSubQueryRemoveRule(
+                    operand(Filter.class, null, RexUtil.SubQueryFinder.FILTER_PREDICATE,
+                            any()),
+                    HiveRelFactories.HIVE_BUILDER, "SubQueryRemoveRule:Filter") {
+                public void onMatch(RelOptRuleCall call) {
+                    final Filter filter = call.rel(0);
+                    //final RelBuilder builder = call.builder();
+                    //TODO: replace HiveSubQRemoveRelBuilder with calcite's once calcite 1.11.0 is released
+                    final HiveSubQRemoveRelBuilder builder = new HiveSubQRemoveRelBuilder(null, call.rel(0).getCluster(), null);
                     final RexSubQuery e =
-                        RexUtil.SubQueryFinder.find(project.getProjects());
+                            RexUtil.SubQueryFinder.find(filter.getCondition());
                     assert e != null;
 
                     final RelOptUtil.Logic logic =
-                        LogicVisitor.find(RelOptUtil.Logic.TRUE_FALSE_UNKNOWN,
-                            project.getProjects(), e);
-                    builder.push(project.getInput());
+                            LogicVisitor.find(RelOptUtil.Logic.TRUE,
+                                    ImmutableList.of(filter.getCondition()), e);
+                    builder.push(filter.getInput());
                     final int fieldCount = builder.peek().getRowType().getFieldCount();
 
-                    Set<RelNode> corrScalarQueries = project.getCluster().getPlanner().getContext().unwrap(Set.class);
+                    assert(filter instanceof HiveFilter);
+                    Set<RelNode> corrScalarQueries = filter.getCluster().getPlanner().getContext().unwrap(Set.class);
                     boolean isCorrScalarQuery = corrScalarQueries.contains(e.rel);
 
-                    final RexNode target = apply(e, HiveFilter.getVariablesSet(e),
-                        logic, builder, 1, fieldCount, isCorrScalarQuery);
+                    final RexNode target = apply(e, HiveFilter.getVariablesSet(e), logic,
+                            builder, 1, fieldCount, isCorrScalarQuery);
                     final RexShuttle shuttle = new ReplaceSubQueryShuttle(e, target);
-                    builder.project(shuttle.apply(project.getProjects()),
-                        project.getRowType().getFieldNames());
+                    builder.filter(shuttle.apply(filter.getCondition()));
+                    builder.project(fields(builder, filter.getRowType().getFieldCount()));
                     call.transformTo(builder.build());
                 }
-            }
-        };
+            };
 
     private HiveSubQueryRemoveRule(RelOptRuleOperand operand,
                                RelBuilderFactory relBuilderFactory,
@@ -168,25 +164,6 @@ public abstract class HiveSubQueryRemoveRule extends RelOptRule{
                             boolean isCorrScalarAgg) {
         switch (e.getKind()) {
             case SCALAR_QUERY:
-                builder.push(e.rel);
-                // returns single row/column
-                builder.aggregate(builder.groupKey(),
-                        builder.count(false, "cnt"));
-
-                SqlFunction countCheck = new SqlFunction("sq_count_check", SqlKind.OTHER_FUNCTION, ReturnTypes.BIGINT,
-                        InferTypes.RETURN_TYPE, OperandTypes.NUMERIC, SqlFunctionCategory.USER_DEFINED_FUNCTION);
-
-                // we create FILTER (sq_count_check(count()) <= 1) instead of PROJECT because RelFieldTrimmer
-                //  ends up getting rid of Project since it is not used further up the tree
-                builder.filter(builder.call(SqlStdOperatorTable.LESS_THAN_OR_EQUAL,
-                        builder.call(countCheck, builder.field("cnt")),
-                        builder.literal(1)));
-                if( !variablesSet.isEmpty())
-                {
-                    builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
-                }
-                else
-                    builder.join(JoinRelType.INNER, builder.literal(true), variablesSet);
                 if(isCorrScalarAgg) {
                     // Transformation :
                     // Outer Query Left Join (inner query) on correlated predicate and preserve rows only from left side.
@@ -204,9 +181,7 @@ public abstract class HiveSubQueryRemoveRule extends RelOptRule{
                     final ImmutableList.Builder<RexNode> operands = ImmutableList.builder();
                     RexNode literal;
                     if(isAggZeroOnEmpty(e)) {
-                        // since count has a return type of BIG INT we need to make a literal of type big int
-                        // relbuilder's literal doesn't allow this
-                        literal = e.rel.getCluster().getRexBuilder().makeBigintLiteral(new BigDecimal(0));
+                        literal = builder.literal(0);
                     }
                     else {
                         literal = e.rel.getCluster().getRexBuilder().makeNullLiteral(getAggTypeForScalarSub(e));
@@ -218,7 +193,26 @@ public abstract class HiveSubQueryRemoveRule extends RelOptRule{
 
                 //Transformation is to left join for correlated predicates and inner join otherwise,
                 // but do a count on inner side before that to make sure it generates atmost 1 row.
+                builder.push(e.rel);
+                // returns single row/column
+                builder.aggregate(builder.groupKey(),
+                        builder.count(false, "cnt"));
+
+                SqlFunction countCheck = new SqlFunction("sq_count_check", SqlKind.OTHER_FUNCTION, ReturnTypes.BIGINT,
+                        InferTypes.RETURN_TYPE, OperandTypes.NUMERIC, SqlFunctionCategory.USER_DEFINED_FUNCTION);
 
+                // we create FILTER (sq_count_check(count()) <= 1) instead of PROJECT because RelFieldTrimmer
+                //  ends up getting rid of Project since it is not used further up the tree
+                builder.filter(builder.call(SqlStdOperatorTable.LESS_THAN_OR_EQUAL,
+                        builder.call(countCheck, builder.field("cnt")),
+                        builder.literal(1)));
+
+                if( !variablesSet.isEmpty())
+                {
+                    builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
+                }
+                else
+                    builder.join(JoinRelType.INNER, builder.literal(true), variablesSet);
                 builder.push(e.rel);
                 builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
                 offset++;
@@ -461,72 +455,6 @@ public abstract class HiveSubQueryRemoveRule extends RelOptRule{
             return RexUtil.eq(subQuery, this.subQuery) ? replacement : subQuery;
         }
     }
-
-    // TODO:
-    // Following HiveSubQueryFinder has been copied from RexUtil::SubQueryFinder
-    // since there is BUG in there (CALCITE-1726).
-    // Once CALCITE-1726 is fixed we should get rid of the following code
-    /** Visitor that throws {@link org.apache.calcite.util.Util.FoundOne} if
-     * applied to an expression that contains a {@link RexSubQuery}. */
-    public static class HiveSubQueryFinder extends RexVisitorImpl<Void> {
-        public static final HiveSubQueryFinder INSTANCE = new HiveSubQueryFinder();
-
-        /** Returns whether a {@link Project} contains a sub-query. */
-        public static final Predicate<RelNode> RELNODE_PREDICATE=
-            new Predicate<RelNode>() {
-                public boolean apply(RelNode relNode) {
-                    if (relNode instanceof Project) {
-                        Project project = (Project)relNode;
-                        for (RexNode node : project.getProjects()) {
-                            try {
-                                node.accept(INSTANCE);
-                            } catch (Util.FoundOne e) {
-                                return true;
-                            }
-                        }
-                        return false;
-                    }
-                    else if (relNode instanceof Filter) {
-                        try {
-                            ((Filter)relNode).getCondition().accept(INSTANCE);
-                            return false;
-                        } catch (Util.FoundOne e) {
-                            return true;
-                        }
-                    }
-                    return false;
-                }
-            };
-
-        private HiveSubQueryFinder() {
-            super(true);
-        }
-
-        @Override public Void visitSubQuery(RexSubQuery subQuery) {
-            throw new Util.FoundOne(subQuery);
-        }
-
-        public static RexSubQuery find(Iterable<RexNode> nodes) {
-            for (RexNode node : nodes) {
-                try {
-                    node.accept(INSTANCE);
-                } catch (Util.FoundOne e) {
-                    return (RexSubQuery) e.getNode();
-                }
-            }
-            return null;
-        }
-
-        public static RexSubQuery find(RexNode node) {
-            try {
-                node.accept(INSTANCE);
-                return null;
-            } catch (Util.FoundOne e) {
-                return (RexSubQuery) e.getNode();
-            }
-        }
-    }
-
 }
 
 // End SubQueryRemoveRule.java
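
The hunks above relocate the guard that a scalar subquery feeds at most one row into the outer query: the subquery is pre-aggregated with count() and wrapped in FILTER (sq_count_check(cnt) <= 1) before the join. A minimal, hypothetical Java sketch of the runtime rule that filter enforces (ScalarSubqueryGuard and scalarValue are illustrative names, not Hive code):

import java.util.Arrays;
import java.util.List;

public class ScalarSubqueryGuard {
  // A scalar subquery may contribute at most one row to the outer query; more
  // than one row is an error. As a simplification, an empty result is treated
  // as NULL here (the isAggZeroOnEmpty branch above handles aggregates such as
  // count that return 0 on empty input).
  static <T> T scalarValue(List<T> subqueryRows) {
    if (subqueryRows.size() > 1) {
      throw new IllegalStateException("Scalar subquery returned more than one row");
    }
    return subqueryRows.isEmpty() ? null : subqueryRows.get(0);
  }

  public static void main(String[] args) {
    System.out.println(scalarValue(Arrays.asList(42)));   // 42
    System.out.println(scalarValue(Arrays.asList()));     // null
    try {
      scalarValue(Arrays.asList(1, 2));
    } catch (IllegalStateException e) {
      System.out.println(e.getMessage());                 // more than one row
    }
  }
}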

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java
index 81de33f..38d7906 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java
@@ -21,7 +21,6 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.calcite.plan.RelOptMaterialization;
-import org.apache.calcite.plan.RelOptMaterializations;
 import org.apache.calcite.plan.RelOptPlanner;
 import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.plan.RelOptRuleCall;
@@ -78,7 +77,7 @@ public class HiveMaterializedViewFilterScanRule extends RelOptRule {
       // Costing is done in transformTo(), so we call it repeatedly with all applicable
       // materialized views and cheapest one will be picked
       List<RelOptMaterialization> applicableMaterializations =
-          RelOptMaterializations.getApplicableMaterializations(root, materializations);
+          VolcanoPlanner.getApplicableMaterializations(root, materializations);
       for (RelOptMaterialization materialization : applicableMaterializations) {
         List<RelNode> subs = new MaterializedViewSubstitutionVisitor(
             materialization.queryRel, root, relBuilderFactory).go(materialization.tableRel);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/FilterSelectivityEstimator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/FilterSelectivityEstimator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/FilterSelectivityEstimator.java
index a25b58b..6f26d7d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/FilterSelectivityEstimator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/FilterSelectivityEstimator.java
@@ -29,7 +29,6 @@ import org.apache.calcite.rel.core.Project;
 import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.rex.RexInputRef;
-import org.apache.calcite.rex.RexLiteral;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexVisitorImpl;
 import org.apache.calcite.sql.SqlKind;
@@ -302,15 +301,4 @@ public class FilterSelectivityEstimator extends RexVisitorImpl<Double> {
 
     return op;
   }
-
-  public Double visitLiteral(RexLiteral literal) {
-    if (literal.isAlwaysFalse()) {
-      return 0.0;
-    } else if (literal.isAlwaysTrue()) {
-      return 1.0;
-    } else {
-      assert false;
-    }
-    return null;
-  }
 }
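
The visitLiteral method removed just above pinned the selectivity of constant predicates: an always-false literal keeps no rows, an always-true literal keeps them all. A trivial illustrative sketch of that mapping (the class and method names are made up):

public class ConstantPredicateSelectivity {
  // 0.0 for a predicate that is always FALSE, 1.0 for one that is always TRUE,
  // matching the removed visitLiteral in FilterSelectivityEstimator.
  static double selectivity(boolean constantPredicate) {
    return constantPredicate ? 1.0 : 0.0;
  }

  public static void main(String[] args) {
    System.out.println(selectivity(true));  // 1.0
    System.out.println(selectivity(false)); // 0.0
  }
}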

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
index 9bcdd0c..69e157e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
@@ -165,7 +165,7 @@ public class HiveRelMdPredicates implements MetadataHandler<BuiltInMetadata.Pred
             rexBuilder.makeInputRef(project, expr.i), expr.e));
       }
     }
-    return RelOptPredicateList.of(rexBuilder, projectPullUpPredicates);
+    return RelOptPredicateList.of(projectPullUpPredicates);
   }
 
   /** Infers predicates for a {@link org.apache.calcite.rel.core.Join}. */
@@ -202,7 +202,6 @@ public class HiveRelMdPredicates implements MetadataHandler<BuiltInMetadata.Pred
     final RelNode input = agg.getInput();
     final RelOptPredicateList inputInfo = mq.getPulledUpPredicates(input);
     final List<RexNode> aggPullUpPredicates = new ArrayList<>();
-    final RexBuilder rexBuilder = agg.getCluster().getRexBuilder(); 
 
     ImmutableBitSet groupKeys = agg.getGroupSet();
     Mapping m = Mappings.create(MappingType.PARTIAL_FUNCTION,
@@ -220,7 +219,7 @@ public class HiveRelMdPredicates implements MetadataHandler<BuiltInMetadata.Pred
         aggPullUpPredicates.add(r);
       }
     }
-    return RelOptPredicateList.of(rexBuilder, aggPullUpPredicates);
+    return RelOptPredicateList.of(aggPullUpPredicates);
   }
 
   /**
@@ -272,7 +271,7 @@ public class HiveRelMdPredicates implements MetadataHandler<BuiltInMetadata.Pred
     if (!disjPred.isAlwaysTrue()) {
       preds.add(disjPred);
     }
-    return RelOptPredicateList.of(rB, preds);
+    return RelOptPredicateList.of(preds);
   }
 
   /**
@@ -412,7 +411,6 @@ public class HiveRelMdPredicates implements MetadataHandler<BuiltInMetadata.Pred
       final JoinRelType joinType = joinRel.getJoinType();
       final List<RexNode> leftPreds = ImmutableList.copyOf(RelOptUtil.conjunctions(leftChildPredicates));
       final List<RexNode> rightPreds = ImmutableList.copyOf(RelOptUtil.conjunctions(rightChildPredicates));
-      final RexBuilder rexBuilder = joinRel.getCluster().getRexBuilder();
       switch (joinType) {
       case INNER:
       case LEFT:
@@ -478,13 +476,13 @@ public class HiveRelMdPredicates implements MetadataHandler<BuiltInMetadata.Pred
           pulledUpPredicates = Iterables.concat(leftPreds, rightPreds,
                 RelOptUtil.conjunctions(joinRel.getCondition()), inferredPredicates);
         }
-        return RelOptPredicateList.of(rexBuilder,
+        return RelOptPredicateList.of(
           pulledUpPredicates, leftInferredPredicates, rightInferredPredicates);
       case LEFT:    
-        return RelOptPredicateList.of(rexBuilder, 
+        return RelOptPredicateList.of(    
           leftPreds, EMPTY_LIST, rightInferredPredicates);
       case RIGHT:   
-        return RelOptPredicateList.of(rexBuilder,
+        return RelOptPredicateList.of(    
           rightPreds, leftInferredPredicates, EMPTY_LIST);
       default:
         assert inferredPredicates.size() == 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
index a43d2be..0dc0c24 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
@@ -37,8 +37,6 @@ import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
 import org.apache.hadoop.hive.ql.parse.ParseDriver;
 import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
 
 public class ASTBuilder {
 
@@ -271,23 +269,19 @@ public class ASTBuilder {
       type = ((Boolean) val).booleanValue() ? HiveParser.KW_TRUE : HiveParser.KW_FALSE;
       break;
     case DATE: {
-      //Calcite Calendar is always GMT, Hive atm uses JVM local
-      final Calendar c = (Calendar) literal.getValue();
-      final DateTime dt = new DateTime(c.getTimeInMillis(), DateTimeZone.forTimeZone(c.getTimeZone()));
+      val = literal.getValue();
       type = HiveParser.TOK_DATELITERAL;
       DateFormat df = new SimpleDateFormat("yyyy-MM-dd");
-      val = df.format(dt.toDateTime(DateTimeZone.getDefault()).toDate());
+      val = df.format(((Calendar) val).getTime());
       val = "'" + val + "'";
     }
       break;
     case TIME:
     case TIMESTAMP: {
-      //Calcite Calendar is always GMT, Hive atm uses JVM local
-      final Calendar c = (Calendar) literal.getValue();
-      final DateTime dt = new DateTime(c.getTimeInMillis(), DateTimeZone.forTimeZone(c.getTimeZone()));
+      val = literal.getValue();
       type = HiveParser.TOK_TIMESTAMPLITERAL;
       DateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
-      val = df.format(dt.toDateTime(DateTimeZone.getDefault()).toDate());
+      val = df.format(((Calendar) val).getTime());
       val = "'" + val + "'";
     }
       break;
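
The ASTBuilder hunk above drops the Joda-time conversion whose comment notes that Calcite keeps DATE/TIMESTAMP literals in a GMT calendar while Hive renders them in the JVM's local zone. A self-contained JDK sketch of why the zone used for formatting matters (the literal value and class name are made up):

import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Locale;
import java.util.TimeZone;

public class DateLiteralZoneDemo {
  public static void main(String[] args) {
    // A DATE literal held in a GMT calendar, the way Calcite stores it.
    Calendar gmt = Calendar.getInstance(TimeZone.getTimeZone("GMT"), Locale.US);
    gmt.clear();
    gmt.set(2017, Calendar.JANUARY, 1); // 2017-01-01 00:00:00 GMT

    // A default-zone SimpleDateFormat renders the same instant in the JVM's
    // local zone; in any zone west of GMT this prints 2016-12-31.
    SimpleDateFormat localZone = new SimpleDateFormat("yyyy-MM-dd");
    System.out.println("local rendering: " + localZone.format(gmt.getTime()));

    // Formatting in the calendar's own zone preserves the literal's fields.
    SimpleDateFormat gmtZone = new SimpleDateFormat("yyyy-MM-dd");
    gmtZone.setTimeZone(gmt.getTimeZone());
    System.out.println("GMT rendering:   " + gmtZone.format(gmt.getTime()));
  }
}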

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
index 165f8c4..27990a2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.calcite.adapter.druid.DruidQuery;
+import org.apache.calcite.avatica.util.TimeUnitRange;
 import org.apache.calcite.rel.RelFieldCollation;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.RelVisitor;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
index b1efbbd..e840938 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
 
 import java.math.BigDecimal;
+import java.sql.Date;
 import java.sql.Timestamp;
 import java.util.ArrayList;
 import java.util.Calendar;
@@ -74,8 +75,6 @@ import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -292,17 +291,16 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
       case DOUBLE:
         return new ExprNodeConstantDesc(TypeInfoFactory.doubleTypeInfo,
             Double.valueOf(((Number) literal.getValue3()).doubleValue()));
-      case DATE: {
-        final Calendar c = (Calendar) literal.getValue();
+      case DATE:
         return new ExprNodeConstantDesc(TypeInfoFactory.dateTypeInfo,
-            new java.sql.Date(c.getTimeInMillis()));
-      }
+          new Date(((Calendar)literal.getValue()).getTimeInMillis()));
       case TIME:
       case TIMESTAMP: {
-        final Calendar c = (Calendar) literal.getValue();
-        final DateTime dt = new DateTime(c.getTimeInMillis(), DateTimeZone.forTimeZone(c.getTimeZone()));
-        return new ExprNodeConstantDesc(TypeInfoFactory.timestampTypeInfo,
-            new Timestamp(dt.getMillis()));
+        Object value = literal.getValue3();
+        if (value instanceof Long) {
+          value = new Timestamp((Long)value);
+        }
+        return new ExprNodeConstantDesc(TypeInfoFactory.timestampTypeInfo, value);
       }
       case BINARY:
         return new ExprNodeConstantDesc(TypeInfoFactory.binaryTypeInfo, literal.getValue3());
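
In the ExprNodeConverter hunk above, the reverted code builds Hive constant descriptors straight from the Calcite literal values: a Calendar for DATE, and for TIME/TIMESTAMP a value from getValue3() that may arrive as epoch millis (a Long). A minimal JDK-only sketch of those two conversions (the values and class name are made up):

import java.sql.Date;
import java.sql.Timestamp;
import java.util.Calendar;

public class LiteralValueConversion {
  public static void main(String[] args) {
    // DATE: Calendar -> java.sql.Date via the epoch-millis instant.
    Calendar cal = Calendar.getInstance();
    cal.clear();
    cal.set(2017, Calendar.MAY, 8);
    Date date = new Date(cal.getTimeInMillis());
    System.out.println(date);

    // TIME/TIMESTAMP: an epoch-millis Long is wrapped in java.sql.Timestamp.
    Object value = 1494276171000L;
    if (value instanceof Long) {
      value = new Timestamp((Long) value);
    }
    System.out.println(value);
  }
}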

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
index b9b600d..73a9b0f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
@@ -19,8 +19,6 @@
 package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
 
 
-import org.apache.hadoop.hive.ql.parse.*;
-
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -74,8 +72,19 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortExchange
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion;
+import org.apache.hadoop.hive.ql.parse.JoinCond;
+import org.apache.hadoop.hive.ql.parse.JoinType;
+import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec;
 import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderExpression;
 import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitionExpression;
+import org.apache.hadoop.hive.ql.parse.PTFTranslator;
+import org.apache.hadoop.hive.ql.parse.ParseUtils;
+import org.apache.hadoop.hive.ql.parse.RowResolver;
+import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.parse.UnparseTranslator;
+import org.apache.hadoop.hive.ql.parse.WindowingComponentizer;
+import org.apache.hadoop.hive.ql.parse.WindowingSpec;
 import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowFunctionSpec;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
@@ -339,7 +348,6 @@ public class HiveOpConverter {
     // through Hive
     String[] baseSrc = new String[joinRel.getInputs().size()];
     String tabAlias = getHiveDerivedTableAlias();
-
     // 1. Convert inputs
     OpAttr[] inputs = new OpAttr[joinRel.getInputs().size()];
     List<Operator<?>> children = new ArrayList<Operator<?>>(joinRel.getInputs().size());
@@ -718,7 +726,7 @@ public class HiveOpConverter {
       List<String> keepColNames) throws SemanticException {
     // 1. Generate RS operator
     // 1.1 Prune the tableNames, only count the tableNames that are not empty strings
-  // as empty string in table aliases is only allowed for virtual columns.
+	// as empty string in table aliases is only allowed for virtual columns.
     String tableAlias = null;
     Set<String> tableNames = input.getSchema().getTableNames();
     for (String tableName : tableNames) {
@@ -877,8 +885,7 @@ public class HiveOpConverter {
 
   private static JoinOperator genJoin(RelNode join, ExprNodeDesc[][] joinExpressions,
       List<List<ExprNodeDesc>> filterExpressions, List<Operator<?>> children,
-      String[] baseSrc, String tabAlias)
-          throws SemanticException {
+      String[] baseSrc, String tabAlias) throws SemanticException {
 
     // 1. Extract join type
     JoinCondDesc[] joinCondns;
@@ -1003,7 +1010,7 @@ public class HiveOpConverter {
 
     // 4. We create the join operator with its descriptor
     JoinDesc desc = new JoinDesc(exprMap, outputColumnNames, noOuterJoin, joinCondns,
-            filters, joinExpressions, 0);
+            filters, joinExpressions);
     desc.setReversedExprs(reversedExprs);
     desc.setFilterMap(filterMap);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
index 52ca3b0..a05b89c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
@@ -23,12 +23,11 @@ import java.sql.Timestamp;
 import java.util.ArrayList;
 import java.util.Calendar;
 import java.util.Date;
+import java.util.GregorianCalendar;
 import java.util.LinkedHashMap;
 import java.util.List;
-import java.util.Locale;
 import java.util.Map;
 
-import org.apache.calcite.avatica.util.DateTimeUtils;
 import org.apache.calcite.avatica.util.TimeUnit;
 import org.apache.calcite.avatica.util.TimeUnitRange;
 import org.apache.calcite.plan.RelOptCluster;
@@ -39,8 +38,8 @@ import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.rex.RexBuilder;
 import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexSubQuery;
 import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.rex.RexSubQuery;
 import org.apache.calcite.sql.SqlCollation;
 import org.apache.calcite.sql.SqlIntervalQualifier;
 import org.apache.calcite.sql.SqlKind;
@@ -77,10 +76,8 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeSubQueryDesc;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseBinary;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseCompare;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBetween;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFCase;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFTimestamp;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToBinary;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToChar;
@@ -99,8 +96,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectIn
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableList.Builder;
@@ -253,8 +248,6 @@ public class RexNodeConverter {
     boolean isWhenCase = tgtUdf instanceof GenericUDFWhen || tgtUdf instanceof GenericUDFCase;
     boolean isTransformableTimeStamp = func.getGenericUDF() instanceof GenericUDFUnixTimeStamp &&
             func.getChildren().size() != 0;
-    boolean isBetween = !isNumeric && tgtUdf instanceof GenericUDFBetween;
-    boolean isIN = !isNumeric && tgtUdf instanceof GenericUDFIn;
 
     if (isNumeric) {
       tgtDT = func.getTypeInfo();
@@ -273,33 +266,15 @@ public class RexNodeConverter {
     } else if (isTransformableTimeStamp) {
       // unix_timestamp(args) -> to_unix_timestamp(args)
       func = ExprNodeGenericFuncDesc.newInstance(new GenericUDFToUnixTimeStamp(), func.getChildren());
-    } else if (isBetween) {
-      assert func.getChildren().size() == 4;
-      // We skip first child as is not involved (is the revert boolean)
-      // The target type needs to account for all 3 operands
-      tgtDT = FunctionRegistry.getCommonClassForComparison(
-              func.getChildren().get(1).getTypeInfo(),
-              FunctionRegistry.getCommonClassForComparison(
-                func.getChildren().get(2).getTypeInfo(),
-                func.getChildren().get(3).getTypeInfo()));
-    } else if (isIN) {
-      // We're only considering the first element of the IN list for the type
-      assert func.getChildren().size() > 1;
-      tgtDT = FunctionRegistry.getCommonClassForComparison(func.getChildren().get(0)
-            .getTypeInfo(), func.getChildren().get(1).getTypeInfo());
     }
 
-    for (int i =0; i < func.getChildren().size(); ++i) {
-      ExprNodeDesc childExpr = func.getChildren().get(i);
+    for (ExprNodeDesc childExpr : func.getChildren()) {
       tmpExprNode = childExpr;
       if (tgtDT != null
           && TypeInfoUtils.isConversionRequiredForComparison(tgtDT, childExpr.getTypeInfo())) {
-        if (isCompare || isBetween || isIN) {
+        if (isCompare) {
           // For compare, we will convert requisite children
-          // For BETWEEN skip the first child (the revert boolean)
-          if (!isBetween || i > 0) {
-            tmpExprNode = ParseUtils.createConversionCast(childExpr, (PrimitiveTypeInfo) tgtDT);
-          }
+          tmpExprNode = ParseUtils.createConversionCast(childExpr, (PrimitiveTypeInfo) tgtDT);
         } else if (isNumeric) {
           // For numeric, we'll do minimum necessary cast - if we cast to the type
           // of expression, bad things will happen.
@@ -659,40 +634,20 @@ public class RexNodeConverter {
       calciteLiteral = rexBuilder.makeCharLiteral(asUnicodeString((String) value));
       break;
     case DATE:
-        // The Calcite literal is in GMT, this will be converted back to JVM locale 
-        // by ASTBuilder.literal during Calcite->Hive plan conversion
-        final Calendar cal = Calendar.getInstance(DateTimeUtils.GMT_ZONE, Locale.getDefault());
-        cal.setTime((Date) value);
-        calciteLiteral = rexBuilder.makeDateLiteral(cal);
-        break;
-      case TIMESTAMP:
-        // The Calcite literal is in GMT, this will be converted back to JVM locale 
-        // by ASTBuilder.literal during Calcite->Hive plan conversion
-        final Calendar calt = Calendar.getInstance(DateTimeUtils.GMT_ZONE, Locale.getDefault());
-        if (value instanceof Calendar) {
-          final Calendar c = (Calendar) value;
-          long timeMs = c.getTimeInMillis();
-          calt.setTimeInMillis(timeMs);
-        } else {
-          final Timestamp ts = (Timestamp) value;
-          // CALCITE-1690
-          // Calcite cannot represent TIMESTAMP literals with precision higher than 3
-          if (ts.getNanos() % 1000000 != 0) {
-            throw new CalciteSemanticException(
-              "High Precision Timestamp: " + String.valueOf(ts),
-              UnsupportedFeature.HighPrecissionTimestamp);
-          }
-          calt.setTimeInMillis(ts.getTime());
-        }
-        // Must call makeLiteral, not makeTimestampLiteral 
-        // to have the RexBuilder.roundTime logic kick in
-        calciteLiteral = rexBuilder.makeLiteral(
-          calt,
-          rexBuilder.getTypeFactory().createSqlType(
-            SqlTypeName.TIMESTAMP,
-            rexBuilder.getTypeFactory().getTypeSystem().getDefaultPrecision(SqlTypeName.TIMESTAMP)),
-          false);
-        break;
+      Calendar cal = new GregorianCalendar();
+      cal.setTime((Date) value);
+      calciteLiteral = rexBuilder.makeDateLiteral(cal);
+      break;
+    case TIMESTAMP:
+      Calendar c = null;
+      if (value instanceof Calendar) {
+        c = (Calendar)value;
+      } else {
+        c = Calendar.getInstance();
+        c.setTimeInMillis(((Timestamp)value).getTime());
+      }
+      calciteLiteral = rexBuilder.makeTimestampLiteral(c, RelDataType.PRECISION_NOT_SPECIFIED);
+      break;
     case INTERVAL_YEAR_MONTH:
       // Calcite year-month literal value is months as BigDecimal
       BigDecimal totalMonths = BigDecimal.valueOf(((HiveIntervalYearMonth) value).getTotalMonths());
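
The TIMESTAMP handling removed above (the '-' lines) rejected literals with sub-millisecond precision, citing CALCITE-1690: Calcite cannot represent TIMESTAMP literals with precision higher than 3. A small sketch of the nanosecond check behind that guard (the timestamp values are illustrative):

import java.sql.Timestamp;

public class TimestampPrecisionCheck {
  // Precision up to milliseconds: the nanos component is a whole number of ms.
  static boolean hasAtMostMillis(Timestamp ts) {
    return ts.getNanos() % 1_000_000 == 0;
  }

  public static void main(String[] args) {
    Timestamp millis = Timestamp.valueOf("2017-05-08 20:42:51.123");
    Timestamp nanos = Timestamp.valueOf("2017-05-08 20:42:51.123456789");
    System.out.println(hasAtMostMillis(millis)); // true  - accepted
    System.out.println(hasAtMostMillis(nanos));  // false - would be rejected
  }
}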

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
index 10f5eb3..85450c9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
@@ -352,7 +352,6 @@ public class SqlFunctionConverter {
       registerFunction("struct", SqlStdOperatorTable.ROW, hToken(HiveParser.Identifier, "struct"));
       registerFunction("isnotnull", SqlStdOperatorTable.IS_NOT_NULL, hToken(HiveParser.TOK_ISNOTNULL, "TOK_ISNOTNULL"));
       registerFunction("isnull", SqlStdOperatorTable.IS_NULL, hToken(HiveParser.TOK_ISNULL, "TOK_ISNULL"));
-      registerFunction("is not distinct from", SqlStdOperatorTable.IS_NOT_DISTINCT_FROM, hToken(HiveParser.EQUAL_NS, "<=>"));
       registerFunction("when", SqlStdOperatorTable.CASE, hToken(HiveParser.Identifier, "when"));
       registerDuplicateFunction("case", SqlStdOperatorTable.CASE, hToken(HiveParser.Identifier, "when"));
       // timebased
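
The registration removed above tied Calcite's IS_NOT_DISTINCT_FROM to Hive's null-safe equality operator <=> (HiveParser.EQUAL_NS). A tiny sketch of the null-safe comparison semantics that operator implements (the class and method names are illustrative):

import java.util.Objects;

public class NullSafeEquality {
  // <=> / IS NOT DISTINCT FROM: two NULLs compare as equal, the result is
  // never NULL, and non-null operands use ordinary equality.
  static boolean nullSafeEquals(Object a, Object b) {
    return Objects.equals(a, b);
  }

  public static void main(String[] args) {
    System.out.println(nullSafeEquals(null, null)); // true
    System.out.println(nullSafeEquals(1, null));    // false
    System.out.println(nullSafeEquals(1, 1));       // true
  }
}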

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
index 701bde4..2b075be 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
@@ -194,7 +194,7 @@ public class ReduceSinkDeDuplication extends Transform {
       ReduceSinkDesc cRSc = cRS.getConf();
       for (ReduceSinkOperator pRSNs : pRSs) {
         ReduceSinkDesc pRSNc = pRSNs.getConf();
-        if (cRSc.getKeyCols().size() != pRSNc.getKeyCols().size()) {
+        if (cRSc.getKeyCols().size() < pRSNc.getKeyCols().size()) {
           return false;
         }
         if (cRSc.getPartitionCols().size() != pRSNc.getPartitionCols().size()) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPrunerUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPrunerUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPrunerUtils.java
index 88b8119..4d3e74e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPrunerUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPrunerUtils.java
@@ -37,10 +37,10 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
 public final class ListBucketingPrunerUtils {
 
   /* Default list bucketing directory name. internal use only not for client. */
-  public static final String HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME =
+  public static String HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME =
       "HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME";
   /* Default list bucketing directory key. internal use only not for client. */
-  public static final String HIVE_LIST_BUCKETING_DEFAULT_KEY = "HIVE_DEFAULT_LIST_BUCKETING_KEY";
+  public static String HIVE_LIST_BUCKETING_DEFAULT_KEY = "HIVE_DEFAULT_LIST_BUCKETING_KEY";
 
   /**
    * Decide if pruner skips the skewed directory

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java
index 2143718..6d0ee92 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java
@@ -281,8 +281,7 @@ public final class GenMRSkewJoinProcessor {
       MapJoinDesc mapJoinDescriptor = new MapJoinDesc(newJoinKeys, keyTblDesc,
           newJoinValues, newJoinValueTblDesc, newJoinValueTblDesc,joinDescriptor
           .getOutputColumnNames(), i, joinDescriptor.getConds(),
-          joinDescriptor.getFilters(), joinDescriptor.getNoOuterJoin(), dumpFilePrefix,
-          joinDescriptor.getNoConditionalTaskSize());
+          joinDescriptor.getFilters(), joinDescriptor.getNoOuterJoin(), dumpFilePrefix);
       mapJoinDescriptor.setTagOrder(tags);
       mapJoinDescriptor.setHandleSkewJoin(false);
       mapJoinDescriptor.setNullSafes(joinDescriptor.getNullSafes());
@@ -384,11 +383,11 @@ public final class GenMRSkewJoinProcessor {
     return true;
   }
 
-  private static final String skewJoinPrefix = "hive_skew_join";
-  private static final String UNDERLINE = "_";
-  private static final String BIGKEYS = "bigkeys";
-  private static final String SMALLKEYS = "smallkeys";
-  private static final String RESULTS = "results";
+  private static String skewJoinPrefix = "hive_skew_join";
+  private static String UNDERLINE = "_";
+  private static String BIGKEYS = "bigkeys";
+  private static String SMALLKEYS = "smallkeys";
+  private static String RESULTS = "results";
 
   static Path getBigKeysDir(Path baseDir, Byte srcTbl) {
     return StringInternUtils.internUriStringsInPath(

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java
index b705f5a..38bb847 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java
@@ -240,8 +240,7 @@ public class GenSparkSkewJoinProcessor {
       MapJoinDesc mapJoinDescriptor = new MapJoinDesc(newJoinKeys, keyTblDesc,
           newJoinValues, newJoinValueTblDesc, newJoinValueTblDesc, joinDescriptor
           .getOutputColumnNames(), i, joinDescriptor.getConds(),
-          joinDescriptor.getFilters(), joinDescriptor.getNoOuterJoin(), dumpFilePrefix,
-          joinDescriptor.getNoConditionalTaskSize());
+          joinDescriptor.getFilters(), joinDescriptor.getNoOuterJoin(), dumpFilePrefix);
       mapJoinDescriptor.setTagOrder(tags);
       mapJoinDescriptor.setHandleSkewJoin(false);
       mapJoinDescriptor.setNullSafes(joinDescriptor.getNullSafes());

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapClusterStateForCompile.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapClusterStateForCompile.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapClusterStateForCompile.java
deleted file mode 100644
index a5ed308..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapClusterStateForCompile.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.optimizer.physical;
-
-import java.util.concurrent.ExecutionException;
-
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import java.io.IOException;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.llap.registry.ServiceInstance;
-import org.apache.hadoop.hive.llap.registry.ServiceInstanceSet;
-import org.apache.hadoop.hive.llap.registry.impl.InactiveServiceInstance;
-import org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class LlapClusterStateForCompile {
-  protected static final Logger LOG = LoggerFactory.getLogger(LlapClusterStateForCompile.class);
-
-  private static final long CLUSTER_UPDATE_INTERVAL_NS = 120 * 1000000000L; // 2 minutes.
-  private Long lastClusterUpdateNs;
-  private Integer noConfigNodeCount, executorCount;
-  private int numExecutorsPerNode = -1;
-  private LlapRegistryService svc;
-  private final Configuration conf;
-
-  // It's difficult to impossible to pass global things to compilation, so we have a static cache.
-  private static final Cache<String, LlapClusterStateForCompile> CACHE =
-      CacheBuilder.newBuilder().initialCapacity(10).maximumSize(100).build();
-
-  public static LlapClusterStateForCompile getClusterInfo(final Configuration conf) {
-    final String nodes = HiveConf.getTrimmedVar(conf, HiveConf.ConfVars.LLAP_DAEMON_SERVICE_HOSTS);
-    final String userName = HiveConf.getVar(
-            conf, ConfVars.LLAP_ZK_REGISTRY_USER, LlapRegistryService.currentUser());
-    Callable<LlapClusterStateForCompile> generator = new Callable<LlapClusterStateForCompile>() {
-      @Override
-      public LlapClusterStateForCompile call() throws Exception {
-        LOG.info("Creating cluster info for " + userName + ":" + nodes);
-        return new LlapClusterStateForCompile(conf);
-      }
-    };
-    try {
-      return CACHE.get(userName + ":" + nodes, generator);
-    } catch (ExecutionException e) {
-      throw new RuntimeException(e); // Should never happen... ctor is just assignments.
-    }
-  }
-
-  private LlapClusterStateForCompile(Configuration conf) {
-    this.conf = conf;
-  }
-
-  public boolean hasClusterInfo() {
-    return lastClusterUpdateNs != null;
-  }
-
-  public int getKnownExecutorCount() {
-    return executorCount;
-  }
-
-  public int getNodeCountWithUnknownExecutors() {
-    return noConfigNodeCount;
-  }
-
-  public int getNumExecutorsPerNode() {
-    return numExecutorsPerNode;
-  }
-
-  public synchronized void initClusterInfo() {
-    if (lastClusterUpdateNs != null) {
-      long elapsed = System.nanoTime() - lastClusterUpdateNs;
-      if (elapsed < CLUSTER_UPDATE_INTERVAL_NS) return;
-    }
-    if (svc == null) {
-      try {
-        svc = LlapRegistryService.getClient(conf);
-      } catch (Throwable t) {
-        LOG.info("Cannot create the client; ignoring", t);
-        return; // Don't fail; this is best-effort.
-      }
-    }
-    ServiceInstanceSet instances;
-    try {
-      instances = svc.getInstances(10);
-    } catch (IOException e) {
-      LOG.info("Cannot update cluster information; ignoring", e);
-      return; // Don't wait for the cluster if not started; this is best-effort.
-    }
-    int executorsLocal = 0, noConfigNodesLocal = 0;
-    for (ServiceInstance si : instances.getAll()) {
-      if (si instanceof InactiveServiceInstance) continue; // Shouldn't happen in getAll.
-      Map<String, String> props = si.getProperties();
-      if (props == null) {
-        ++noConfigNodesLocal;
-        continue;
-      }
-      try {
-        int numExecutors = Integer.parseInt(props.get(ConfVars.LLAP_DAEMON_NUM_EXECUTORS.varname));
-        executorsLocal += numExecutors;
-        if (numExecutorsPerNode == -1) {
-          numExecutorsPerNode = numExecutors;
-        }
-      } catch (NumberFormatException e) {
-        ++noConfigNodesLocal;
-      }
-    }
-    lastClusterUpdateNs = System.nanoTime();
-    noConfigNodeCount = noConfigNodesLocal;
-    executorCount = executorsLocal;
-  }
-}
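
The deleted LlapClusterStateForCompile above caches cluster information per user and host list and refuses to refresh it more often than every two minutes (CLUSTER_UPDATE_INTERVAL_NS in initClusterInfo). A stripped-down, hypothetical sketch of that time-gated refresh pattern, with the registry lookup replaced by a Runnable:

import java.util.concurrent.TimeUnit;

public class RefreshGate {
  private static final long UPDATE_INTERVAL_NS = TimeUnit.MINUTES.toNanos(2);
  private Long lastUpdateNs; // null until the first successful refresh

  // Skip the (expensive) refresh unless the interval has elapsed since the
  // last successful update, mirroring the check at the top of initClusterInfo.
  public synchronized void maybeRefresh(Runnable refresh) {
    if (lastUpdateNs != null && System.nanoTime() - lastUpdateNs < UPDATE_INTERVAL_NS) {
      return;
    }
    refresh.run();
    lastUpdateNs = System.nanoTime();
  }

  public static void main(String[] args) {
    RefreshGate gate = new RefreshGate();
    gate.maybeRefresh(() -> System.out.println("refreshed")); // runs
    gate.maybeRefresh(() -> System.out.println("refreshed")); // skipped: within the interval
  }
}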


http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index df05af1..d6460cd 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
@@ -893,13 +892,6 @@ public class DummyRawStoreForJdoConnection implements RawStore {
   }
 
   @Override
-  public Map<String, ColumnStatisticsObj> getAggrColStatsForTablePartitions(String dbName,
-      String tableName) throws MetaException, NoSuchObjectException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
   public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) {
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java
index 2166c20..f8eed18 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java
@@ -42,6 +42,7 @@ public class TestHiveMetaStoreTimeout {
   public static void setUp() throws Exception {
     HiveMetaStore.TEST_TIMEOUT_ENABLED = true;
     hiveConf = new HiveConf(TestHiveMetaStoreTimeout.class);
+    hiveConf.setBoolean(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS.varname, true);
     hiveConf.set(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS.varname,
         MockPartitionExpressionForMetastore.class.getCanonicalName());
     hiveConf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 10 * 1000,

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
index d008c75..aaa03fb 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hive.common.metrics.metrics2.MetricsReporting;
 import org.apache.hadoop.hive.common.metrics.MetricsTestUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
@@ -42,9 +41,6 @@ import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
@@ -55,7 +51,6 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.metastore.messaging.EventMessage;
 import org.apache.hadoop.hive.metastore.model.MTableWrite;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.serde.serdeConstants;
@@ -67,12 +62,10 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
-import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Supplier;
-import javax.jdo.Query;
 
 public class TestObjectStore {
   private ObjectStore objectStore = null;
@@ -142,56 +135,6 @@ public class TestObjectStore {
   }
 
   /**
-   * Test notification operations
-   */
-  @Test
-  public void testNotificationOps() throws InterruptedException {
-    final int NO_EVENT_ID = 0;
-    final int FIRST_EVENT_ID = 1;
-    final int SECOND_EVENT_ID = 2;
-
-    NotificationEvent event =
-        new NotificationEvent(0, 0, EventMessage.EventType.CREATE_DATABASE.toString(), "");
-    NotificationEventResponse eventResponse;
-    CurrentNotificationEventId eventId;
-
-    // Verify that there is no notifications available yet
-    eventId = objectStore.getCurrentNotificationEventId();
-    Assert.assertEquals(NO_EVENT_ID, eventId.getEventId());
-
-    // Verify that addNotificationEvent() updates the NotificationEvent with the new event ID
-    objectStore.addNotificationEvent(event);
-    Assert.assertEquals(FIRST_EVENT_ID, event.getEventId());
-    objectStore.addNotificationEvent(event);
-    Assert.assertEquals(SECOND_EVENT_ID, event.getEventId());
-
-    // Verify that objectStore fetches the latest notification event ID
-    eventId = objectStore.getCurrentNotificationEventId();
-    Assert.assertEquals(SECOND_EVENT_ID, eventId.getEventId());
-
-    // Verify that getNextNotification() returns all events
-    eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
-    Assert.assertEquals(2, eventResponse.getEventsSize());
-    Assert.assertEquals(FIRST_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
-    Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(1).getEventId());
-
-    // Verify that getNextNotification(last) returns events after a specified event
-    eventResponse = objectStore.getNextNotification(new NotificationEventRequest(FIRST_EVENT_ID));
-    Assert.assertEquals(1, eventResponse.getEventsSize());
-    Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
-
-    // Verify that getNextNotification(last) returns zero events if there are no more notifications available
-    eventResponse = objectStore.getNextNotification(new NotificationEventRequest(SECOND_EVENT_ID));
-    Assert.assertEquals(0, eventResponse.getEventsSize());
-
-    // Verify that cleanNotificationEvents() cleans up all old notifications
-    Thread.sleep(1);
-    objectStore.cleanNotificationEvents(1);
-    eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
-    Assert.assertEquals(0, eventResponse.getEventsSize());
-  }
-
-  /**
    * Test database operations
    */
   @Test
@@ -582,15 +525,4 @@ public class TestObjectStore {
     } catch (NoSuchObjectException e) {
     }
   }
-
-  @Test
-  public void testQueryCloseOnError() throws Exception {
-    ObjectStore spy = Mockito.spy(objectStore);
-    spy.getAllDatabases();
-    spy.getAllFunctions();
-    spy.getAllTables(DB1);
-    spy.getPartitionCount();
-    Mockito.verify(spy, Mockito.times(3))
-        .rollbackAndCleanup(Mockito.anyBoolean(), Mockito.<Query>anyObject());
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/test/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java
index a8c7ac3..9acf9d7 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.thrift.TException;
 
-public class VerifyingObjectStore extends ObjectStore {
+class VerifyingObjectStore extends ObjectStore {
   private static final Logger LOG = LoggerFactory.getLogger(VerifyingObjectStore.class);
 
   public VerifyingObjectStore() {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/test/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
deleted file mode 100644
index 0ab20d6..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
+++ /dev/null
@@ -1,238 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.cache;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.ObjectStore;
-import org.apache.hadoop.hive.metastore.TestObjectStore.MockPartitionExpressionProxy;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestCachedStore {
-
-  private CachedStore cachedStore = new CachedStore();
-
-  @Before
-  public void setUp() throws Exception {
-    HiveConf conf = new HiveConf();
-    conf.setVar(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS, MockPartitionExpressionProxy.class.getName());
-
-    ObjectStore objectStore = new ObjectStore();
-    objectStore.setConf(conf);
-
-    cachedStore.setRawStore(objectStore);
-
-    SharedCache.getDatabaseCache().clear();
-    SharedCache.getTableCache().clear();
-    SharedCache.getPartitionCache().clear();
-    SharedCache.getSdCache().clear();
-    SharedCache.getPartitionColStatsCache().clear();
-  }
-
-  @Test
-  public void testSharedStoreDb() {
-    Database db1 = new Database();
-    Database db2 = new Database();
-    Database db3 = new Database();
-    Database newDb1 = new Database();
-    newDb1.setName("db1");
-
-    SharedCache.addDatabaseToCache("db1", db1);
-    SharedCache.addDatabaseToCache("db2", db2);
-    SharedCache.addDatabaseToCache("db3", db3);
-
-    Assert.assertEquals(SharedCache.getCachedDatabaseCount(), 3);
-
-    SharedCache.alterDatabaseInCache("db1", newDb1);
-
-    Assert.assertEquals(SharedCache.getCachedDatabaseCount(), 3);
-
-    SharedCache.removeDatabaseFromCache("db2");
-
-    Assert.assertEquals(SharedCache.getCachedDatabaseCount(), 2);
-
-    List<String> dbs = SharedCache.listCachedDatabases();
-    Assert.assertEquals(dbs.size(), 2);
-    Assert.assertTrue(dbs.contains("db1"));
-    Assert.assertTrue(dbs.contains("db3"));
-  }
-
-  @Test
-  public void testSharedStoreTable() {
-    Table tbl1 = new Table();
-    StorageDescriptor sd1 = new StorageDescriptor();
-    List<FieldSchema> cols1 = new ArrayList<FieldSchema>();
-    cols1.add(new FieldSchema("col1", "int", ""));
-    Map<String, String> params1 = new HashMap<String, String>();
-    params1.put("key", "value");
-    sd1.setCols(cols1);
-    sd1.setParameters(params1);
-    sd1.setLocation("loc1");
-    tbl1.setSd(sd1);
-    tbl1.setPartitionKeys(new ArrayList<FieldSchema>());
-
-    Table tbl2 = new Table();
-    StorageDescriptor sd2 = new StorageDescriptor();
-    List<FieldSchema> cols2 = new ArrayList<FieldSchema>();
-    cols2.add(new FieldSchema("col1", "int", ""));
-    Map<String, String> params2 = new HashMap<String, String>();
-    params2.put("key", "value");
-    sd2.setCols(cols2);
-    sd2.setParameters(params2);
-    sd2.setLocation("loc2");
-    tbl2.setSd(sd2);
-    tbl2.setPartitionKeys(new ArrayList<FieldSchema>());
-
-    Table tbl3 = new Table();
-    StorageDescriptor sd3 = new StorageDescriptor();
-    List<FieldSchema> cols3 = new ArrayList<FieldSchema>();
-    cols3.add(new FieldSchema("col3", "int", ""));
-    Map<String, String> params3 = new HashMap<String, String>();
-    params3.put("key2", "value2");
-    sd3.setCols(cols3);
-    sd3.setParameters(params3);
-    sd3.setLocation("loc3");
-    tbl3.setSd(sd3);
-    tbl3.setPartitionKeys(new ArrayList<FieldSchema>());
-
-    Table newTbl1 = new Table();
-    newTbl1.setDbName("db2");
-    newTbl1.setTableName("tbl1");
-    StorageDescriptor newSd1 = new StorageDescriptor();
-    List<FieldSchema> newCols1 = new ArrayList<FieldSchema>();
-    newCols1.add(new FieldSchema("newcol1", "int", ""));
-    Map<String, String> newParams1 = new HashMap<String, String>();
-    newParams1.put("key", "value");
-    newSd1.setCols(newCols1);
-    newSd1.setParameters(params1);
-    newSd1.setLocation("loc1");
-    newTbl1.setSd(newSd1);
-    newTbl1.setPartitionKeys(new ArrayList<FieldSchema>());
-
-    SharedCache.addTableToCache("db1", "tbl1", tbl1);
-    SharedCache.addTableToCache("db1", "tbl2", tbl2);
-    SharedCache.addTableToCache("db1", "tbl3", tbl3);
-    SharedCache.addTableToCache("db2", "tbl1", tbl1);
-
-    Assert.assertEquals(SharedCache.getCachedTableCount(), 4);
-    Assert.assertEquals(SharedCache.getSdCache().size(), 2);
-
-    Table t = SharedCache.getTableFromCache("db1", "tbl1");
-    Assert.assertEquals(t.getSd().getLocation(), "loc1");
-
-    SharedCache.removeTableFromCache("db1", "tbl1");
-    Assert.assertEquals(SharedCache.getCachedTableCount(), 3);
-    Assert.assertEquals(SharedCache.getSdCache().size(), 2);
-
-    SharedCache.alterTableInCache("db2", "tbl1", newTbl1);
-    Assert.assertEquals(SharedCache.getCachedTableCount(), 3);
-    Assert.assertEquals(SharedCache.getSdCache().size(), 3);
-
-    SharedCache.removeTableFromCache("db1", "tbl2");
-    Assert.assertEquals(SharedCache.getCachedTableCount(), 2);
-    Assert.assertEquals(SharedCache.getSdCache().size(), 2);
-  }
-
-  @Test
-  public void testSharedStorePartition() {
-    Partition part1 = new Partition();
-    StorageDescriptor sd1 = new StorageDescriptor();
-    List<FieldSchema> cols1 = new ArrayList<FieldSchema>();
-    cols1.add(new FieldSchema("col1", "int", ""));
-    Map<String, String> params1 = new HashMap<String, String>();
-    params1.put("key", "value");
-    sd1.setCols(cols1);
-    sd1.setParameters(params1);
-    sd1.setLocation("loc1");
-    part1.setSd(sd1);
-    part1.setValues(Arrays.asList("201701"));
-
-    Partition part2 = new Partition();
-    StorageDescriptor sd2 = new StorageDescriptor();
-    List<FieldSchema> cols2 = new ArrayList<FieldSchema>();
-    cols2.add(new FieldSchema("col1", "int", ""));
-    Map<String, String> params2 = new HashMap<String, String>();
-    params2.put("key", "value");
-    sd2.setCols(cols2);
-    sd2.setParameters(params2);
-    sd2.setLocation("loc2");
-    part2.setSd(sd2);
-    part2.setValues(Arrays.asList("201702"));
-
-    Partition part3 = new Partition();
-    StorageDescriptor sd3 = new StorageDescriptor();
-    List<FieldSchema> cols3 = new ArrayList<FieldSchema>();
-    cols3.add(new FieldSchema("col3", "int", ""));
-    Map<String, String> params3 = new HashMap<String, String>();
-    params3.put("key2", "value2");
-    sd3.setCols(cols3);
-    sd3.setParameters(params3);
-    sd3.setLocation("loc3");
-    part3.setSd(sd3);
-    part3.setValues(Arrays.asList("201703"));
-
-    Partition newPart1 = new Partition();
-    newPart1.setDbName("db1");
-    newPart1.setTableName("tbl1");
-    StorageDescriptor newSd1 = new StorageDescriptor();
-    List<FieldSchema> newCols1 = new ArrayList<FieldSchema>();
-    newCols1.add(new FieldSchema("newcol1", "int", ""));
-    Map<String, String> newParams1 = new HashMap<String, String>();
-    newParams1.put("key", "value");
-    newSd1.setCols(newCols1);
-    newSd1.setParameters(params1);
-    newSd1.setLocation("loc1");
-    newPart1.setSd(newSd1);
-    newPart1.setValues(Arrays.asList("201701"));
-
-    SharedCache.addPartitionToCache("db1", "tbl1", part1);
-    SharedCache.addPartitionToCache("db1", "tbl1", part2);
-    SharedCache.addPartitionToCache("db1", "tbl1", part3);
-    SharedCache.addPartitionToCache("db1", "tbl2", part1);
-
-    Assert.assertEquals(SharedCache.getCachedPartitionCount(), 4);
-    Assert.assertEquals(SharedCache.getSdCache().size(), 2);
-
-    Partition t = SharedCache.getPartitionFromCache("db1", "tbl1", Arrays.asList("201701"));
-    Assert.assertEquals(t.getSd().getLocation(), "loc1");
-
-    SharedCache.removePartitionFromCache("db1", "tbl2", Arrays.asList("201701"));
-    Assert.assertEquals(SharedCache.getCachedPartitionCount(), 3);
-    Assert.assertEquals(SharedCache.getSdCache().size(), 2);
-
-    SharedCache.alterPartitionInCache("db1", "tbl1", Arrays.asList("201701"), newPart1);
-    Assert.assertEquals(SharedCache.getCachedPartitionCount(), 3);
-    Assert.assertEquals(SharedCache.getSdCache().size(), 3);
-
-    SharedCache.removePartitionFromCache("db1", "tbl1", Arrays.asList("201702"));
-    Assert.assertEquals(SharedCache.getCachedPartitionCount(), 2);
-    Assert.assertEquals(SharedCache.getSdCache().size(), 2);
-  }
-}
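
The removed SharedCache tests above pin down how the cached metastore shares storage descriptors: as the assertions imply, descriptors that agree on columns and parameters are stored once even when their locations differ, and removing one table does not evict a descriptor another table still references. A minimal plain-Java sketch of that sharing idea; SdSharingSketch and SdKey are illustrative stand-ins, not Hive's SharedCache or StorageDescriptor:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Objects;

    // Plain-Java sketch of the storage-descriptor sharing asserted by the removed
    // tests: descriptors with identical columns/parameters are cached once, so
    // four cached tables can map to only two cached SDs, and dropping one table
    // does not evict an SD that another table still uses.
    public class SdSharingSketch {

      // Illustrative stand-in for the hashed part of a StorageDescriptor.
      static final class SdKey {
        final String cols;
        final String params;
        SdKey(String cols, String params) { this.cols = cols; this.params = params; }
        @Override public boolean equals(Object o) {
          return o instanceof SdKey
              && cols.equals(((SdKey) o).cols) && params.equals(((SdKey) o).params);
        }
        @Override public int hashCode() { return Objects.hash(cols, params); }
      }

      private final Map<SdKey, Integer> sdRefCount = new HashMap<SdKey, Integer>();
      private int tableCount;

      void addTable(SdKey sd) {
        tableCount++;
        Integer c = sdRefCount.get(sd);
        sdRefCount.put(sd, c == null ? 1 : c + 1);
      }

      void removeTable(SdKey sd) {
        tableCount--;
        Integer c = sdRefCount.get(sd);
        if (c == null) {
          return;
        } else if (c > 1) {
          sdRefCount.put(sd, c - 1);
        } else {
          sdRefCount.remove(sd);
        }
      }

      public static void main(String[] args) {
        SdSharingSketch cache = new SdSharingSketch();
        SdKey sd12 = new SdKey("col1:int", "key=value");   // shared by tbl1 and tbl2
        SdKey sd3 = new SdKey("col3:int", "key2=value2");  // tbl3 only
        cache.addTable(sd12);  // db1.tbl1
        cache.addTable(sd12);  // db1.tbl2
        cache.addTable(sd3);   // db1.tbl3
        cache.addTable(sd12);  // db2.tbl1 reuses the same descriptor
        System.out.println(cache.tableCount + " tables, " + cache.sdRefCount.size() + " SDs"); // 4 tables, 2 SDs
        cache.removeTable(sd12);                           // drop db1.tbl1
        System.out.println(cache.tableCount + " tables, " + cache.sdRefCount.size() + " SDs"); // 3 tables, 2 SDs
      }
    }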

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/test/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializerTest.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializerTest.java b/metastore/src/test/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializerTest.java
deleted file mode 100644
index c278338..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializerTest.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.messaging.json;
-
-import org.codehaus.jackson.annotate.JsonProperty;
-import org.json.JSONException;
-import org.junit.Test;
-import org.skyscreamer.jsonassert.JSONAssert;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.junit.Assert.*;
-
-public class JSONMessageDeserializerTest {
-
-  public static class MyClass {
-    @JsonProperty
-    private int a;
-    @JsonProperty
-    private Map<String, String> map;
-    private long l;
-    private String shouldNotSerialize = "shouldNotSerialize";
-
-    //for jackson to instantiate
-    MyClass() {
-    }
-
-    MyClass(int a, Map<String, String> map, long l) {
-      this.a = a;
-      this.map = map;
-      this.l = l;
-    }
-
-    @JsonProperty
-    long getL() {
-      return l;
-    }
-
-    @Override
-    public boolean equals(Object o) {
-      if (this == o)
-        return true;
-      if (o == null || getClass() != o.getClass())
-        return false;
-
-      MyClass myClass = (MyClass) o;
-
-      if (a != myClass.a)
-        return false;
-      if (l != myClass.l)
-        return false;
-      if (!map.equals(myClass.map))
-        return false;
-      return shouldNotSerialize.equals(myClass.shouldNotSerialize);
-    }
-
-    @Override
-    public int hashCode() {
-      int result = a;
-      result = 31 * result + map.hashCode();
-      result = 31 * result + (int) (l ^ (l >>> 32));
-      result = 31 * result + shouldNotSerialize.hashCode();
-      return result;
-    }
-  }
-
-  @Test
-  public void shouldNotSerializePropertiesNotAnnotated() throws IOException, JSONException {
-    MyClass obj = new MyClass(Integer.MAX_VALUE, new HashMap<String, String>() {{
-      put("a", "a");
-      put("b", "b");
-    }}, Long.MAX_VALUE);
-    String json = JSONMessageDeserializer.mapper.writeValueAsString(obj);
-    JSONAssert.assertEquals(
-        "{\"a\":2147483647,\"map\":{\"b\":\"b\",\"a\":\"a\"},\"l\":9223372036854775807}", json,
-        false);
-  }
-
-  @Test
-  public void shouldDeserializeJsonStringToObject() throws IOException {
-    String json = "{\"a\":47,\"map\":{\"a\":\"a\",\"b\":\"a value for b\"},\"l\":98}";
-    MyClass actual = JSONMessageDeserializer.mapper.readValue(json, MyClass.class);
-    MyClass expected = new MyClass(47, new HashMap<String, String>() {{
-      put("a", "a");
-      put("b", "a value for b");
-    }}, 98L);
-    assertEquals(expected, actual);
-  }
-}
\ No newline at end of file
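
The deleted test above pins down Jackson behavior the metastore's JSON messages rely on: only members carrying @JsonProperty are serialized and deserialized, so un-annotated private state never reaches the wire format. A self-contained sketch of that behavior with a plain org.codehaus.jackson ObjectMapper (the removed test goes through JSONMessageDeserializer.mapper instead; Payload is an illustrative class):

    import java.util.HashMap;
    import java.util.Map;

    import org.codehaus.jackson.annotate.JsonProperty;
    import org.codehaus.jackson.map.ObjectMapper;

    // Only members annotated with @JsonProperty round-trip; the un-annotated
    // private field never appears in the JSON output.
    public class JsonPropertySketch {

      public static class Payload {
        @JsonProperty
        private int a;
        @JsonProperty
        private Map<String, String> map;
        private String notSerialized = "stays local";  // no annotation -> not written

        Payload() {                                    // for Jackson to instantiate
        }

        Payload(int a, Map<String, String> map) {
          this.a = a;
          this.map = map;
        }
      }

      public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();      // plain mapper, not the deserializer's configured one
        Map<String, String> m = new HashMap<String, String>();
        m.put("b", "a value for b");
        String json = mapper.writeValueAsString(new Payload(47, m));
        System.out.println(json);                      // {"a":47,"map":{"b":"a value for b"}} -- no "notSerialized"
        Payload back = mapper.readValue(json, Payload.class);
        System.out.println(back.a + " " + back.map);   // 47 {b=a value for b}
      }
    }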

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestValidCompactorTxnList.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestValidCompactorTxnList.java b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestValidCompactorTxnList.java
index ec653ed..79ccc6b 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestValidCompactorTxnList.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestValidCompactorTxnList.java
@@ -22,64 +22,52 @@ import org.apache.hadoop.hive.common.ValidTxnList;
 import org.junit.Assert;
 import org.junit.Test;
 
-import java.util.BitSet;
-
 public class TestValidCompactorTxnList {
 
   @Test
   public void minTxnHigh() {
-    BitSet bitSet = new BitSet(2);
-    bitSet.set(0, bitSet.length());
-    ValidTxnList txns = new ValidCompactorTxnList(new long[]{3, 4}, bitSet, 2);
+    ValidTxnList txns = new ValidCompactorTxnList(new long[]{3, 4}, 2);
     ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9);
     Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp);
   }
 
   @Test
   public void maxTxnLow() {
-    BitSet bitSet = new BitSet(2);
-    bitSet.set(0, bitSet.length());
-    ValidTxnList txns = new ValidCompactorTxnList(new long[]{13, 14}, bitSet, 12);
+    ValidTxnList txns = new ValidCompactorTxnList(new long[]{13, 14}, 12);
     ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9);
     Assert.assertEquals(ValidTxnList.RangeResponse.ALL, rsp);
   }
 
   @Test
   public void minTxnHighNoExceptions() {
-    ValidTxnList txns = new ValidCompactorTxnList(new long[0], new BitSet(), 5);
+    ValidTxnList txns = new ValidCompactorTxnList(new long[0], 5);
     ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9);
     Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp);
   }
 
   @Test
   public void maxTxnLowNoExceptions() {
-    ValidTxnList txns = new ValidCompactorTxnList(new long[0], new BitSet(), 15);
+    ValidTxnList txns = new ValidCompactorTxnList(new long[0], 15);
     ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9);
     Assert.assertEquals(ValidTxnList.RangeResponse.ALL, rsp);
   }
 
   @Test
   public void exceptionsAllBelow() {
-    BitSet bitSet = new BitSet(2);
-    bitSet.set(0, bitSet.length());
-    ValidTxnList txns = new ValidCompactorTxnList(new long[]{3, 6}, bitSet, 3);
+    ValidTxnList txns = new ValidCompactorTxnList(new long[]{3, 6}, 3);
     ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9);
     Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp);
   }
 
   @Test
   public void exceptionsInMidst() {
-    BitSet bitSet = new BitSet(1);
-    bitSet.set(0, bitSet.length());
-    ValidTxnList txns = new ValidCompactorTxnList(new long[]{8}, bitSet, 7);
+    ValidTxnList txns = new ValidCompactorTxnList(new long[]{8}, 7);
     ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9);
     Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp);
   }
   @Test
   public void exceptionsAbveHighWaterMark() {
-    BitSet bitSet = new BitSet(4);
-    bitSet.set(0, bitSet.length());
-    ValidTxnList txns = new ValidCompactorTxnList(new long[]{8, 11, 17, 29}, bitSet, 15);
+    ValidTxnList txns = new ValidCompactorTxnList(new long[]{8, 11, 17, 29}, 15);
     Assert.assertArrayEquals("", new long[]{8, 11}, txns.getInvalidTransactions());
     ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9);
     Assert.assertEquals(ValidTxnList.RangeResponse.ALL, rsp);
@@ -89,19 +77,17 @@ public class TestValidCompactorTxnList {
 
   @Test
   public void writeToString() {
-    BitSet bitSet = new BitSet(4);
-    bitSet.set(0, bitSet.length());
-    ValidTxnList txns = new ValidCompactorTxnList(new long[]{7, 9, 10, Long.MAX_VALUE}, bitSet, 8);
-    Assert.assertEquals("8:" + Long.MAX_VALUE + ":7:", txns.writeToString());
+    ValidTxnList txns = new ValidCompactorTxnList(new long[]{9, 7, 10, Long.MAX_VALUE}, 8);
+    Assert.assertEquals("8:" + Long.MAX_VALUE + ":7", txns.writeToString());
     txns = new ValidCompactorTxnList();
-    Assert.assertEquals(Long.toString(Long.MAX_VALUE) + ":" + Long.MAX_VALUE + "::", txns.writeToString());
-    txns = new ValidCompactorTxnList(new long[0], new BitSet(), 23);
-    Assert.assertEquals("23:" + Long.MAX_VALUE + "::", txns.writeToString());
+    Assert.assertEquals(Long.toString(Long.MAX_VALUE) + ":" + Long.MAX_VALUE + ":", txns.writeToString());
+    txns = new ValidCompactorTxnList(new long[0], 23);
+    Assert.assertEquals("23:" + Long.MAX_VALUE + ":", txns.writeToString());
   }
 
   @Test
   public void readFromString() {
-    ValidCompactorTxnList txns = new ValidCompactorTxnList("37:" + Long.MAX_VALUE + "::7,9,10");
+    ValidCompactorTxnList txns = new ValidCompactorTxnList("37:" + Long.MAX_VALUE + ":7:9:10");
     Assert.assertEquals(37L, txns.getHighWatermark());
     Assert.assertEquals(Long.MAX_VALUE, txns.getMinOpenTxn());
     Assert.assertArrayEquals(new long[]{7L, 9L, 10L}, txns.getInvalidTransactions());
@@ -110,27 +96,4 @@ public class TestValidCompactorTxnList {
     Assert.assertEquals(Long.MAX_VALUE, txns.getMinOpenTxn());
     Assert.assertEquals(0, txns.getInvalidTransactions().length);
   }
-
-  @Test
-  public void testAbortedTxn() throws Exception {
-    ValidCompactorTxnList txnList = new ValidCompactorTxnList("5:4::1,2,3");
-    Assert.assertEquals(5L, txnList.getHighWatermark());
-    Assert.assertEquals(4, txnList.getMinOpenTxn());
-    Assert.assertArrayEquals(new long[]{1L, 2L, 3L}, txnList.getInvalidTransactions());
-  }
-
-  @Test
-  public void testAbortedRange() throws Exception {
-    ValidCompactorTxnList txnList = new ValidCompactorTxnList("11:4::5,6,7,8");
-    ValidTxnList.RangeResponse rsp = txnList.isTxnRangeAborted(1L, 3L);
-    Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp);
-    rsp = txnList.isTxnRangeAborted(9L, 10L);
-    Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp);
-    rsp = txnList.isTxnRangeAborted(6L, 7L);
-    Assert.assertEquals(ValidTxnList.RangeResponse.ALL, rsp);
-    rsp = txnList.isTxnRangeAborted(4L, 6L);
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME, rsp);
-    rsp = txnList.isTxnRangeAborted(6L, 13L);
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME, rsp);
-  }
 }
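
On the side this revert restores, ValidCompactorTxnList is built from just an exception array and a high watermark, and writeToString() emits the colon-separated form asserted above; the BitSet argument and the aborted-range checks exist only on the side being reverted away. A small sketch against that restored API as it appears in this test, with the package name assumed from the test's imports:

    import org.apache.hadoop.hive.common.ValidCompactorTxnList;  // package assumed from the test's imports
    import org.apache.hadoop.hive.common.ValidTxnList;

    // Exercises the two-argument constructor and serialized form restored above.
    public class CompactorTxnListSketch {
      public static void main(String[] args) {
        // Exceptions {9, 7, 10, MAX} with high watermark 8: only exceptions at or
        // below the watermark are kept, as the writeToString assertion shows.
        ValidTxnList txns = new ValidCompactorTxnList(new long[]{9, 7, 10, Long.MAX_VALUE}, 8);
        System.out.println(txns.writeToString());        // 8:9223372036854775807:7
        System.out.println(txns.isTxnRangeValid(7, 9));  // RangeResponse against the kept exceptions

        // Round trip through the serialized form, as readFromString() does above.
        ValidCompactorTxnList parsed = new ValidCompactorTxnList("37:" + Long.MAX_VALUE + ":7:9:10");
        System.out.println(parsed.getHighWatermark());   // 37
      }
    }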

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/packaging/pom.xml
----------------------------------------------------------------------
diff --git a/packaging/pom.xml b/packaging/pom.xml
index beddd1c..a128036 100644
--- a/packaging/pom.xml
+++ b/packaging/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/packaging/src/main/assembly/src.xml
----------------------------------------------------------------------
diff --git a/packaging/src/main/assembly/src.xml b/packaging/src/main/assembly/src.xml
index 8626922..0529e90 100644
--- a/packaging/src/main/assembly/src.xml
+++ b/packaging/src/main/assembly/src.xml
@@ -67,6 +67,7 @@
         <include>contrib/**/*</include>
         <include>data/**/*</include>
         <include>dev-support/**/*</include>
+        <include>docs/**/*</include>
         <include>druid-handler/**/*</include>
         <include>jdbc-handler/**/*</include>
         <include>find-bugs/**/*</include>
@@ -96,7 +97,6 @@
         <include>spark-client/**/*</include>
         <include>storage-api/**/*</include>
         <include>testutils/**/*</include>
-        <include>vector-code-gen/**/*</include>
       </includes>
       <outputDirectory>/</outputDirectory>
     </fileSet>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index e0aae27..3ea3c77 100644
--- a/pom.xml
+++ b/pom.xml
@@ -17,11 +17,11 @@
   <parent>
     <groupId>org.apache</groupId>
     <artifactId>apache</artifactId>
-    <version>18</version>
+    <version>14</version>
   </parent>
   <groupId>org.apache.hive</groupId>
   <artifactId>hive</artifactId>
-  <version>3.0.0-SNAPSHOT</version>
+  <version>2.2.0-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <name>Hive</name>
@@ -61,12 +61,10 @@
   </modules>
 
   <properties>
-    <hive.version.shortname>3.0.0</hive.version.shortname>
+    <hive.version.shortname>2.2.0</hive.version.shortname>
 
     <!-- Build Properties -->
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-    <maven.compiler.source>1.8</maven.compiler.source>
-    <maven.compiler.target>1.8</maven.compiler.target>
     <maven.compiler.useIncrementalCompilation>false</maven.compiler.useIncrementalCompilation>
     <maven.repo.local>${settings.localRepository}</maven.repo.local>
     <hive.path.to.root>.</hive.path.to.root>
@@ -92,11 +90,11 @@
     <!-- Plugin and Plugin Dependency Versions -->
     <ant.contrib.version>1.0b3</ant.contrib.version>
     <datanucleus.maven.plugin.version>3.3.0-release</datanucleus.maven.plugin.version>
-    <maven.test.jvm.args>-Xmx1024m</maven.test.jvm.args>
+    <maven.test.jvm.args>-Xmx1024m -XX:MaxPermSize=256M</maven.test.jvm.args>
     <maven.antrun.plugin.version>1.7</maven.antrun.plugin.version>
     <maven.assembly.plugin.version>2.3</maven.assembly.plugin.version>
     <maven.checkstyle.plugin.version>2.12.1</maven.checkstyle.plugin.version>
-    <maven.compiler.plugin.version>3.6.1</maven.compiler.plugin.version>
+    <maven.compiler.plugin.version>3.1</maven.compiler.plugin.version>
     <maven.enforcer.plugin.version>1.3.1</maven.enforcer.plugin.version>
     <maven.install.plugin.version>2.4</maven.install.plugin.version>
     <maven.jar.plugin.version>2.4</maven.jar.plugin.version>
@@ -115,10 +113,10 @@
     <antlr.version>3.5.2</antlr.version>
     <apache-directory-server.version>1.5.6</apache-directory-server.version>
     <apache-directory-clientapi.version>0.1</apache-directory-clientapi.version>
-    <avatica.version>1.9.0</avatica.version>
+    <avatica.version>1.8.0</avatica.version>
     <avro.version>1.7.7</avro.version>
     <bonecp.version>0.8.0.RELEASE</bonecp.version>
-    <calcite.version>1.12.0</calcite.version>
+    <calcite.version>1.10.0</calcite.version>
     <datanucleus-api-jdo.version>4.2.4</datanucleus-api-jdo.version>
     <datanucleus-core.version>4.1.17</datanucleus-core.version>
     <datanucleus-rdbms.version>4.1.19</datanucleus-rdbms.version>
@@ -141,16 +139,16 @@
     <guava.version>14.0.1</guava.version>
     <groovy.version>2.4.4</groovy.version>
     <h2database.version>1.3.166</h2database.version>
-    <hadoop.version>2.8.0</hadoop.version>
+    <hadoop.version>2.7.2</hadoop.version>
     <hadoop.bin.path>${basedir}/${hive.path.to.root}/testutils/hadoop</hadoop.bin.path>
     <hamcrest.version>1.1</hamcrest.version>
     <hbase.version>1.1.1</hbase.version>
     <!-- required for logging test to avoid including hbase which pulls disruptor transitively -->
     <disruptor.version>3.3.0</disruptor.version>
-    <hikaricp.version>2.6.1</hikaricp.version>
+    <hikaricp.version>2.5.1</hikaricp.version>
     <!-- httpcomponents are not always in version sync -->
-    <httpcomponents.client.version>4.5.2</httpcomponents.client.version>
-    <httpcomponents.core.version>4.4.4</httpcomponents.core.version>
+    <httpcomponents.client.version>4.4</httpcomponents.client.version>
+    <httpcomponents.core.version>4.4</httpcomponents.core.version>
     <ivy.version>2.4.0</ivy.version>
     <jackson.version>1.9.13</jackson.version>
     <!-- jackson 1 and 2 lines can coexist without issue, as they have different artifactIds -->
@@ -159,10 +157,10 @@
     <jamon.plugin.version>2.3.4</jamon.plugin.version>
     <jamon-runtime.version>2.3.1</jamon-runtime.version>
     <javaewah.version>0.3.2</javaewah.version>
-    <javax-servlet.version>3.1.0</javax-servlet.version>
+    <javax-servlet.version>3.0.0.v201112011016</javax-servlet.version>
     <javolution.version>5.5.1</javolution.version>
     <jdo-api.version>3.0.1</jdo-api.version>
-    <jetty.version>9.3.8.v20160314</jetty.version>
+    <jetty.version>7.6.0.v20120127</jetty.version>
     <jersey.version>1.14</jersey.version>
     <!-- Glassfish jersey is included for Spark client test only -->
     <glassfish.jersey.version>2.22.2</glassfish.jersey.version>
@@ -177,7 +175,7 @@
     <libthrift.version>0.9.3</libthrift.version>
     <log4j2.version>2.6.2</log4j2.version>
     <opencsv.version>2.3</opencsv.version>
-    <orc.version>1.3.3</orc.version>
+    <orc.version>1.3.1</orc.version>
     <mockito-all.version>1.9.5</mockito-all.version>
     <mina.version>2.0.0-M5</mina.version>
     <netty.version>4.0.29.Final</netty.version>
@@ -187,9 +185,9 @@
     <stax.version>1.0.1</stax.version>
     <slf4j.version>1.7.10</slf4j.version>
     <ST4.version>4.0.4</ST4.version>
-    <storage-api.version>3.0.0-SNAPSHOT</storage-api.version>
+    <storage-api.version>2.3.0-SNAPSHOT</storage-api.version>
     <tez.version>0.8.4</tez.version>
-    <slider.version>0.92.0-incubating</slider.version>
+    <slider.version>0.90.2-incubating</slider.version>
     <super-csv.version>2.2.0</super-csv.version>
     <spark.version>2.0.0</spark.version>
     <scala.binary.version>2.11</scala.binary.version>
@@ -610,31 +608,11 @@
         <version>${jackson.version}</version>
       </dependency>
       <dependency>
-        <groupId>org.eclipse.jetty</groupId>
-        <artifactId>jetty-rewrite</artifactId>
+        <groupId>org.eclipse.jetty.aggregate</groupId>
+        <artifactId>jetty-all-server</artifactId>
         <version>${jetty.version}</version>
       </dependency>
       <dependency>
-        <groupId>org.eclipse.jetty</groupId>
-        <artifactId>jetty-server</artifactId>
-        <version>${jetty.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.eclipse.jetty</groupId>
-        <artifactId>jetty-servlet</artifactId>
-        <version>${jetty.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.eclipse.jetty</groupId>
-        <artifactId>jetty-webapp</artifactId>
-        <version>${jetty.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>javax.servlet</groupId>
-        <artifactId>javax.servlet-api</artifactId>
-        <version>${javax-servlet.version}</version>
-      </dependency>
-      <dependency>
         <groupId>org.datanucleus</groupId>
         <artifactId>datanucleus-api-jdo</artifactId>
         <version>${datanucleus-api-jdo.version}</version>
@@ -684,24 +662,13 @@
             <artifactId>commons-logging</artifactId>
           </exclusion>
          </exclusions>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-auth</artifactId>
-        <version>${hadoop.version}</version>
-         <exclusions>
-           <exclusion>
-            <groupId>commmons-logging</groupId>
-            <artifactId>commons-logging</artifactId>
-          </exclusion>
-         </exclusions>
-      </dependency>
+     </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-common</artifactId>
         <version>${hadoop.version}</version>
         <exclusions>
-          <exclusion>
+            <exclusion>
             <groupId>org.slf4j</groupId>
             <artifactId>slf4j-log4j12</artifactId>
           </exclusion>
@@ -741,21 +708,6 @@
      </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-mapreduce-client-common</artifactId>
-        <version>${hadoop.version}</version>
-         <exclusions>
-            <exclusion>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-log4j12</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>commmons-logging</groupId>
-            <artifactId>commons-logging</artifactId>
-          </exclusion>
-        </exclusions>
-     </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-mapreduce-client-core</artifactId>
         <version>${hadoop.version}</version>
          <exclusions>
@@ -768,43 +720,13 @@
             <artifactId>commons-logging</artifactId>
           </exclusion>
         </exclusions>
-      </dependency>
+     </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-minikdc</artifactId>
         <version>${hadoop.version}</version>
       </dependency>
       <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-api</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-client</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-common</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-registry</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-web-common</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-web-proxy</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
         <groupId>org.apache.hbase</groupId>
         <artifactId>hbase-common</artifactId>
         <version>${hbase.version}</version>
@@ -881,6 +803,10 @@
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-compiler-plugin</artifactId>
           <version>${maven.compiler.plugin.version}</version>
+          <configuration>
+            <source>1.7</source>
+            <target>1.7</target>
+          </configuration>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
@@ -1180,12 +1106,12 @@
 	<version>0.10</version>
 	<configuration>
 	  <excludes>
-	    <exclude>binary-package-licenses/**</exclude>
 	    <exclude>data/**</exclude>
 	    <exclude>conf/**</exclude>
 	    <exclude>checkstyle/**</exclude>
 	    <exclude>bin/**</exclude>
 	    <exclude>itests/**</exclude>
+	    <exclude>docs/**</exclude>
             <exclude>**/README.md</exclude>
             <exclude>**/*.iml</exclude>
 	    <exclude>**/*.txt</exclude>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/pom.xml
----------------------------------------------------------------------
diff --git a/ql/pom.xml b/ql/pom.xml
index 40a216b..7db0ede 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 
@@ -229,13 +229,6 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-registry</artifactId>
-      <version>${hadoop.version}</version>
-      <optional>true</optional>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-core</artifactId>
       <version>${hadoop.version}</version>
       <optional>true</optional>
@@ -386,22 +379,12 @@
           <groupId>com.fasterxml.jackson.core</groupId>
           <artifactId>jackson-core</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>org.apache.calcite.avatica</groupId>
-          <artifactId>avatica-core</artifactId>
-        </exclusion>
       </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.calcite</groupId>
       <artifactId>calcite-druid</artifactId>
       <version>${calcite.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>org.apache.calcite.avatica</groupId>
-          <artifactId>avatica-core</artifactId>
-        </exclusion>
-      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.calcite.avatica</groupId>
@@ -729,12 +712,6 @@
       <version>${glassfish.jersey.version}</version>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.hamcrest</groupId>
-      <artifactId>hamcrest-all</artifactId>
-      <version>${hamcrest.version}</version>
-      <scope>test</scope>
-    </dependency>
   </dependencies>
 
   <profiles>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt
index 46cbb5b..4393c3b 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt
@@ -471,7 +471,7 @@ public class <ClassName> extends VectorAggregateExpression {
   }     
 
   @Override
-  public long getAggregationBufferFixedSize() {
+  public int getAggregationBufferFixedSize() {
     JavaDataModel model = JavaDataModel.get();
     return JavaDataModel.alignUp(
       model.object() +
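
This hunk and the template hunks that follow narrow getAggregationBufferFixedSize() back from long to int in the generated vector UDAF classes. A stand-alone sketch of the arithmetic involved, using simplified stand-ins rather than Hive's VectorAggregateExpression and JavaDataModel: the per-buffer estimate fits comfortably in an int, while a total across many buffers is still accumulated in a long.

    // Stand-alone illustration of the signature being reverted here; the class
    // and its constants are illustrative, not Hive's JavaDataModel estimates.
    public class AggBufferSizeSketch {

      // Post-revert shape: per-buffer fixed size reported as an int.
      static int getAggregationBufferFixedSize() {
        int objectHeader = 16;      // assumed object-header estimate
        int twoLongFields = 2 * 8;  // e.g. sum + count in an average buffer
        return objectHeader + twoLongFields;
      }

      public static void main(String[] args) {
        long numBuffers = 100000000L;                               // many group-by keys
        long total = numBuffers * getAggregationBufferFixedSize();  // promoted to long, no overflow
        System.out.println(total + " bytes estimated for " + numBuffers + " buffers");
      }
    }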

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt
index 2261e1b..7468c2f 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt
@@ -442,7 +442,7 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
     @Override
-    public long getAggregationBufferFixedSize() {
+    public int getAggregationBufferFixedSize() {
     JavaDataModel model = JavaDataModel.get();
     return JavaDataModel.alignUp(
       model.object() +

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
index 58d2d22..57b7ea5 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
@@ -458,7 +458,7 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
     @Override
-    public long getAggregationBufferFixedSize() {
+    public int getAggregationBufferFixedSize() {
     JavaDataModel model = JavaDataModel.get();
     return JavaDataModel.alignUp(
       model.object() +

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt
index 515692e..749e97e 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt
@@ -441,7 +441,7 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
     @Override
-    public long getAggregationBufferFixedSize() {
+    public int getAggregationBufferFixedSize() {
     JavaDataModel model = JavaDataModel.get();
     return JavaDataModel.alignUp(
       model.object() +

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt
index c210e4c..9dfc147 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt
@@ -81,7 +81,7 @@ public class <ClassName> extends VectorAggregateExpression {
       @Override
       public int getVariableSize() {
         JavaDataModel model = JavaDataModel.get();
-        return (int) model.lengthForByteArrayOfSize(bytes.length);
+        return model.lengthForByteArrayOfSize(bytes.length);
       }
 
       @Override
@@ -388,7 +388,7 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
     @Override
-    public long getAggregationBufferFixedSize() {
+    public int getAggregationBufferFixedSize() {
       JavaDataModel model = JavaDataModel.get();
       return JavaDataModel.alignUp(
         model.object() +

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt
index 074aefd..32ecb34 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt
@@ -443,7 +443,7 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
     @Override
-    public long getAggregationBufferFixedSize() {
+    public int getAggregationBufferFixedSize() {
     JavaDataModel model = JavaDataModel.get();
     return JavaDataModel.alignUp(
       model.object() +

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
index a89ae0a..bd0f14d 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
@@ -433,7 +433,7 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
   @Override
-  public long getAggregationBufferFixedSize() {
+  public int getAggregationBufferFixedSize() {
       JavaDataModel model = JavaDataModel.get();
       return JavaDataModel.alignUp(
         model.object(),

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt
index 1e3516b..dc9d4b1 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt
@@ -513,7 +513,7 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
   @Override
-  public long getAggregationBufferFixedSize() {
+  public int getAggregationBufferFixedSize() {
       JavaDataModel model = JavaDataModel.get();
       return JavaDataModel.alignUp(
         model.object() +

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt
index b3ec7e9..01062a9 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt
@@ -467,7 +467,7 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
   @Override
-  public long getAggregationBufferFixedSize() {
+  public int getAggregationBufferFixedSize() {
       JavaDataModel model = JavaDataModel.get();
       return JavaDataModel.alignUp(
         model.object() +
@@ -488,4 +488,4 @@ public class <ClassName> extends VectorAggregateExpression {
   public void setInputExpression(VectorExpression inputExpression) {
     this.inputExpression = inputExpression;
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/Context.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
index da1d3a5..758c536 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -356,7 +356,9 @@ public class Context {
 
       if (mkdir) {
         try {
-          if (!FileUtils.mkdir(fs, dir, conf)) {
+          boolean inheritPerms = HiveConf.getBoolVar(conf,
+              HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
+          if (!FileUtils.mkdir(fs, dir, inheritPerms, conf)) {
             throw new IllegalStateException("Cannot create staging directory  '" + dir.toString() + "'");
           }
 
@@ -949,13 +951,6 @@ public class Context {
   public ExplainConfiguration getExplainConfig() {
     return explainConfig;
   }
-  private boolean isExplainPlan = false;
-  public boolean isExplainPlan() {
-    return isExplainPlan;
-  }
-  public void setExplainPlan(boolean t) {
-    this.isExplainPlan = t;
-  }
 
   public void setExplainConfig(ExplainConfiguration explainConfig) {
     this.explainConfig = explainConfig;
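
The Context hunk above restores staging-directory creation that reads hive.warehouse.subdir.inherit.perms and forwards it to FileUtils.mkdir, in place of the three-argument call. A condensed sketch of that restored path; the import packages (org.apache.hadoop.hive.common.FileUtils, org.apache.hadoop.hive.conf.HiveConf) and the /tmp location are assumptions, not taken from this diff:

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.common.FileUtils;
    import org.apache.hadoop.hive.conf.HiveConf;

    // Condensed form of the staging-directory creation restored above: the
    // inherit-perms flag is read from the conf and passed to FileUtils.mkdir.
    public class StagingDirSketch {

      static Path makeStagingDir(FileSystem fs, Path dir, HiveConf conf) throws Exception {
        boolean inheritPerms = HiveConf.getBoolVar(conf,
            HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
        if (!FileUtils.mkdir(fs, dir, inheritPerms, conf)) {
          throw new IllegalStateException("Cannot create staging directory '" + dir + "'");
        }
        return dir;
      }

      public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        Path dir = new Path("/tmp/hive-staging-sketch");  // hypothetical location
        FileSystem fs = dir.getFileSystem(conf);          // local FS unless fs.defaultFS says otherwise
        System.out.println("Created: " + makeStagingDir(fs, dir, conf));
      }
    }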


http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index a750a1c..4997c51 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -202,13 +202,6 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
    */
   public function drop_table_with_environment_context($dbname, $name, $deleteData, \metastore\EnvironmentContext $environment_context);
   /**
-   * @param string $dbName
-   * @param string $tableName
-   * @param string[] $partNames
-   * @throws \metastore\MetaException
-   */
-  public function truncate_table($dbName, $tableName, array $partNames);
-  /**
    * @param string $db_name
    * @param string $pattern
    * @return string[]
@@ -2595,59 +2588,6 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
     return;
   }
 
-  public function truncate_table($dbName, $tableName, array $partNames)
-  {
-    $this->send_truncate_table($dbName, $tableName, $partNames);
-    $this->recv_truncate_table();
-  }
-
-  public function send_truncate_table($dbName, $tableName, array $partNames)
-  {
-    $args = new \metastore\ThriftHiveMetastore_truncate_table_args();
-    $args->dbName = $dbName;
-    $args->tableName = $tableName;
-    $args->partNames = $partNames;
-    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
-    if ($bin_accel)
-    {
-      thrift_protocol_write_binary($this->output_, 'truncate_table', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
-    }
-    else
-    {
-      $this->output_->writeMessageBegin('truncate_table', TMessageType::CALL, $this->seqid_);
-      $args->write($this->output_);
-      $this->output_->writeMessageEnd();
-      $this->output_->getTransport()->flush();
-    }
-  }
-
-  public function recv_truncate_table()
-  {
-    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
-    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_truncate_table_result', $this->input_->isStrictRead());
-    else
-    {
-      $rseqid = 0;
-      $fname = null;
-      $mtype = 0;
-
-      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
-      if ($mtype == TMessageType::EXCEPTION) {
-        $x = new TApplicationException();
-        $x->read($this->input_);
-        $this->input_->readMessageEnd();
-        throw $x;
-      }
-      $result = new \metastore\ThriftHiveMetastore_truncate_table_result();
-      $result->read($this->input_);
-      $this->input_->readMessageEnd();
-    }
-    if ($result->o1 !== null) {
-      throw $result->o1;
-    }
-    return;
-  }
-
   public function get_tables($db_name, $pattern)
   {
     $this->send_get_tables($db_name, $pattern);
@@ -11316,14 +11256,14 @@ class ThriftHiveMetastore_get_databases_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size603 = 0;
-            $_etype606 = 0;
-            $xfer += $input->readListBegin($_etype606, $_size603);
-            for ($_i607 = 0; $_i607 < $_size603; ++$_i607)
+            $_size604 = 0;
+            $_etype607 = 0;
+            $xfer += $input->readListBegin($_etype607, $_size604);
+            for ($_i608 = 0; $_i608 < $_size604; ++$_i608)
             {
-              $elem608 = null;
-              $xfer += $input->readString($elem608);
-              $this->success []= $elem608;
+              $elem609 = null;
+              $xfer += $input->readString($elem609);
+              $this->success []= $elem609;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -11359,9 +11299,9 @@ class ThriftHiveMetastore_get_databases_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter609)
+          foreach ($this->success as $iter610)
           {
-            $xfer += $output->writeString($iter609);
+            $xfer += $output->writeString($iter610);
           }
         }
         $output->writeListEnd();
@@ -11492,14 +11432,14 @@ class ThriftHiveMetastore_get_all_databases_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size610 = 0;
-            $_etype613 = 0;
-            $xfer += $input->readListBegin($_etype613, $_size610);
-            for ($_i614 = 0; $_i614 < $_size610; ++$_i614)
+            $_size611 = 0;
+            $_etype614 = 0;
+            $xfer += $input->readListBegin($_etype614, $_size611);
+            for ($_i615 = 0; $_i615 < $_size611; ++$_i615)
             {
-              $elem615 = null;
-              $xfer += $input->readString($elem615);
-              $this->success []= $elem615;
+              $elem616 = null;
+              $xfer += $input->readString($elem616);
+              $this->success []= $elem616;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -11535,9 +11475,9 @@ class ThriftHiveMetastore_get_all_databases_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter616)
+          foreach ($this->success as $iter617)
           {
-            $xfer += $output->writeString($iter616);
+            $xfer += $output->writeString($iter617);
           }
         }
         $output->writeListEnd();
@@ -12538,18 +12478,18 @@ class ThriftHiveMetastore_get_type_all_result {
         case 0:
           if ($ftype == TType::MAP) {
             $this->success = array();
-            $_size617 = 0;
-            $_ktype618 = 0;
-            $_vtype619 = 0;
-            $xfer += $input->readMapBegin($_ktype618, $_vtype619, $_size617);
-            for ($_i621 = 0; $_i621 < $_size617; ++$_i621)
+            $_size618 = 0;
+            $_ktype619 = 0;
+            $_vtype620 = 0;
+            $xfer += $input->readMapBegin($_ktype619, $_vtype620, $_size618);
+            for ($_i622 = 0; $_i622 < $_size618; ++$_i622)
             {
-              $key622 = '';
-              $val623 = new \metastore\Type();
-              $xfer += $input->readString($key622);
-              $val623 = new \metastore\Type();
-              $xfer += $val623->read($input);
-              $this->success[$key622] = $val623;
+              $key623 = '';
+              $val624 = new \metastore\Type();
+              $xfer += $input->readString($key623);
+              $val624 = new \metastore\Type();
+              $xfer += $val624->read($input);
+              $this->success[$key623] = $val624;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -12585,10 +12525,10 @@ class ThriftHiveMetastore_get_type_all_result {
       {
         $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $kiter624 => $viter625)
+          foreach ($this->success as $kiter625 => $viter626)
           {
-            $xfer += $output->writeString($kiter624);
-            $xfer += $viter625->write($output);
+            $xfer += $output->writeString($kiter625);
+            $xfer += $viter626->write($output);
           }
         }
         $output->writeMapEnd();
@@ -12792,15 +12732,15 @@ class ThriftHiveMetastore_get_fields_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size626 = 0;
-            $_etype629 = 0;
-            $xfer += $input->readListBegin($_etype629, $_size626);
-            for ($_i630 = 0; $_i630 < $_size626; ++$_i630)
+            $_size627 = 0;
+            $_etype630 = 0;
+            $xfer += $input->readListBegin($_etype630, $_size627);
+            for ($_i631 = 0; $_i631 < $_size627; ++$_i631)
             {
-              $elem631 = null;
-              $elem631 = new \metastore\FieldSchema();
-              $xfer += $elem631->read($input);
-              $this->success []= $elem631;
+              $elem632 = null;
+              $elem632 = new \metastore\FieldSchema();
+              $xfer += $elem632->read($input);
+              $this->success []= $elem632;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -12852,9 +12792,9 @@ class ThriftHiveMetastore_get_fields_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter632)
+          foreach ($this->success as $iter633)
           {
-            $xfer += $iter632->write($output);
+            $xfer += $iter633->write($output);
           }
         }
         $output->writeListEnd();
@@ -13096,15 +13036,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size633 = 0;
-            $_etype636 = 0;
-            $xfer += $input->readListBegin($_etype636, $_size633);
-            for ($_i637 = 0; $_i637 < $_size633; ++$_i637)
+            $_size634 = 0;
+            $_etype637 = 0;
+            $xfer += $input->readListBegin($_etype637, $_size634);
+            for ($_i638 = 0; $_i638 < $_size634; ++$_i638)
             {
-              $elem638 = null;
-              $elem638 = new \metastore\FieldSchema();
-              $xfer += $elem638->read($input);
-              $this->success []= $elem638;
+              $elem639 = null;
+              $elem639 = new \metastore\FieldSchema();
+              $xfer += $elem639->read($input);
+              $this->success []= $elem639;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -13156,9 +13096,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter639)
+          foreach ($this->success as $iter640)
           {
-            $xfer += $iter639->write($output);
+            $xfer += $iter640->write($output);
           }
         }
         $output->writeListEnd();
@@ -13372,15 +13312,15 @@ class ThriftHiveMetastore_get_schema_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size640 = 0;
-            $_etype643 = 0;
-            $xfer += $input->readListBegin($_etype643, $_size640);
-            for ($_i644 = 0; $_i644 < $_size640; ++$_i644)
+            $_size641 = 0;
+            $_etype644 = 0;
+            $xfer += $input->readListBegin($_etype644, $_size641);
+            for ($_i645 = 0; $_i645 < $_size641; ++$_i645)
             {
-              $elem645 = null;
-              $elem645 = new \metastore\FieldSchema();
-              $xfer += $elem645->read($input);
-              $this->success []= $elem645;
+              $elem646 = null;
+              $elem646 = new \metastore\FieldSchema();
+              $xfer += $elem646->read($input);
+              $this->success []= $elem646;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -13432,9 +13372,9 @@ class ThriftHiveMetastore_get_schema_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter646)
+          foreach ($this->success as $iter647)
           {
-            $xfer += $iter646->write($output);
+            $xfer += $iter647->write($output);
           }
         }
         $output->writeListEnd();
@@ -13676,15 +13616,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size647 = 0;
-            $_etype650 = 0;
-            $xfer += $input->readListBegin($_etype650, $_size647);
-            for ($_i651 = 0; $_i651 < $_size647; ++$_i651)
+            $_size648 = 0;
+            $_etype651 = 0;
+            $xfer += $input->readListBegin($_etype651, $_size648);
+            for ($_i652 = 0; $_i652 < $_size648; ++$_i652)
             {
-              $elem652 = null;
-              $elem652 = new \metastore\FieldSchema();
-              $xfer += $elem652->read($input);
-              $this->success []= $elem652;
+              $elem653 = null;
+              $elem653 = new \metastore\FieldSchema();
+              $xfer += $elem653->read($input);
+              $this->success []= $elem653;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -13736,9 +13676,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter653)
+          foreach ($this->success as $iter654)
           {
-            $xfer += $iter653->write($output);
+            $xfer += $iter654->write($output);
           }
         }
         $output->writeListEnd();
@@ -14346,15 +14286,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
         case 2:
           if ($ftype == TType::LST) {
             $this->primaryKeys = array();
-            $_size654 = 0;
-            $_etype657 = 0;
-            $xfer += $input->readListBegin($_etype657, $_size654);
-            for ($_i658 = 0; $_i658 < $_size654; ++$_i658)
+            $_size655 = 0;
+            $_etype658 = 0;
+            $xfer += $input->readListBegin($_etype658, $_size655);
+            for ($_i659 = 0; $_i659 < $_size655; ++$_i659)
             {
-              $elem659 = null;
-              $elem659 = new \metastore\SQLPrimaryKey();
-              $xfer += $elem659->read($input);
-              $this->primaryKeys []= $elem659;
+              $elem660 = null;
+              $elem660 = new \metastore\SQLPrimaryKey();
+              $xfer += $elem660->read($input);
+              $this->primaryKeys []= $elem660;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -14364,15 +14304,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->foreignKeys = array();
-            $_size660 = 0;
-            $_etype663 = 0;
-            $xfer += $input->readListBegin($_etype663, $_size660);
-            for ($_i664 = 0; $_i664 < $_size660; ++$_i664)
+            $_size661 = 0;
+            $_etype664 = 0;
+            $xfer += $input->readListBegin($_etype664, $_size661);
+            for ($_i665 = 0; $_i665 < $_size661; ++$_i665)
             {
-              $elem665 = null;
-              $elem665 = new \metastore\SQLForeignKey();
-              $xfer += $elem665->read($input);
-              $this->foreignKeys []= $elem665;
+              $elem666 = null;
+              $elem666 = new \metastore\SQLForeignKey();
+              $xfer += $elem666->read($input);
+              $this->foreignKeys []= $elem666;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -14408,9 +14348,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->primaryKeys));
         {
-          foreach ($this->primaryKeys as $iter666)
+          foreach ($this->primaryKeys as $iter667)
           {
-            $xfer += $iter666->write($output);
+            $xfer += $iter667->write($output);
           }
         }
         $output->writeListEnd();
@@ -14425,9 +14365,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->foreignKeys));
         {
-          foreach ($this->foreignKeys as $iter667)
+          foreach ($this->foreignKeys as $iter668)
           {
-            $xfer += $iter667->write($output);
+            $xfer += $iter668->write($output);
           }
         }
         $output->writeListEnd();
@@ -15613,230 +15553,6 @@ class ThriftHiveMetastore_drop_table_with_environment_context_result {
 
 }
 
-class ThriftHiveMetastore_truncate_table_args {
-  static $_TSPEC;
-
-  /**
-   * @var string
-   */
-  public $dbName = null;
-  /**
-   * @var string
-   */
-  public $tableName = null;
-  /**
-   * @var string[]
-   */
-  public $partNames = null;
-
-  public function __construct($vals=null) {
-    if (!isset(self::$_TSPEC)) {
-      self::$_TSPEC = array(
-        1 => array(
-          'var' => 'dbName',
-          'type' => TType::STRING,
-          ),
-        2 => array(
-          'var' => 'tableName',
-          'type' => TType::STRING,
-          ),
-        3 => array(
-          'var' => 'partNames',
-          'type' => TType::LST,
-          'etype' => TType::STRING,
-          'elem' => array(
-            'type' => TType::STRING,
-            ),
-          ),
-        );
-    }
-    if (is_array($vals)) {
-      if (isset($vals['dbName'])) {
-        $this->dbName = $vals['dbName'];
-      }
-      if (isset($vals['tableName'])) {
-        $this->tableName = $vals['tableName'];
-      }
-      if (isset($vals['partNames'])) {
-        $this->partNames = $vals['partNames'];
-      }
-    }
-  }
-
-  public function getName() {
-    return 'ThriftHiveMetastore_truncate_table_args';
-  }
-
-  public function read($input)
-  {
-    $xfer = 0;
-    $fname = null;
-    $ftype = 0;
-    $fid = 0;
-    $xfer += $input->readStructBegin($fname);
-    while (true)
-    {
-      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
-      if ($ftype == TType::STOP) {
-        break;
-      }
-      switch ($fid)
-      {
-        case 1:
-          if ($ftype == TType::STRING) {
-            $xfer += $input->readString($this->dbName);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 2:
-          if ($ftype == TType::STRING) {
-            $xfer += $input->readString($this->tableName);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 3:
-          if ($ftype == TType::LST) {
-            $this->partNames = array();
-            $_size668 = 0;
-            $_etype671 = 0;
-            $xfer += $input->readListBegin($_etype671, $_size668);
-            for ($_i672 = 0; $_i672 < $_size668; ++$_i672)
-            {
-              $elem673 = null;
-              $xfer += $input->readString($elem673);
-              $this->partNames []= $elem673;
-            }
-            $xfer += $input->readListEnd();
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        default:
-          $xfer += $input->skip($ftype);
-          break;
-      }
-      $xfer += $input->readFieldEnd();
-    }
-    $xfer += $input->readStructEnd();
-    return $xfer;
-  }
-
-  public function write($output) {
-    $xfer = 0;
-    $xfer += $output->writeStructBegin('ThriftHiveMetastore_truncate_table_args');
-    if ($this->dbName !== null) {
-      $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1);
-      $xfer += $output->writeString($this->dbName);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->tableName !== null) {
-      $xfer += $output->writeFieldBegin('tableName', TType::STRING, 2);
-      $xfer += $output->writeString($this->tableName);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->partNames !== null) {
-      if (!is_array($this->partNames)) {
-        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
-      }
-      $xfer += $output->writeFieldBegin('partNames', TType::LST, 3);
-      {
-        $output->writeListBegin(TType::STRING, count($this->partNames));
-        {
-          foreach ($this->partNames as $iter674)
-          {
-            $xfer += $output->writeString($iter674);
-          }
-        }
-        $output->writeListEnd();
-      }
-      $xfer += $output->writeFieldEnd();
-    }
-    $xfer += $output->writeFieldStop();
-    $xfer += $output->writeStructEnd();
-    return $xfer;
-  }
-
-}
-
-class ThriftHiveMetastore_truncate_table_result {
-  static $_TSPEC;
-
-  /**
-   * @var \metastore\MetaException
-   */
-  public $o1 = null;
-
-  public function __construct($vals=null) {
-    if (!isset(self::$_TSPEC)) {
-      self::$_TSPEC = array(
-        1 => array(
-          'var' => 'o1',
-          'type' => TType::STRUCT,
-          'class' => '\metastore\MetaException',
-          ),
-        );
-    }
-    if (is_array($vals)) {
-      if (isset($vals['o1'])) {
-        $this->o1 = $vals['o1'];
-      }
-    }
-  }
-
-  public function getName() {
-    return 'ThriftHiveMetastore_truncate_table_result';
-  }
-
-  public function read($input)
-  {
-    $xfer = 0;
-    $fname = null;
-    $ftype = 0;
-    $fid = 0;
-    $xfer += $input->readStructBegin($fname);
-    while (true)
-    {
-      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
-      if ($ftype == TType::STOP) {
-        break;
-      }
-      switch ($fid)
-      {
-        case 1:
-          if ($ftype == TType::STRUCT) {
-            $this->o1 = new \metastore\MetaException();
-            $xfer += $this->o1->read($input);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        default:
-          $xfer += $input->skip($ftype);
-          break;
-      }
-      $xfer += $input->readFieldEnd();
-    }
-    $xfer += $input->readStructEnd();
-    return $xfer;
-  }
-
-  public function write($output) {
-    $xfer = 0;
-    $xfer += $output->writeStructBegin('ThriftHiveMetastore_truncate_table_result');
-    if ($this->o1 !== null) {
-      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
-      $xfer += $this->o1->write($output);
-      $xfer += $output->writeFieldEnd();
-    }
-    $xfer += $output->writeFieldStop();
-    $xfer += $output->writeStructEnd();
-    return $xfer;
-  }
-
-}
-
 class ThriftHiveMetastore_get_tables_args {
   static $_TSPEC;
 
@@ -15997,14 +15713,14 @@ class ThriftHiveMetastore_get_tables_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size675 = 0;
-            $_etype678 = 0;
-            $xfer += $input->readListBegin($_etype678, $_size675);
-            for ($_i679 = 0; $_i679 < $_size675; ++$_i679)
+            $_size669 = 0;
+            $_etype672 = 0;
+            $xfer += $input->readListBegin($_etype672, $_size669);
+            for ($_i673 = 0; $_i673 < $_size669; ++$_i673)
             {
-              $elem680 = null;
-              $xfer += $input->readString($elem680);
-              $this->success []= $elem680;
+              $elem674 = null;
+              $xfer += $input->readString($elem674);
+              $this->success []= $elem674;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16040,9 +15756,9 @@ class ThriftHiveMetastore_get_tables_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter681)
+          foreach ($this->success as $iter675)
           {
-            $xfer += $output->writeString($iter681);
+            $xfer += $output->writeString($iter675);
           }
         }
         $output->writeListEnd();
@@ -16244,14 +15960,14 @@ class ThriftHiveMetastore_get_tables_by_type_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size682 = 0;
-            $_etype685 = 0;
-            $xfer += $input->readListBegin($_etype685, $_size682);
-            for ($_i686 = 0; $_i686 < $_size682; ++$_i686)
+            $_size676 = 0;
+            $_etype679 = 0;
+            $xfer += $input->readListBegin($_etype679, $_size676);
+            for ($_i680 = 0; $_i680 < $_size676; ++$_i680)
             {
-              $elem687 = null;
-              $xfer += $input->readString($elem687);
-              $this->success []= $elem687;
+              $elem681 = null;
+              $xfer += $input->readString($elem681);
+              $this->success []= $elem681;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16287,9 +16003,9 @@ class ThriftHiveMetastore_get_tables_by_type_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter688)
+          foreach ($this->success as $iter682)
           {
-            $xfer += $output->writeString($iter688);
+            $xfer += $output->writeString($iter682);
           }
         }
         $output->writeListEnd();
@@ -16394,14 +16110,14 @@ class ThriftHiveMetastore_get_table_meta_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->tbl_types = array();
-            $_size689 = 0;
-            $_etype692 = 0;
-            $xfer += $input->readListBegin($_etype692, $_size689);
-            for ($_i693 = 0; $_i693 < $_size689; ++$_i693)
+            $_size683 = 0;
+            $_etype686 = 0;
+            $xfer += $input->readListBegin($_etype686, $_size683);
+            for ($_i687 = 0; $_i687 < $_size683; ++$_i687)
             {
-              $elem694 = null;
-              $xfer += $input->readString($elem694);
-              $this->tbl_types []= $elem694;
+              $elem688 = null;
+              $xfer += $input->readString($elem688);
+              $this->tbl_types []= $elem688;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16439,9 +16155,9 @@ class ThriftHiveMetastore_get_table_meta_args {
       {
         $output->writeListBegin(TType::STRING, count($this->tbl_types));
         {
-          foreach ($this->tbl_types as $iter695)
+          foreach ($this->tbl_types as $iter689)
           {
-            $xfer += $output->writeString($iter695);
+            $xfer += $output->writeString($iter689);
           }
         }
         $output->writeListEnd();
@@ -16518,15 +16234,15 @@ class ThriftHiveMetastore_get_table_meta_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size696 = 0;
-            $_etype699 = 0;
-            $xfer += $input->readListBegin($_etype699, $_size696);
-            for ($_i700 = 0; $_i700 < $_size696; ++$_i700)
+            $_size690 = 0;
+            $_etype693 = 0;
+            $xfer += $input->readListBegin($_etype693, $_size690);
+            for ($_i694 = 0; $_i694 < $_size690; ++$_i694)
             {
-              $elem701 = null;
-              $elem701 = new \metastore\TableMeta();
-              $xfer += $elem701->read($input);
-              $this->success []= $elem701;
+              $elem695 = null;
+              $elem695 = new \metastore\TableMeta();
+              $xfer += $elem695->read($input);
+              $this->success []= $elem695;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16562,9 +16278,9 @@ class ThriftHiveMetastore_get_table_meta_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter702)
+          foreach ($this->success as $iter696)
           {
-            $xfer += $iter702->write($output);
+            $xfer += $iter696->write($output);
           }
         }
         $output->writeListEnd();
@@ -16720,14 +16436,14 @@ class ThriftHiveMetastore_get_all_tables_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size703 = 0;
-            $_etype706 = 0;
-            $xfer += $input->readListBegin($_etype706, $_size703);
-            for ($_i707 = 0; $_i707 < $_size703; ++$_i707)
+            $_size697 = 0;
+            $_etype700 = 0;
+            $xfer += $input->readListBegin($_etype700, $_size697);
+            for ($_i701 = 0; $_i701 < $_size697; ++$_i701)
             {
-              $elem708 = null;
-              $xfer += $input->readString($elem708);
-              $this->success []= $elem708;
+              $elem702 = null;
+              $xfer += $input->readString($elem702);
+              $this->success []= $elem702;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16763,9 +16479,9 @@ class ThriftHiveMetastore_get_all_tables_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter709)
+          foreach ($this->success as $iter703)
           {
-            $xfer += $output->writeString($iter709);
+            $xfer += $output->writeString($iter703);
           }
         }
         $output->writeListEnd();
@@ -17080,14 +16796,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args {
         case 2:
           if ($ftype == TType::LST) {
             $this->tbl_names = array();
-            $_size710 = 0;
-            $_etype713 = 0;
-            $xfer += $input->readListBegin($_etype713, $_size710);
-            for ($_i714 = 0; $_i714 < $_size710; ++$_i714)
+            $_size704 = 0;
+            $_etype707 = 0;
+            $xfer += $input->readListBegin($_etype707, $_size704);
+            for ($_i708 = 0; $_i708 < $_size704; ++$_i708)
             {
-              $elem715 = null;
-              $xfer += $input->readString($elem715);
-              $this->tbl_names []= $elem715;
+              $elem709 = null;
+              $xfer += $input->readString($elem709);
+              $this->tbl_names []= $elem709;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17120,9 +16836,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args {
       {
         $output->writeListBegin(TType::STRING, count($this->tbl_names));
         {
-          foreach ($this->tbl_names as $iter716)
+          foreach ($this->tbl_names as $iter710)
           {
-            $xfer += $output->writeString($iter716);
+            $xfer += $output->writeString($iter710);
           }
         }
         $output->writeListEnd();
@@ -17187,15 +16903,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size717 = 0;
-            $_etype720 = 0;
-            $xfer += $input->readListBegin($_etype720, $_size717);
-            for ($_i721 = 0; $_i721 < $_size717; ++$_i721)
+            $_size711 = 0;
+            $_etype714 = 0;
+            $xfer += $input->readListBegin($_etype714, $_size711);
+            for ($_i715 = 0; $_i715 < $_size711; ++$_i715)
             {
-              $elem722 = null;
-              $elem722 = new \metastore\Table();
-              $xfer += $elem722->read($input);
-              $this->success []= $elem722;
+              $elem716 = null;
+              $elem716 = new \metastore\Table();
+              $xfer += $elem716->read($input);
+              $this->success []= $elem716;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17223,9 +16939,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter723)
+          foreach ($this->success as $iter717)
           {
-            $xfer += $iter723->write($output);
+            $xfer += $iter717->write($output);
           }
         }
         $output->writeListEnd();
@@ -17891,14 +17607,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size724 = 0;
-            $_etype727 = 0;
-            $xfer += $input->readListBegin($_etype727, $_size724);
-            for ($_i728 = 0; $_i728 < $_size724; ++$_i728)
+            $_size718 = 0;
+            $_etype721 = 0;
+            $xfer += $input->readListBegin($_etype721, $_size718);
+            for ($_i722 = 0; $_i722 < $_size718; ++$_i722)
             {
-              $elem729 = null;
-              $xfer += $input->readString($elem729);
-              $this->success []= $elem729;
+              $elem723 = null;
+              $xfer += $input->readString($elem723);
+              $this->success []= $elem723;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17950,9 +17666,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter730)
+          foreach ($this->success as $iter724)
           {
-            $xfer += $output->writeString($iter730);
+            $xfer += $output->writeString($iter724);
           }
         }
         $output->writeListEnd();
@@ -19265,15 +18981,15 @@ class ThriftHiveMetastore_add_partitions_args {
         case 1:
           if ($ftype == TType::LST) {
             $this->new_parts = array();
-            $_size731 = 0;
-            $_etype734 = 0;
-            $xfer += $input->readListBegin($_etype734, $_size731);
-            for ($_i735 = 0; $_i735 < $_size731; ++$_i735)
+            $_size725 = 0;
+            $_etype728 = 0;
+            $xfer += $input->readListBegin($_etype728, $_size725);
+            for ($_i729 = 0; $_i729 < $_size725; ++$_i729)
             {
-              $elem736 = null;
-              $elem736 = new \metastore\Partition();
-              $xfer += $elem736->read($input);
-              $this->new_parts []= $elem736;
+              $elem730 = null;
+              $elem730 = new \metastore\Partition();
+              $xfer += $elem730->read($input);
+              $this->new_parts []= $elem730;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -19301,9 +19017,9 @@ class ThriftHiveMetastore_add_partitions_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->new_parts));
         {
-          foreach ($this->new_parts as $iter737)
+          foreach ($this->new_parts as $iter731)
           {
-            $xfer += $iter737->write($output);
+            $xfer += $iter731->write($output);
           }
         }
         $output->writeListEnd();
@@ -19518,15 +19234,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args {
         case 1:
           if ($ftype == TType::LST) {
             $this->new_parts = array();
-            $_size738 = 0;
-            $_etype741 = 0;
-            $xfer += $input->readListBegin($_etype741, $_size738);
-            for ($_i742 = 0; $_i742 < $_size738; ++$_i742)
+            $_size732 = 0;
+            $_etype735 = 0;
+            $xfer += $input->readListBegin($_etype735, $_size732);
+            for ($_i736 = 0; $_i736 < $_size732; ++$_i736)
             {
-              $elem743 = null;
-              $elem743 = new \metastore\PartitionSpec();
-              $xfer += $elem743->read($input);
-              $this->new_parts []= $elem743;
+              $elem737 = null;
+              $elem737 = new \metastore\PartitionSpec();
+              $xfer += $elem737->read($input);
+              $this->new_parts []= $elem737;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -19554,9 +19270,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->new_parts));
         {
-          foreach ($this->new_parts as $iter744)
+          foreach ($this->new_parts as $iter738)
           {
-            $xfer += $iter744->write($output);
+            $xfer += $iter738->write($output);
           }
         }
         $output->writeListEnd();
@@ -19806,14 +19522,14 @@ class ThriftHiveMetastore_append_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size745 = 0;
-            $_etype748 = 0;
-            $xfer += $input->readListBegin($_etype748, $_size745);
-            for ($_i749 = 0; $_i749 < $_size745; ++$_i749)
+            $_size739 = 0;
+            $_etype742 = 0;
+            $xfer += $input->readListBegin($_etype742, $_size739);
+            for ($_i743 = 0; $_i743 < $_size739; ++$_i743)
             {
-              $elem750 = null;
-              $xfer += $input->readString($elem750);
-              $this->part_vals []= $elem750;
+              $elem744 = null;
+              $xfer += $input->readString($elem744);
+              $this->part_vals []= $elem744;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -19851,9 +19567,9 @@ class ThriftHiveMetastore_append_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter751)
+          foreach ($this->part_vals as $iter745)
           {
-            $xfer += $output->writeString($iter751);
+            $xfer += $output->writeString($iter745);
           }
         }
         $output->writeListEnd();
@@ -20355,14 +20071,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size752 = 0;
-            $_etype755 = 0;
-            $xfer += $input->readListBegin($_etype755, $_size752);
-            for ($_i756 = 0; $_i756 < $_size752; ++$_i756)
+            $_size746 = 0;
+            $_etype749 = 0;
+            $xfer += $input->readListBegin($_etype749, $_size746);
+            for ($_i750 = 0; $_i750 < $_size746; ++$_i750)
             {
-              $elem757 = null;
-              $xfer += $input->readString($elem757);
-              $this->part_vals []= $elem757;
+              $elem751 = null;
+              $xfer += $input->readString($elem751);
+              $this->part_vals []= $elem751;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -20408,9 +20124,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter758)
+          foreach ($this->part_vals as $iter752)
           {
-            $xfer += $output->writeString($iter758);
+            $xfer += $output->writeString($iter752);
           }
         }
         $output->writeListEnd();
@@ -21264,14 +20980,14 @@ class ThriftHiveMetastore_drop_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size759 = 0;
-            $_etype762 = 0;
-            $xfer += $input->readListBegin($_etype762, $_size759);
-            for ($_i763 = 0; $_i763 < $_size759; ++$_i763)
+            $_size753 = 0;
+            $_etype756 = 0;
+            $xfer += $input->readListBegin($_etype756, $_size753);
+            for ($_i757 = 0; $_i757 < $_size753; ++$_i757)
             {
-              $elem764 = null;
-              $xfer += $input->readString($elem764);
-              $this->part_vals []= $elem764;
+              $elem758 = null;
+              $xfer += $input->readString($elem758);
+              $this->part_vals []= $elem758;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21316,9 +21032,9 @@ class ThriftHiveMetastore_drop_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter765)
+          foreach ($this->part_vals as $iter759)
           {
-            $xfer += $output->writeString($iter765);
+            $xfer += $output->writeString($iter759);
           }
         }
         $output->writeListEnd();
@@ -21571,14 +21287,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size766 = 0;
-            $_etype769 = 0;
-            $xfer += $input->readListBegin($_etype769, $_size766);
-            for ($_i770 = 0; $_i770 < $_size766; ++$_i770)
+            $_size760 = 0;
+            $_etype763 = 0;
+            $xfer += $input->readListBegin($_etype763, $_size760);
+            for ($_i764 = 0; $_i764 < $_size760; ++$_i764)
             {
-              $elem771 = null;
-              $xfer += $input->readString($elem771);
-              $this->part_vals []= $elem771;
+              $elem765 = null;
+              $xfer += $input->readString($elem765);
+              $this->part_vals []= $elem765;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21631,9 +21347,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter772)
+          foreach ($this->part_vals as $iter766)
           {
-            $xfer += $output->writeString($iter772);
+            $xfer += $output->writeString($iter766);
           }
         }
         $output->writeListEnd();
@@ -22647,14 +22363,14 @@ class ThriftHiveMetastore_get_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size773 = 0;
-            $_etype776 = 0;
-            $xfer += $input->readListBegin($_etype776, $_size773);
-            for ($_i777 = 0; $_i777 < $_size773; ++$_i777)
+            $_size767 = 0;
+            $_etype770 = 0;
+            $xfer += $input->readListBegin($_etype770, $_size767);
+            for ($_i771 = 0; $_i771 < $_size767; ++$_i771)
             {
-              $elem778 = null;
-              $xfer += $input->readString($elem778);
-              $this->part_vals []= $elem778;
+              $elem772 = null;
+              $xfer += $input->readString($elem772);
+              $this->part_vals []= $elem772;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22692,9 +22408,9 @@ class ThriftHiveMetastore_get_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter779)
+          foreach ($this->part_vals as $iter773)
           {
-            $xfer += $output->writeString($iter779);
+            $xfer += $output->writeString($iter773);
           }
         }
         $output->writeListEnd();
@@ -22936,17 +22652,17 @@ class ThriftHiveMetastore_exchange_partition_args {
         case 1:
           if ($ftype == TType::MAP) {
             $this->partitionSpecs = array();
-            $_size780 = 0;
-            $_ktype781 = 0;
-            $_vtype782 = 0;
-            $xfer += $input->readMapBegin($_ktype781, $_vtype782, $_size780);
-            for ($_i784 = 0; $_i784 < $_size780; ++$_i784)
+            $_size774 = 0;
+            $_ktype775 = 0;
+            $_vtype776 = 0;
+            $xfer += $input->readMapBegin($_ktype775, $_vtype776, $_size774);
+            for ($_i778 = 0; $_i778 < $_size774; ++$_i778)
             {
-              $key785 = '';
-              $val786 = '';
-              $xfer += $input->readString($key785);
-              $xfer += $input->readString($val786);
-              $this->partitionSpecs[$key785] = $val786;
+              $key779 = '';
+              $val780 = '';
+              $xfer += $input->readString($key779);
+              $xfer += $input->readString($val780);
+              $this->partitionSpecs[$key779] = $val780;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -23002,10 +22718,10 @@ class ThriftHiveMetastore_exchange_partition_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
         {
-          foreach ($this->partitionSpecs as $kiter787 => $viter788)
+          foreach ($this->partitionSpecs as $kiter781 => $viter782)
           {
-            $xfer += $output->writeString($kiter787);
-            $xfer += $output->writeString($viter788);
+            $xfer += $output->writeString($kiter781);
+            $xfer += $output->writeString($viter782);
           }
         }
         $output->writeMapEnd();
@@ -23317,17 +23033,17 @@ class ThriftHiveMetastore_exchange_partitions_args {
         case 1:
           if ($ftype == TType::MAP) {
             $this->partitionSpecs = array();
-            $_size789 = 0;
-            $_ktype790 = 0;
-            $_vtype791 = 0;
-            $xfer += $input->readMapBegin($_ktype790, $_vtype791, $_size789);
-            for ($_i793 = 0; $_i793 < $_size789; ++$_i793)
+            $_size783 = 0;
+            $_ktype784 = 0;
+            $_vtype785 = 0;
+            $xfer += $input->readMapBegin($_ktype784, $_vtype785, $_size783);
+            for ($_i787 = 0; $_i787 < $_size783; ++$_i787)
             {
-              $key794 = '';
-              $val795 = '';
-              $xfer += $input->readString($key794);
-              $xfer += $input->readString($val795);
-              $this->partitionSpecs[$key794] = $val795;
+              $key788 = '';
+              $val789 = '';
+              $xfer += $input->readString($key788);
+              $xfer += $input->readString($val789);
+              $this->partitionSpecs[$key788] = $val789;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -23383,10 +23099,10 @@ class ThriftHiveMetastore_exchange_partitions_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
         {
-          foreach ($this->partitionSpecs as $kiter796 => $viter797)
+          foreach ($this->partitionSpecs as $kiter790 => $viter791)
           {
-            $xfer += $output->writeString($kiter796);
-            $xfer += $output->writeString($viter797);
+            $xfer += $output->writeString($kiter790);
+            $xfer += $output->writeString($viter791);
           }
         }
         $output->writeMapEnd();
@@ -23519,15 +23235,15 @@ class ThriftHiveMetastore_exchange_partitions_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size798 = 0;
-            $_etype801 = 0;
-            $xfer += $input->readListBegin($_etype801, $_size798);
-            for ($_i802 = 0; $_i802 < $_size798; ++$_i802)
+            $_size792 = 0;
+            $_etype795 = 0;
+            $xfer += $input->readListBegin($_etype795, $_size792);
+            for ($_i796 = 0; $_i796 < $_size792; ++$_i796)
             {
-              $elem803 = null;
-              $elem803 = new \metastore\Partition();
-              $xfer += $elem803->read($input);
-              $this->success []= $elem803;
+              $elem797 = null;
+              $elem797 = new \metastore\Partition();
+              $xfer += $elem797->read($input);
+              $this->success []= $elem797;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -23587,9 +23303,9 @@ class ThriftHiveMetastore_exchange_partitions_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter804)
+          foreach ($this->success as $iter798)
           {
-            $xfer += $iter804->write($output);
+            $xfer += $iter798->write($output);
           }
         }
         $output->writeListEnd();
@@ -23735,14 +23451,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size805 = 0;
-            $_etype808 = 0;
-            $xfer += $input->readListBegin($_etype808, $_size805);
-            for ($_i809 = 0; $_i809 < $_size805; ++$_i809)
+            $_size799 = 0;
+            $_etype802 = 0;
+            $xfer += $input->readListBegin($_etype802, $_size799);
+            for ($_i803 = 0; $_i803 < $_size799; ++$_i803)
             {
-              $elem810 = null;
-              $xfer += $input->readString($elem810);
-              $this->part_vals []= $elem810;
+              $elem804 = null;
+              $xfer += $input->readString($elem804);
+              $this->part_vals []= $elem804;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -23759,14 +23475,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
         case 5:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size811 = 0;
-            $_etype814 = 0;
-            $xfer += $input->readListBegin($_etype814, $_size811);
-            for ($_i815 = 0; $_i815 < $_size811; ++$_i815)
+            $_size805 = 0;
+            $_etype808 = 0;
+            $xfer += $input->readListBegin($_etype808, $_size805);
+            for ($_i809 = 0; $_i809 < $_size805; ++$_i809)
             {
-              $elem816 = null;
-              $xfer += $input->readString($elem816);
-              $this->group_names []= $elem816;
+              $elem810 = null;
+              $xfer += $input->readString($elem810);
+              $this->group_names []= $elem810;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -23804,9 +23520,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter817)
+          foreach ($this->part_vals as $iter811)
           {
-            $xfer += $output->writeString($iter817);
+            $xfer += $output->writeString($iter811);
           }
         }
         $output->writeListEnd();
@@ -23826,9 +23542,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter818)
+          foreach ($this->group_names as $iter812)
           {
-            $xfer += $output->writeString($iter818);
+            $xfer += $output->writeString($iter812);
           }
         }
         $output->writeListEnd();
@@ -24419,15 +24135,15 @@ class ThriftHiveMetastore_get_partitions_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size819 = 0;
-            $_etype822 = 0;
-            $xfer += $input->readListBegin($_etype822, $_size819);
-            for ($_i823 = 0; $_i823 < $_size819; ++$_i823)
+            $_size813 = 0;
+            $_etype816 = 0;
+            $xfer += $input->readListBegin($_etype816, $_size813);
+            for ($_i817 = 0; $_i817 < $_size813; ++$_i817)
             {
-              $elem824 = null;
-              $elem824 = new \metastore\Partition();
-              $xfer += $elem824->read($input);
-              $this->success []= $elem824;
+              $elem818 = null;
+              $elem818 = new \metastore\Partition();
+              $xfer += $elem818->read($input);
+              $this->success []= $elem818;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -24471,9 +24187,9 @@ class ThriftHiveMetastore_get_partitions_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter825)
+          foreach ($this->success as $iter819)
           {
-            $xfer += $iter825->write($output);
+            $xfer += $iter819->write($output);
           }
         }
         $output->writeListEnd();
@@ -24619,14 +24335,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
         case 5:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size826 = 0;
-            $_etype829 = 0;
-            $xfer += $input->readListBegin($_etype829, $_size826);
-            for ($_i830 = 0; $_i830 < $_size826; ++$_i830)
+            $_size820 = 0;
+            $_etype823 = 0;
+            $xfer += $input->readListBegin($_etype823, $_size820);
+            for ($_i824 = 0; $_i824 < $_size820; ++$_i824)
             {
-              $elem831 = null;
-              $xfer += $input->readString($elem831);
-              $this->group_names []= $elem831;
+              $elem825 = null;
+              $xfer += $input->readString($elem825);
+              $this->group_names []= $elem825;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -24674,9 +24390,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter832)
+          foreach ($this->group_names as $iter826)
           {
-            $xfer += $output->writeString($iter832);
+            $xfer += $output->writeString($iter826);
           }
         }
         $output->writeListEnd();
@@ -24765,15 +24481,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size833 = 0;
-            $_etype836 = 0;
-            $xfer += $input->readListBegin($_etype836, $_size833);
-            for ($_i837 = 0; $_i837 < $_size833; ++$_i837)
+            $_size827 = 0;
+            $_etype830 = 0;
+            $xfer += $input->readListBegin($_etype830, $_size827);
+            for ($_i831 = 0; $_i831 < $_size827; ++$_i831)
             {
-              $elem838 = null;
-              $elem838 = new \metastore\Partition();
-              $xfer += $elem838->read($input);
-              $this->success []= $elem838;
+              $elem832 = null;
+              $elem832 = new \metastore\Partition();
+              $xfer += $elem832->read($input);
+              $this->success []= $elem832;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -24817,9 +24533,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter839)
+          foreach ($this->success as $iter833)
           {
-            $xfer += $iter839->write($output);
+            $xfer += $iter833->write($output);
           }
         }
         $output->writeListEnd();
@@ -25039,15 +24755,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size840 = 0;
-            $_etype843 = 0;
-            $xfer += $input->readListBegin($_etype843, $_size840);
-            for ($_i844 = 0; $_i844 < $_size840; ++$_i844)
+            $_size834 = 0;
+            $_etype837 = 0;
+            $xfer += $input->readListBegin($_etype837, $_size834);
+            for ($_i838 = 0; $_i838 < $_size834; ++$_i838)
             {
-              $elem845 = null;
-              $elem845 = new \metastore\PartitionSpec();
-              $xfer += $elem845->read($input);
-              $this->success []= $elem845;
+              $elem839 = null;
+              $elem839 = new \metastore\PartitionSpec();
+              $xfer += $elem839->read($input);
+              $this->success []= $elem839;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -25091,9 +24807,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter846)
+          foreach ($this->success as $iter840)
           {
-            $xfer += $iter846->write($output);
+            $xfer += $iter840->write($output);
           }
         }
         $output->writeListEnd();
@@ -25300,14 +25016,14 @@ class ThriftHiveMetastore_get_partition_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size847 = 0;
-            $_etype850 = 0;
-            $xfer += $input->readListBegin($_etype850, $_size847);
-            for ($_i851 = 0; $_i851 < $_size847; ++$_i851)
+            $_size841 = 0;
+            $_etype844 = 0;
+            $xfer += $input->readListBegin($_etype844, $_size841);
+            for ($_i845 = 0; $_i845 < $_size841; ++$_i845)
             {
-              $elem852 = null;
-              $xfer += $input->readString($elem852);
-              $this->success []= $elem852;
+              $elem846 = null;
+              $xfer += $input->readString($elem846);
+              $this->success []= $elem846;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -25343,9 +25059,9 @@ class ThriftHiveMetastore_get_partition_names_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter853)
+          foreach ($this->success as $iter847)
           {
-            $xfer += $output->writeString($iter853);
+            $xfer += $output->writeString($iter847);
           }
         }
         $output->writeListEnd();
@@ -25461,14 +25177,14 @@ class ThriftHiveMetastore_get_partitions_ps_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size854 = 0;
-            $_etype857 = 0;
-            $xfer += $input->readListBegin($_etype857, $_size854);
-            for ($_i858 = 0; $_i858 < $_size854; ++$_i858)
+            $_size848 = 0;
+            $_etype851 = 0;
+            $xfer += $input->readListBegin($_etype851, $_size848);
+            for ($_i852 = 0; $_i852 < $_size848; ++$_i852)
             {
-              $elem859 = null;
-              $xfer += $input->readString($elem859);
-              $this->part_vals []= $elem859;
+              $elem853 = null;
+              $xfer += $input->readString($elem853);
+              $this->part_vals []= $elem853;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -25513,9 +25229,9 @@ class ThriftHiveMetastore_get_partitions_ps_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter860)
+          foreach ($this->part_vals as $iter854)
           {
-            $xfer += $output->writeString($iter860);
+            $xfer += $output->writeString($iter854);
           }
         }
         $output->writeListEnd();
@@ -25609,15 +25325,15 @@ class ThriftHiveMetastore_get_partitions_ps_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size861 = 0;
-            $_etype864 = 0;
-            $xfer += $input->readListBegin($_etype864, $_size861);
-            for ($_i865 = 0; $_i865 < $_size861; ++$_i865)
+            $_size855 = 0;
+            $_etype858 = 0;
+            $xfer += $input->readListBegin($_etype858, $_size855);
+            for ($_i859 = 0; $_i859 < $_size855; ++$_i859)
             {
-              $elem866 = null;
-              $elem866 = new \metastore\Partition();
-              $xfer += $elem866->read($input);
-              $this->success []= $elem866;
+              $elem860 = null;
+              $elem860 = new \metastore\Partition();
+              $xfer += $elem860->read($input);
+              $this->success []= $elem860;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -25661,9 +25377,9 @@ class ThriftHiveMetastore_get_partitions_ps_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter867)
+          foreach ($this->success as $iter861)
           {
-            $xfer += $iter867->write($output);
+            $xfer += $iter861->write($output);
           }
         }
         $output->writeListEnd();
@@ -25810,14 +25526,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size868 = 0;
-            $_etype871 = 0;
-            $xfer += $input->readListBegin($_etype871, $_size868);
-            for ($_i872 = 0; $_i872 < $_size868; ++$_i872)
+            $_size862 = 0;
+            $_etype865 = 0;
+            $xfer += $input->readListBegin($_etype865, $_size862);
+            for ($_i866 = 0; $_i866 < $_size862; ++$_i866)
             {
-              $elem873 = null;
-              $xfer += $input->readString($elem873);
-              $this->part_vals []= $elem873;
+              $elem867 = null;
+              $xfer += $input->readString($elem867);
+              $this->part_vals []= $elem867;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -25841,14 +25557,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
         case 6:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size874 = 0;
-            $_etype877 = 0;
-            $xfer += $input->readListBegin($_etype877, $_size874);
-            for ($_i878 = 0; $_i878 < $_size874; ++$_i878)
+            $_size868 = 0;
+            $_etype871 = 0;
+            $xfer += $input->readListBegin($_etype871, $_size868);
+            for ($_i872 = 0; $_i872 < $_size868; ++$_i872)
             {
-              $elem879 = null;
-              $xfer += $input->readString($elem879);
-              $this->group_names []= $elem879;
+              $elem873 = null;
+              $xfer += $input->readString($elem873);
+              $this->group_names []= $elem873;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -25886,9 +25602,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter880)
+          foreach ($this->part_vals as $iter874)
           {
-            $xfer += $output->writeString($iter880);
+            $xfer += $output->writeString($iter874);
           }
         }
         $output->writeListEnd();
@@ -25913,9 +25629,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter881)
+          foreach ($this->group_names as $iter875)
           {
-            $xfer += $output->writeString($iter881);
+            $xfer += $output->writeString($iter875);
           }
         }
         $output->writeListEnd();
@@ -26004,15 +25720,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size882 = 0;
-            $_etype885 = 0;
-            $xfer += $input->readListBegin($_etype885, $_size882);
-            for ($_i886 = 0; $_i886 < $_size882; ++$_i886)
+            $_size876 = 0;
+            $_etype879 = 0;
+            $xfer += $input->readListBegin($_etype879, $_size876);
+            for ($_i880 = 0; $_i880 < $_size876; ++$_i880)
             {
-              $elem887 = null;
-              $elem887 = new \metastore\Partition();
-              $xfer += $elem887->read($input);
-              $this->success []= $elem887;
+              $elem881 = null;
+              $elem881 = new \metastore\Partition();
+              $xfer += $elem881->read($input);
+              $this->success []= $elem881;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -26056,9 +25772,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter888)
+          foreach ($this->success as $iter882)
           {
-            $xfer += $iter888->write($output);
+            $xfer += $iter882->write($output);
           }
         }
         $output->writeListEnd();
@@ -26179,14 +25895,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size889 = 0;
-            $_etype892 = 0;
-            $xfer += $input->readListBegin($_etype892, $_size889);
-            for ($_i893 = 0; $_i893 < $_size889; ++$_i893)
+            $_size883 = 0;
+            $_etype886 = 0;
+            $xfer += $input->readListBegin($_etype886, $_size883);
+            for ($_i887 = 0; $_i887 < $_size883; ++$_i887)
             {
-              $elem894 = null;
-              $xfer += $input->readString($elem894);
-              $this->part_vals []= $elem894;
+              $elem888 = null;
+              $xfer += $input->readString($elem888);
+              $this->part_vals []= $elem888;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -26231,9 +25947,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter895)
+          foreach ($this->part_vals as $iter889)
           {
-            $xfer += $output->writeString($iter895);
+            $xfer += $output->writeString($iter889);
           }
         }
         $output->writeListEnd();
@@ -26326,14 +26042,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size896 = 0;
-            $_etype899 = 0;
-            $xfer += $input->readListBegin($_etype899, $_size896);
-            for ($_i900 = 0; $_i900 < $_size896; ++$_i900)
+            $_size890 = 0;
+            $_etype893 = 0;
+            $xfer += $input->readListBegin($_etype893, $_size890);
+            for ($_i894 = 0; $_i894 < $_size890; ++$_i894)
             {
-              $elem901 = null;
-              $xfer += $input->readString($elem901);
-              $this->success []= $elem901;
+              $elem895 = null;
+              $xfer += $input->readString($elem895);
+              $this->success []= $elem895;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -26377,9 +26093,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter902)
+          foreach ($this->success as $iter896)
           {
-            $xfer += $output->writeString($iter902);
+            $xfer += $output->writeString($iter896);
           }
         }
         $output->writeListEnd();
@@ -26622,15 +26338,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size903 = 0;
-            $_etype906 = 0;
-            $xfer += $input->readListBegin($_etype906, $_size903);
-            for ($_i907 = 0; $_i907 < $_size903; ++$_i907)
+            $_size897 = 0;
+            $_etype900 = 0;
+            $xfer += $input->readListBegin($_etype900, $_size897);
+            for ($_i901 = 0; $_i901 < $_size897; ++$_i901)
             {
-              $elem908 = null;
-              $elem908 = new \metastore\Partition();
-              $xfer += $elem908->read($input);
-              $this->success []= $elem908;
+              $elem902 = null;
+              $elem902 = new \metastore\Partition();
+              $xfer += $elem902->read($input);
+              $this->success []= $elem902;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -26674,9 +26390,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter909)
+          foreach ($this->success as $iter903)
           {
-            $xfer += $iter909->write($output);
+            $xfer += $iter903->write($output);
           }
         }
         $output->writeListEnd();
@@ -26919,15 +26635,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size910 = 0;
-            $_etype913 = 0;
-            $xfer += $input->readListBegin($_etype913, $_size910);
-            for ($_i914 = 0; $_i914 < $_size910; ++$_i914)
+            $_size904 = 0;
+            $_etype907 = 0;
+            $xfer += $input->readListBegin($_etype907, $_size904);
+            for ($_i908 = 0; $_i908 < $_size904; ++$_i908)
             {
-              $elem915 = null;
-              $elem915 = new \metastore\PartitionSpec();
-              $xfer += $elem915->read($input);
-              $this->success []= $elem915;
+              $elem909 = null;
+              $elem909 = new \metastore\PartitionSpec();
+              $xfer += $elem909->read($input);
+              $this->success []= $elem909;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -26971,9 +26687,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter916)
+          foreach ($this->success as $iter910)
           {
-            $xfer += $iter916->write($output);
+            $xfer += $iter910->write($output);
           }
         }
         $output->writeListEnd();
@@ -27539,14 +27255,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->names = array();
-            $_size917 = 0;
-            $_etype920 = 0;
-            $xfer += $input->readListBegin($_etype920, $_size917);
-            for ($_i921 = 0; $_i921 < $_size917; ++$_i921)
+            $_size911 = 0;
+            $_etype914 = 0;
+            $xfer += $input->readListBegin($_etype914, $_size911);
+            for ($_i915 = 0; $_i915 < $_size911; ++$_i915)
             {
-              $elem922 = null;
-              $xfer += $input->readString($elem922);
-              $this->names []= $elem922;
+              $elem916 = null;
+              $xfer += $input->readString($elem916);
+              $this->names []= $elem916;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -27584,9 +27300,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args {
       {
         $output->writeListBegin(TType::STRING, count($this->names));
         {
-          foreach ($this->names as $iter923)
+          foreach ($this->names as $iter917)
           {
-            $xfer += $output->writeString($iter923);
+            $xfer += $output->writeString($iter917);
           }
         }
         $output->writeListEnd();
@@ -27675,15 +27391,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size924 = 0;
-            $_etype927 = 0;
-            $xfer += $input->readListBegin($_etype927, $_size924);
-            for ($_i928 = 0; $_i928 < $_size924; ++$_i928)
+            $_size918 = 0;
+            $_etype921 = 0;
+            $xfer += $input->readListBegin($_etype921, $_size918);
+            for ($_i922 = 0; $_i922 < $_size918; ++$_i922)
             {
-              $elem929 = null;
-              $elem929 = new \metastore\Partition();
-              $xfer += $elem929->read($input);
-              $this->success []= $elem929;
+              $elem923 = null;
+              $elem923 = new \metastore\Partition();
+              $xfer += $elem923->read($input);
+              $this->success []= $elem923;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -27727,9 +27443,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter930)
+          foreach ($this->success as $iter924)
           {
-            $xfer += $iter930->write($output);
+            $xfer += $iter924->write($output);
           }
         }
         $output->writeListEnd();
@@ -28068,15 +27784,15 @@ class ThriftHiveMetastore_alter_partitions_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->new_parts = array();
-            $_size931 = 0;
-            $_etype934 = 0;
-            $xfer += $input->readListBegin($_etype934, $_size931);
-            for ($_i935 = 0; $_i935 < $_size931; ++$_i935)
+            $_size925 = 0;
+            $_etype928 = 0;
+            $xfer += $input->readListBegin($_etype928, $_size925);
+            for ($_i929 = 0; $_i929 < $_size925; ++$_i929)
             {
-              $elem936 = null;
-              $elem936 = new \metastore\Partition();
-              $xfer += $elem936->read($input);
-              $this->new_parts []= $elem936;
+              $elem930 = null;
+              $elem930 = new \metastore\Partition();
+              $xfer += $elem930->read($input);
+              $this->new_parts []= $elem930;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -28114,9 +27830,9 @@ class ThriftHiveMetastore_alter_partitions_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->new_parts));
         {
-          foreach ($this->new_parts as $iter937)
+          foreach ($this->new_parts as $iter931)
           {
-            $xfer += $iter937->write($output);
+            $xfer += $iter931->write($output);
           }
         }
         $output->writeListEnd();
@@ -28331,15 +28047,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->new_parts = array();
-            $_size938 = 0;
-            $_etype941 = 0;
-            $xfer += $input->readListBegin($_etype941, $_size938);
-            for ($_i942 = 0; $_i942 < $_size938; ++$_i942)
+            $_size932 = 0;
+            $_etype935 = 0;
+            $xfer += $input->readListBegin($_etype935, $_size932);
+            for ($_i936 = 0; $_i936 < $_size932; ++$_i936)
             {
-              $elem943 = null;
-              $elem943 = new \metastore\Partition();
-              $xfer += $elem943->read($input);
-              $this->new_parts []= $elem943;
+              $elem937 = null;
+              $elem937 = new \metastore\Partition();
+              $xfer += $elem937->read($input);
+              $this->new_parts []= $elem937;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -28385,9 +28101,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->new_parts));
         {
-          foreach ($this->new_parts as $iter944)
+          foreach ($this->new_parts as $iter938)
           {
-            $xfer += $iter944->write($output);
+            $xfer += $iter938->write($output);
           }
         }
         $output->writeListEnd();
@@ -28865,14 +28581,14 @@ class ThriftHiveMetastore_rename_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size945 = 0;
-            $_etype948 = 0;
-            $xfer += $input->readListBegin($_etype948, $_size945);
-            for ($_i949 = 0; $_i949 < $_size945; ++$_i949)
+            $_size939 = 0;
+            $_etype942 = 0;
+            $xfer += $input->readListBegin($_etype942, $_size939);
+            for ($_i943 = 0; $_i943 < $_size939; ++$_i943)
             {
-              $elem950 = null;
-              $xfer += $input->readString($elem950);
-              $this->part_vals []= $elem950;
+              $elem944 = null;
+              $xfer += $input->readString($elem944);
+              $this->part_vals []= $elem944;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -28918,9 +28634,9 @@ class ThriftHiveMetastore_rename_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter951)
+          foreach ($this->part_vals as $iter945)
           {
-            $xfer += $output->writeString($iter951);
+            $xfer += $output->writeString($iter945);
           }
         }
         $output->writeListEnd();
@@ -29105,14 +28821,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args {
         case 1:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size952 = 0;
-            $_etype955 = 0;
-            $xfer += $input->readListBegin($_etype955, $_size952);
-            for ($_i956 = 0; $_i956 < $_size952; ++$_i956)
+            $_size946 = 0;
+            $_etype949 = 0;
+            $xfer += $input->readListBegin($_etype949, $_size946);
+            for ($_i950 = 0; $_i950 < $_size946; ++$_i950)
             {
-              $elem957 = null;
-              $xfer += $input->readString($elem957);
-              $this->part_vals []= $elem957;
+              $elem951 = null;
+              $xfer += $input->readString($elem951);
+              $this->part_vals []= $elem951;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -29147,9 +28863,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter958)
+          foreach ($this->part_vals as $iter952)
           {
-            $xfer += $output->writeString($iter958);
+            $xfer += $output->writeString($iter952);
           }
         }
         $output->writeListEnd();
@@ -29603,14 +29319,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size959 = 0;
-            $_etype962 = 0;
-            $xfer += $input->readListBegin($_etype962, $_size959);
-            for ($_i963 = 0; $_i963 < $_size959; ++$_i963)
+            $_size953 = 0;
+            $_etype956 = 0;
+            $xfer += $input->readListBegin($_etype956, $_size953);
+            for ($_i957 = 0; $_i957 < $_size953; ++$_i957)
             {
-              $elem964 = null;
-              $xfer += $input->readString($elem964);
-              $this->success []= $elem964;
+              $elem958 = null;
+              $xfer += $input->readString($elem958);
+              $this->success []= $elem958;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -29646,9 +29362,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter965)
+          foreach ($this->success as $iter959)
           {
-            $xfer += $output->writeString($iter965);
+            $xfer += $output->writeString($iter959);
           }
         }
         $output->writeListEnd();
@@ -29808,17 +29524,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result {
         case 0:
           if ($ftype == TType::MAP) {
             $this->success = array();
-            $_size966 = 0;
-            $_ktype967 = 0;
-            $_vtype968 = 0;
-            $xfer += $input->readMapBegin($_ktype967, $_vtype968, $_size966);
-            for ($_i970 = 0; $_i970 < $_size966; ++$_i970)
+            $_size960 = 0;
+            $_ktype961 = 0;
+            $_vtype962 = 0;
+            $xfer += $input->readMapBegin($_ktype961, $_vtype962, $_size960);
+            for ($_i964 = 0; $_i964 < $_size960; ++$_i964)
             {
-              $key971 = '';
-              $val972 = '';
-              $xfer += $input->readString($key971);
-              $xfer += $input->readString($val972);
-              $this->success[$key971] = $val972;
+              $key965 = '';
+              $val966 = '';
+              $xfer += $input->readString($key965);
+              $xfer += $input->readString($val966);
+              $this->success[$key965] = $val966;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -29854,10 +29570,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success));
         {
-          foreach ($this->success as $kiter973 => $viter974)
+          foreach ($this->success as $kiter967 => $viter968)
           {
-            $xfer += $output->writeString($kiter973);
-            $xfer += $output->writeString($viter974);
+            $xfer += $output->writeString($kiter967);
+            $xfer += $output->writeString($viter968);
           }
         }
         $output->writeMapEnd();
@@ -29977,17 +29693,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args {
         case 3:
           if ($ftype == TType::MAP) {
             $this->part_vals = array();
-            $_size975 = 0;
-            $_ktype976 = 0;
-            $_vtype977 = 0;
-            $xfer += $input->readMapBegin($_ktype976, $_vtype977, $_size975);
-            for ($_i979 = 0; $_i979 < $_size975; ++$_i979)
+            $_size969 = 0;
+            $_ktype970 = 0;
+            $_vtype971 = 0;
+            $xfer += $input->readMapBegin($_ktype970, $_vtype971, $_size969);
+            for ($_i973 = 0; $_i973 < $_size969; ++$_i973)
             {
-              $key980 = '';
-              $val981 = '';
-              $xfer += $input->readString($key980);
-              $xfer += $input->readString($val981);
-              $this->part_vals[$key980] = $val981;
+              $key974 = '';
+              $val975 = '';
+              $xfer += $input->readString($key974);
+              $xfer += $input->readString($val975);
+              $this->part_vals[$key974] = $val975;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -30032,10 +29748,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $kiter982 => $viter983)
+          foreach ($this->part_vals as $kiter976 => $viter977)
           {
-            $xfer += $output->writeString($kiter982);
-            $xfer += $output->writeString($viter983);
+            $xfer += $output->writeString($kiter976);
+            $xfer += $output->writeString($viter977);
           }
         }
         $output->writeMapEnd();
@@ -30357,17 +30073,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args {
         case 3:
           if ($ftype == TType::MAP) {
             $this->part_vals = array();
-            $_size984 = 0;
-            $_ktype985 = 0;
-            $_vtype986 = 0;
-            $xfer += $input->readMapBegin($_ktype985, $_vtype986, $_size984);
-            for ($_i988 = 0; $_i988 < $_size984; ++$_i988)
+            $_size978 = 0;
+            $_ktype979 = 0;
+            $_vtype980 = 0;
+            $xfer += $input->readMapBegin($_ktype979, $_vtype980, $_size978);
+            for ($_i982 = 0; $_i982 < $_size978; ++$_i982)
             {
-              $key989 = '';
-              $val990 = '';
-              $xfer += $input->readString($key989);
-              $xfer += $input->readString($val990);
-              $this->part_vals[$key989] = $val990;
+              $key983 = '';
+              $val984 = '';
+              $xfer += $input->readString($key983);
+              $xfer += $input->readString($val984);
+              $this->part_vals[$key983] = $val984;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -30412,10 +30128,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $kiter991 => $viter992)
+          foreach ($this->part_vals as $kiter985 => $viter986)
           {
-            $xfer += $output->writeString($kiter991);
-            $xfer += $output->writeString($viter992);
+            $xfer += $output->writeString($kiter985);
+            $xfer += $output->writeString($viter986);
           }
         }
         $output->writeMapEnd();
@@ -31889,15 +31605,15 @@ class ThriftHiveMetastore_get_indexes_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size993 = 0;
-            $_etype996 = 0;
-            $xfer += $input->readListBegin($_etype996, $_size993);
-            for ($_i997 = 0; $_i997 < $_size993; ++$_i997)
+            $_size987 = 0;
+            $_etype990 = 0;
+            $xfer += $input->readListBegin($_etype990, $_size987);
+            for ($_i991 = 0; $_i991 < $_size987; ++$_i991)
             {
-              $elem998 = null;
-              $elem998 = new \metastore\Index();
-              $xfer += $elem998->read($input);
-              $this->success []= $elem998;
+              $elem992 = null;
+              $elem992 = new \metastore\Index();
+              $xfer += $elem992->read($input);
+              $this->success []= $elem992;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -31941,9 +31657,9 @@ class ThriftHiveMetastore_get_indexes_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter999)
+          foreach ($this->success as $iter993)
           {
-            $xfer += $iter999->write($output);
+            $xfer += $iter993->write($output);
           }
         }
         $output->writeListEnd();
@@ -32150,14 +31866,14 @@ class ThriftHiveMetastore_get_index_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1000 = 0;
-            $_etype1003 = 0;
-            $xfer += $input->readListBegin($_etype1003, $_size1000);
-            for ($_i1004 = 0; $_i1004 < $_size1000; ++$_i1004)
+            $_size994 = 0;
+            $_etype997 = 0;
+            $xfer += $input->readListBegin($_etype997, $_size994);
+            for ($_i998 = 0; $_i998 < $_size994; ++$_i998)
             {
-              $elem1005 = null;
-              $xfer += $input->readString($elem1005);
-              $this->success []= $elem1005;
+              $elem999 = null;
+              $xfer += $input->readString($elem999);
+              $this->success []= $elem999;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -32193,9 +31909,9 @@ class ThriftHiveMetastore_get_index_names_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter1006)
+          foreach ($this->success as $iter1000)
           {
-            $xfer += $output->writeString($iter1006);
+            $xfer += $output->writeString($iter1000);
           }
         }
         $output->writeListEnd();
@@ -36089,14 +35805,14 @@ class ThriftHiveMetastore_get_functions_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1007 = 0;
-            $_etype1010 = 0;
-            $xfer += $input->readListBegin($_etype1010, $_size1007);
-            for ($_i1011 = 0; $_i1011 < $_size1007; ++$_i1011)
+            $_size1001 = 0;
+            $_etype1004 = 0;
+            $xfer += $input->readListBegin($_etype1004, $_size1001);
+            for ($_i1005 = 0; $_i1005 < $_size1001; ++$_i1005)
             {
-              $elem1012 = null;
-              $xfer += $input->readString($elem1012);
-              $this->success []= $elem1012;
+              $elem1006 = null;
+              $xfer += $input->readString($elem1006);
+              $this->success []= $elem1006;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -36132,9 +35848,9 @@ class ThriftHiveMetastore_get_functions_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter1013)
+          foreach ($this->suc

<TRUNCATED>
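
For context on the PHP hunks above: they come from Thrift-generated code (ThriftHiveMetastore.php), and the revert only shifts the numeric suffixes on the compiler's throwaway temporaries ($_size*, $_etype*, $_i*, $elem*, $iter*); the serialization logic itself is unchanged. Stripped of those suffixes, the generated read/write pattern for a list<Partition> field looks roughly like the sketch below (variable names are illustrative, not the exact identifiers the compiler emits):

    // Read side: rebuild a list<Partition> field from the wire.
    $this->new_parts = array();
    $size = 0;
    $etype = 0;
    $xfer += $input->readListBegin($etype, $size);
    for ($i = 0; $i < $size; ++$i) {
      $elem = new \metastore\Partition();
      $xfer += $elem->read($input);      // each struct element deserializes itself
      $this->new_parts []= $elem;
    }
    $xfer += $input->readListEnd();

    // Write side: emit the same list back out as a list of STRUCTs.
    $output->writeListBegin(TType::STRUCT, count($this->new_parts));
    foreach ($this->new_parts as $iter) {
      $xfer += $iter->write($output);
    }
    $output->writeListEnd();

Because the generator numbers these temporaries sequentially across the whole file, any change to the IDL shifts every suffix after it, which is why the diff of this generated file is dominated by purely mechanical renumbering.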

[35/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
index 5173d8b..c26a075 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
@@ -28,23 +28,20 @@ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.messaging.event.filters.AndFilter;
-import org.apache.hadoop.hive.metastore.messaging.event.filters.DatabaseAndTableFilter;
-import org.apache.hadoop.hive.metastore.messaging.event.filters.EventBoundaryFilter;
-import org.apache.hadoop.hive.metastore.messaging.event.filters.MessageFormatFilter;
+import org.apache.hadoop.hive.metastore.messaging.EventUtils;
 import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
 import org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec.ReplStateMap;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.util.Shell;
 import org.apache.thrift.TException;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Rule;
+import org.junit.Ignore;
 import org.junit.Test;
-import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -64,22 +61,17 @@ import static org.junit.Assert.assertNull;
 
 public class TestReplicationScenarios {
 
-  @Rule
-  public final TestName testName = new TestName();
-
-  private final static String DBNOTIF_LISTENER_CLASSNAME =
-      "org.apache.hive.hcatalog.listener.DbNotificationListener";
+  final static String DBNOTIF_LISTENER_CLASSNAME = "org.apache.hive.hcatalog.listener.DbNotificationListener";
       // FIXME : replace with hive copy once that is copied
-  private final static String tid =
+  final static String tid =
       TestReplicationScenarios.class.getCanonicalName().replace('.','_') + "_" + System.currentTimeMillis();
-  private final static String TEST_PATH =
-      System.getProperty("test.warehouse.dir", "/tmp") + Path.SEPARATOR + tid;
+  final static String TEST_PATH = System.getProperty("test.warehouse.dir","/tmp") + Path.SEPARATOR + tid;
 
-  private static HiveConf hconf;
-  private static boolean useExternalMS = false;
-  private static int msPort;
-  private static Driver driver;
-  private static HiveMetaStoreClient metaStoreClient;
+  static HiveConf hconf;
+  static boolean useExternalMS = false;
+  static int msPort;
+  static Driver driver;
+  static HiveMetaStoreClient metaStoreClient;
 
   protected static final Logger LOG = LoggerFactory.getLogger(TestReplicationScenarios.class);
   private ArrayList<String> lastResults;
@@ -101,7 +93,7 @@ public class TestReplicationScenarios {
       return;
     }
 
-    hconf.setVar(HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS,
+    hconf.setVar(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS,
         DBNOTIF_LISTENER_CLASSNAME); // turn on db notification listener on metastore
     hconf.setBoolVar(HiveConf.ConfVars.REPLCMENABLED, true);
     hconf.setBoolVar(HiveConf.ConfVars.FIRE_EVENTS_FOR_DML, true);
@@ -148,32 +140,6 @@ public class TestReplicationScenarios {
     ReplicationSemanticAnalyzer.injectNextDumpDirForTest(String.valueOf(next));
   }
 
-  @Test
-  public void testFunctionReplicationAsPartOfBootstrap() throws IOException {
-    String dbName = createDB(testName.getMethodName());
-    run("CREATE FUNCTION " + dbName
-        + ".testFunction as 'com.yahoo.sketches.hive.theta.DataToSketchUDAF' "
-        + "using jar  'ivy://com.yahoo.datasketches:sketches-hive:0.8.2'");
-
-    String replicatedDbName = loadAndVerify(dbName);
-    run("SHOW FUNCTIONS LIKE '" + replicatedDbName + "*'");
-    verifyResults(new String[] { replicatedDbName + ".testFunction" });
-  }
-
-  private String loadAndVerify(String dbName) throws IOException {
-    advanceDumpDir();
-    run("REPL DUMP " + dbName);
-    String dumpLocation = getResult(0, 0);
-    String lastReplicationId = getResult(0, 1, true);
-    String replicatedDbName = dbName + "_replicated";
-    run("EXPLAIN REPL LOAD " + replicatedDbName + " FROM '" + dumpLocation + "'");
-    printOutput();
-    run("REPL LOAD " + replicatedDbName + " FROM '" + dumpLocation + "'");
-    verifyRun("REPL STATUS " + replicatedDbName, lastReplicationId);
-    return replicatedDbName;
-  }
-
-
   /**
    * Tests basic operation - creates a db, with 4 tables, 2 ptned and 2 unptned.
    * Inserts data into one of the ptned tables, and one of the unptned tables,
@@ -182,8 +148,12 @@ public class TestReplicationScenarios {
    */
   @Test
   public void testBasic() throws IOException {
-    String name = testName.getMethodName();
-    String dbName = createDB(name);
+
+    String testName = "basic";
+    LOG.info("Testing "+testName);
+    String dbName = testName + "_" + tid;
+
+    run("CREATE DATABASE " + dbName);
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".unptned_empty(a string) STORED AS TEXTFILE");
@@ -194,9 +164,9 @@ public class TestReplicationScenarios {
     String[] ptn_data_2 = new String[]{ "fifteen", "sixteen", "seventeen"};
     String[] empty = new String[]{};
 
-    String unptn_locn = new Path(TEST_PATH, name + "_unptn").toUri().getPath();
-    String ptn_locn_1 = new Path(TEST_PATH, name + "_ptn1").toUri().getPath();
-    String ptn_locn_2 = new Path(TEST_PATH, name + "_ptn2").toUri().getPath();
+    String unptn_locn = new Path(TEST_PATH , testName + "_unptn").toUri().getPath();
+    String ptn_locn_1 = new Path(TEST_PATH , testName + "_ptn1").toUri().getPath();
+    String ptn_locn_2 = new Path(TEST_PATH , testName + "_ptn2").toUri().getPath();
 
     createTestDataFile(unptn_locn, unptn_data);
     createTestDataFile(ptn_locn_1, ptn_data_1);
@@ -211,19 +181,31 @@ public class TestReplicationScenarios {
     verifySetup("SELECT a from " + dbName + ".ptned_empty", empty);
     verifySetup("SELECT * from " + dbName + ".unptned_empty", empty);
 
-    String replicatedDbName = loadAndVerify(dbName);
+    advanceDumpDir();
+    run("REPL DUMP " + dbName);
+    String replDumpLocn = getResult(0,0);
+    String replDumpId = getResult(0,1,true);
+    run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
+    printOutput();
+    run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
+
+    verifyRun("REPL STATUS " + dbName + "_dupe", replDumpId);
 
-    verifyRun("SELECT * from " + replicatedDbName + ".unptned", unptn_data);
-    verifyRun("SELECT a from " + replicatedDbName + ".ptned WHERE b=1", ptn_data_1);
-    verifyRun("SELECT a from " + replicatedDbName + ".ptned WHERE b=2", ptn_data_2);
+    verifyRun("SELECT * from " + dbName + "_dupe.unptned", unptn_data);
+    verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=1", ptn_data_1);
+    verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=2", ptn_data_2);
     verifyRun("SELECT a from " + dbName + ".ptned_empty", empty);
     verifyRun("SELECT * from " + dbName + ".unptned_empty", empty);
   }
 
   @Test
   public void testBasicWithCM() throws Exception {
-    String name = testName.getMethodName();
-    String dbName = createDB(name);
+
+    String testName = "basic_with_cm";
+    LOG.info("Testing "+testName);
+    String dbName = testName + "_" + tid;
+
+    run("CREATE DATABASE " + dbName);
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".unptned_empty(a string) STORED AS TEXTFILE");
@@ -235,10 +217,10 @@ public class TestReplicationScenarios {
     String[] ptn_data_2_later = new String[]{ "eighteen", "nineteen", "twenty"};
     String[] empty = new String[]{};
 
-    String unptn_locn = new Path(TEST_PATH, name + "_unptn").toUri().getPath();
-    String ptn_locn_1 = new Path(TEST_PATH, name + "_ptn1").toUri().getPath();
-    String ptn_locn_2 = new Path(TEST_PATH, name + "_ptn2").toUri().getPath();
-    String ptn_locn_2_later = new Path(TEST_PATH, name + "_ptn2_later").toUri().getPath();
+    String unptn_locn = new Path(TEST_PATH , testName + "_unptn").toUri().getPath();
+    String ptn_locn_1 = new Path(TEST_PATH , testName + "_ptn1").toUri().getPath();
+    String ptn_locn_2 = new Path(TEST_PATH , testName + "_ptn2").toUri().getPath();
+    String ptn_locn_2_later = new Path(TEST_PATH , testName + "_ptn2_later").toUri().getPath();
 
     createTestDataFile(unptn_locn, unptn_data);
     createTestDataFile(ptn_locn_1, ptn_data_1);
@@ -298,61 +280,12 @@ public class TestReplicationScenarios {
   }
 
   @Test
-  public void testBootstrapLoadOnExistingDb() throws IOException {
-    String testName = "bootstrapLoadOnExistingDb";
+  public void testIncrementalAdds() throws IOException {
+    String testName = "incrementalAdds";
     LOG.info("Testing "+testName);
     String dbName = testName + "_" + tid;
 
     run("CREATE DATABASE " + dbName);
-    run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
-
-    String[] unptn_data = new String[]{ "eleven" , "twelve" };
-    String unptn_locn = new Path(TEST_PATH , testName + "_unptn").toUri().getPath();
-    createTestDataFile(unptn_locn, unptn_data);
-
-    run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned");
-    verifySetup("SELECT * from " + dbName + ".unptned ORDER BY a", unptn_data);
-
-    // Create an empty database to load
-    run("CREATE DATABASE " + dbName + "_empty");
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName);
-    String replDumpLocn = getResult(0,0);
-    String replDumpId = getResult(0,1,true);
-    // Load to an empty database
-    run("REPL LOAD " + dbName + "_empty FROM '" + replDumpLocn + "'");
-
-    // REPL STATUS should return same repl ID as dump
-    verifyRun("REPL STATUS " + dbName + "_empty", replDumpId);
-    verifyRun("SELECT * from " + dbName + "_empty.unptned", unptn_data);
-
-    String[] nullReplId = new String[]{ "NULL" };
-
-    // Create a database with a table
-    run("CREATE DATABASE " + dbName + "_withtable");
-    run("CREATE TABLE " + dbName + "_withtable.unptned(a string) STORED AS TEXTFILE");
-    // Load using same dump to a DB with table. It should fail as DB is not empty.
-    verifyFail("REPL LOAD " + dbName + "_withtable FROM '" + replDumpLocn + "'");
-
-    // REPL STATUS should return NULL
-    verifyRun("REPL STATUS " + dbName + "_withtable", nullReplId);
-
-    // Create a database with a view
-    run("CREATE DATABASE " + dbName + "_withview");
-    run("CREATE TABLE " + dbName + "_withview.unptned(a string) STORED AS TEXTFILE");
-    run("CREATE VIEW " + dbName + "_withview.view AS SELECT * FROM " + dbName + "_withview.unptned");
-    // Load using same dump to a DB with view. It should fail as DB is not empty.
-    verifyFail("REPL LOAD " + dbName + "_withview FROM '" + replDumpLocn + "'");
-
-    // REPL STATUS should return NULL
-    verifyRun("REPL STATUS " + dbName + "_withview", nullReplId);
-  }
-
-  @Test
-  public void testIncrementalAdds() throws IOException {
-    String name = testName.getMethodName();
-    String dbName = createDB(name);
 
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE");
@@ -371,9 +304,9 @@ public class TestReplicationScenarios {
     String[] ptn_data_2 = new String[]{ "fifteen", "sixteen", "seventeen"};
     String[] empty = new String[]{};
 
-    String unptn_locn = new Path(TEST_PATH, name + "_unptn").toUri().getPath();
-    String ptn_locn_1 = new Path(TEST_PATH, name + "_ptn1").toUri().getPath();
-    String ptn_locn_2 = new Path(TEST_PATH, name + "_ptn2").toUri().getPath();
+    String unptn_locn = new Path(TEST_PATH , testName + "_unptn").toUri().getPath();
+    String ptn_locn_1 = new Path(TEST_PATH , testName + "_ptn1").toUri().getPath();
+    String ptn_locn_2 = new Path(TEST_PATH , testName + "_ptn2").toUri().getPath();
 
     createTestDataFile(unptn_locn, unptn_data);
     createTestDataFile(ptn_locn_1, ptn_data_1);
@@ -435,8 +368,11 @@ public class TestReplicationScenarios {
   @Test
   public void testDrops() throws IOException {
 
-    String name = testName.getMethodName();
-    String dbName = createDB(name);
+    String testName = "drops";
+    LOG.info("Testing "+testName);
+    String dbName = testName + "_" + tid;
+
+    run("CREATE DATABASE " + dbName);
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned2(a string) partitioned by (b string) STORED AS TEXTFILE");
@@ -447,9 +383,9 @@ public class TestReplicationScenarios {
     String[] ptn_data_2 = new String[]{ "fifteen", "sixteen", "seventeen"};
     String[] empty = new String[]{};
 
-    String unptn_locn = new Path(TEST_PATH, name + "_unptn").toUri().getPath();
-    String ptn_locn_1 = new Path(TEST_PATH, name + "_ptn1").toUri().getPath();
-    String ptn_locn_2 = new Path(TEST_PATH, name + "_ptn2").toUri().getPath();
+    String unptn_locn = new Path(TEST_PATH , testName + "_unptn").toUri().getPath();
+    String ptn_locn_1 = new Path(TEST_PATH , testName + "_ptn1").toUri().getPath();
+    String ptn_locn_2 = new Path(TEST_PATH , testName + "_ptn2").toUri().getPath();
 
     createTestDataFile(unptn_locn, unptn_data);
     createTestDataFile(ptn_locn_1, ptn_data_1);
@@ -546,7 +482,10 @@ public class TestReplicationScenarios {
   public void testDropsWithCM() throws IOException {
 
     String testName = "drops_with_cm";
-    String dbName = createDB(testName);
+    LOG.info("Testing "+testName);
+    String dbName = testName + "_" + tid;
+
+    run("CREATE DATABASE " + dbName);
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned2(a string) partitioned by (b string) STORED AS TEXTFILE");
@@ -669,7 +608,10 @@ public class TestReplicationScenarios {
   public void testAlters() throws IOException {
 
     String testName = "alters";
-    String dbName = createDB(testName);
+    LOG.info("Testing "+testName);
+    String dbName = testName + "_" + tid;
+
+    run("CREATE DATABASE " + dbName);
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".unptned2(a string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b string) STORED AS TEXTFILE");
@@ -851,7 +793,10 @@ public class TestReplicationScenarios {
   @Test
   public void testIncrementalLoad() throws IOException {
     String testName = "incrementalLoad";
-    String dbName = createDB(testName);
+    LOG.info("Testing " + testName);
+    String dbName = testName + "_" + tid;
+
+    run("CREATE DATABASE " + dbName);
 
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE");
@@ -899,7 +844,6 @@ public class TestReplicationScenarios {
     run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
     verifyRun("SELECT * from " + dbName + "_dupe.unptned_late", unptn_data);
 
-    run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=1)");
     run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName
         + ".ptned PARTITION(b=1)");
     verifySetup("SELECT a from " + dbName + ".ptned WHERE b=1", ptn_data_1);
@@ -929,14 +873,15 @@ public class TestReplicationScenarios {
 
     verifyRun("SELECT a from " + dbName + "_dupe.ptned_late WHERE b=1", ptn_data_1);
     verifyRun("SELECT a from " + dbName + "_dupe.ptned_late WHERE b=2", ptn_data_2);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=1", ptn_data_1);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=2", ptn_data_2);
   }
 
   @Test
   public void testIncrementalInserts() throws IOException {
     String testName = "incrementalInserts";
-    String dbName = createDB(testName);
+    LOG.info("Testing " + testName);
+    String dbName = testName + "_" + tid;
+
+    run("CREATE DATABASE " + dbName);
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
 
     advanceDumpDir();
@@ -947,14 +892,13 @@ public class TestReplicationScenarios {
     run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
 
     String[] unptn_data = new String[] { "eleven", "twelve" };
-
     run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')");
     run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')");
-    verifySetup("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data);
+    verifyRun("SELECT a from " + dbName + ".unptned", unptn_data);
 
     run("CREATE TABLE " + dbName + ".unptned_late LIKE " + dbName + ".unptned");
     run("INSERT INTO TABLE " + dbName + ".unptned_late SELECT * FROM " + dbName + ".unptned");
-    verifySetup("SELECT * from " + dbName + ".unptned_late ORDER BY a", unptn_data);
+    verifyRun("SELECT * from " + dbName + ".unptned_late", unptn_data);
 
     advanceDumpDir();
     run("REPL DUMP " + dbName + " FROM " + replDumpId);
@@ -965,227 +909,14 @@ public class TestReplicationScenarios {
     run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
     printOutput();
     run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-    verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data);
-    verifyRun("SELECT a from " + dbName + ".unptned_late ORDER BY a", unptn_data);
-    verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data);
-    verifyRun("SELECT a from " + dbName + "_dupe.unptned_late ORDER BY a", unptn_data);
-
-    String[] unptn_data_after_ins = new String[] { "eleven", "thirteen", "twelve" };
-    String[] data_after_ovwrite = new String[] { "hundred" };
-    run("INSERT INTO TABLE " + dbName + ".unptned_late values('" + unptn_data_after_ins[1] + "')");
-    verifySetup("SELECT a from " + dbName + ".unptned_late ORDER BY a", unptn_data_after_ins);
-    run("INSERT OVERWRITE TABLE " + dbName + ".unptned values('" + data_after_ovwrite[0] + "')");
-    verifySetup("SELECT a from " + dbName + ".unptned", data_after_ovwrite);
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName + " FROM " + replDumpId);
-    incrementalDumpLocn = getResult(0, 0);
-    incrementalDumpId = getResult(0, 1, true);
-    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
-    replDumpId = incrementalDumpId;
-    run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-    printOutput();
-    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-
-    verifyRun("SELECT a from " + dbName + "_dupe.unptned_late ORDER BY a", unptn_data_after_ins);
-
-    verifyRun("SELECT a from " + dbName + "_dupe.unptned", data_after_ovwrite);
-  }
-
-  @Test
-  public void testIncrementalInsertToPartition() throws IOException {
-    String testName = "incrementalInsertToPartition";
-    LOG.info("Testing " + testName);
-    String dbName = testName + "_" + tid;
-
-    run("CREATE DATABASE " + dbName);
-    run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE");
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName);
-    String replDumpLocn = getResult(0, 0);
-    String replDumpId = getResult(0, 1, true);
-    LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId);
-    run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
-
-    String[] ptn_data_1 = new String[] { "fifteen", "fourteen", "thirteen" };
-    String[] ptn_data_2 = new String[] { "fifteen", "seventeen", "sixteen" };
-
-    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[0] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[1] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[2] + "')");
-
-    run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=2)");
-    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[0] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[1] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[2] + "')");
-    verifySetup("SELECT a from " + dbName + ".ptned where (b=1) ORDER BY a", ptn_data_1);
-    verifySetup("SELECT a from " + dbName + ".ptned where (b=2) ORDER BY a", ptn_data_2);
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName + " FROM " + replDumpId);
-    String incrementalDumpLocn = getResult(0, 0);
-    String incrementalDumpId = getResult(0, 1, true);
-    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
-    replDumpId = incrementalDumpId;
-    run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-    printOutput();
-    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-    verifyRun("SELECT a from " + dbName + ".ptned where (b=1) ORDER BY a", ptn_data_1);
-    verifyRun("SELECT a from " + dbName + ".ptned where (b=2) ORDER BY a", ptn_data_2);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", ptn_data_2);
-
-    String[] data_after_ovwrite = new String[] { "hundred" };
-    // Insert overwrite on existing partition
-    run("INSERT OVERWRITE TABLE " + dbName + ".ptned partition(b=2) values('" + data_after_ovwrite[0] + "')");
-    verifySetup("SELECT a from " + dbName + ".ptned where (b=2)", data_after_ovwrite);
-    // Insert overwrite on dynamic partition
-    run("INSERT OVERWRITE TABLE " + dbName + ".ptned partition(b=3) values('" + data_after_ovwrite[0] + "')");
-    verifySetup("SELECT a from " + dbName + ".ptned where (b=3)", data_after_ovwrite);
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName + " FROM " + replDumpId);
-    incrementalDumpLocn = getResult(0, 0);
-    incrementalDumpId = getResult(0, 1, true);
-    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
-    replDumpId = incrementalDumpId;
-    run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-    printOutput();
-    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2)", data_after_ovwrite);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=3)", data_after_ovwrite);
-  }
-
-  @Test
-  public void testViewsReplication() throws IOException {
-    String testName = "viewsReplication";
-    String dbName = createDB(testName);
-
-    run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
-    run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE");
-    run("CREATE VIEW " + dbName + ".virtual_view AS SELECT * FROM " + dbName + ".unptned");
-
-    String[] unptn_data = new String[]{ "eleven" , "twelve" };
-    String[] ptn_data_1 = new String[]{ "thirteen", "fourteen", "fifteen"};
-    String[] ptn_data_2 = new String[]{ "fifteen", "sixteen", "seventeen"};
-    String[] empty = new String[]{};
-
-    String unptn_locn = new Path(TEST_PATH , testName + "_unptn").toUri().getPath();
-    String ptn_locn_1 = new Path(TEST_PATH , testName + "_ptn1").toUri().getPath();
-    String ptn_locn_2 = new Path(TEST_PATH , testName + "_ptn2").toUri().getPath();
-
-    createTestDataFile(unptn_locn, unptn_data);
-    createTestDataFile(ptn_locn_1, ptn_data_1);
-    createTestDataFile(ptn_locn_2, ptn_data_2);
-
-    verifySetup("SELECT a from " + dbName + ".ptned", empty);
-    verifySetup("SELECT * from " + dbName + ".unptned", empty);
-    verifySetup("SELECT * from " + dbName + ".virtual_view", empty);
-
-    run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned");
-    verifySetup("SELECT * from " + dbName + ".unptned", unptn_data);
-    verifySetup("SELECT * from " + dbName + ".virtual_view", unptn_data);
-
-    run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=1)");
-    verifySetup("SELECT a from " + dbName + ".ptned WHERE b=1", ptn_data_1);
-    run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=2)");
-    verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", ptn_data_2);
-
-    run("CREATE MATERIALIZED VIEW " + dbName + ".mat_view AS SELECT a FROM " + dbName + ".ptned where b=1");
-    verifySetup("SELECT a from " + dbName + ".mat_view", ptn_data_1);
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName);
-    String replDumpLocn = getResult(0,0);
-    String replDumpId = getResult(0,1,true);
-    LOG.info("Bootstrap-dump: Dumped to {} with id {}",replDumpLocn,replDumpId);
-    run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
-
-    verifyRun("SELECT * from " + dbName + "_dupe.virtual_view", unptn_data);
-    verifyRun("SELECT a from " + dbName + "_dupe.mat_view", ptn_data_1);
-
-    run("CREATE VIEW " + dbName + ".virtual_view2 AS SELECT a FROM " + dbName + ".ptned where b=2");
-    verifySetup("SELECT a from " + dbName + ".virtual_view2", ptn_data_2);
-
-    // Create a view with name already exist. Just to verify if failure flow clears the added create_table event.
-    run("CREATE VIEW " + dbName + ".virtual_view2 AS SELECT a FROM " + dbName + ".ptned where b=2");
-
-    run("CREATE MATERIALIZED VIEW " + dbName + ".mat_view2 AS SELECT * FROM " + dbName + ".unptned");
-    verifySetup("SELECT * from " + dbName + ".mat_view2", unptn_data);
-
-    // Perform REPL-DUMP/LOAD
-    advanceDumpDir();
-    run("REPL DUMP " + dbName + " FROM " + replDumpId );
-    String incrementalDumpLocn = getResult(0,0);
-    String incrementalDumpId = getResult(0,1,true);
-    LOG.info("Incremental-dump: Dumped to {} with id {}", incrementalDumpLocn, incrementalDumpId);
-    run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-    printOutput();
-    run("REPL LOAD " + dbName + "_dupe FROM '"+incrementalDumpLocn+"'");
-
-    run("REPL STATUS " + dbName + "_dupe");
-    verifyResults(new String[] {incrementalDumpId});
-
-    verifyRun("SELECT * from " + dbName + "_dupe.unptned", unptn_data);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned where b=1", ptn_data_1);
-    verifyRun("SELECT * from " + dbName + "_dupe.virtual_view", unptn_data);
-    verifyRun("SELECT a from " + dbName + "_dupe.mat_view", ptn_data_1);
-    verifyRun("SELECT * from " + dbName + "_dupe.virtual_view2", ptn_data_2);
-    verifyRun("SELECT * from " + dbName + "_dupe.mat_view2", unptn_data);
-  }
-
-  @Test
-  public void testDumpLimit() throws IOException {
-    String name = testName.getMethodName();
-    String dbName = createDB(name);
-    run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName);
-    String replDumpLocn = getResult(0, 0);
-    String replDumpId = getResult(0, 1, true);
-    LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId);
-
-    String[] unptn_data = new String[] { "eleven", "thirteen", "twelve" };
-    String[] unptn_data_load1 = new String[] { "eleven" };
-    String[] unptn_data_load2 = new String[] { "eleven", "thirteen" };
-
-    // 3 events to insert, last repl ID: replDumpId+3
-    run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')");
-    // 3 events to insert, last repl ID: replDumpId+6
-    run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')");
-    // 3 events to insert, last repl ID: replDumpId+9
-    run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[2] + "')");
-    verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data);
-
-    run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName + " FROM " + replDumpId + " LIMIT 3");
-    String incrementalDumpLocn = getResult(0, 0);
-    String incrementalDumpId = getResult(0, 1, true);
-    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
-    replDumpId = incrementalDumpId;
-
-    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-    verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data);
-    verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_load1);
+    verifyRun("SELECT a from " + dbName + ".unptned", unptn_data);
+    verifyRun("SELECT a from " + dbName + ".unptned_late", unptn_data);
+    verifyRun("SELECT a from " + dbName + "_dupe.unptned", unptn_data);
+    verifyRun("SELECT a from " + dbName + "_dupe.unptned_late", unptn_data);
 
-    advanceDumpDir();
-    Integer lastReplID = Integer.valueOf(replDumpId);
-    lastReplID += 1000;
-    String toReplID = String.valueOf(lastReplID);
-
-    run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + toReplID + " LIMIT 3");
-    incrementalDumpLocn = getResult(0, 0);
-    incrementalDumpId = getResult(0, 1, true);
-    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
-    replDumpId = incrementalDumpId;
-    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-
-    verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_load2);
+    String[] unptn_data_after_ins = new String[] { "eleven", "twelve", "thirteen" };
+    run("INSERT INTO TABLE " + dbName + ".unptned_late values('" + unptn_data_after_ins[2] + "')");
+    verifySetup("SELECT a from " + dbName + ".unptned_late", unptn_data_after_ins);
 
     advanceDumpDir();
     run("REPL DUMP " + dbName + " FROM " + replDumpId);
@@ -1193,312 +924,11 @@ public class TestReplicationScenarios {
     incrementalDumpId = getResult(0, 1, true);
     LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
     replDumpId = incrementalDumpId;
-    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-
-    verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data);
-  }
-
-  @Test
-  public void testExchangePartition() throws IOException {
-    String testName = "exchangePartition";
-    LOG.info("Testing " + testName);
-    String dbName = testName + "_" + tid;
-
-    run("CREATE DATABASE " + dbName);
-    run("CREATE TABLE " + dbName + ".ptned_src(a string) partitioned by (b int, c int) STORED AS TEXTFILE");
-    run("CREATE TABLE " + dbName + ".ptned_dest(a string) partitioned by (b int, c int) STORED AS TEXTFILE");
-
-    String[] empty = new String[] {};
-    String[] ptn_data_1 = new String[] { "fifteen", "fourteen", "thirteen" };
-    String[] ptn_data_2 = new String[] { "fifteen", "seventeen", "sixteen" };
-
-    run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=1, c=1) values('" + ptn_data_1[0] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=1, c=1) values('" + ptn_data_1[1] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=1, c=1) values('" + ptn_data_1[2] + "')");
-
-    run("ALTER TABLE " + dbName + ".ptned_src ADD PARTITION (b=2, c=2)");
-    run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=2, c=2) values('" + ptn_data_2[0] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=2, c=2) values('" + ptn_data_2[1] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=2, c=2) values('" + ptn_data_2[2] + "')");
-
-    run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=2, c=3) values('" + ptn_data_2[0] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=2, c=3) values('" + ptn_data_2[1] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=2, c=3) values('" + ptn_data_2[2] + "')");
-    verifySetup("SELECT a from " + dbName + ".ptned_src where (b=1 and c=1) ORDER BY a", ptn_data_1);
-    verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=2) ORDER BY a", ptn_data_2);
-    verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=3) ORDER BY a", ptn_data_2);
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName);
-    String replDumpLocn = getResult(0, 0);
-    String replDumpId = getResult(0, 1, true);
-    LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId);
-    run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
-    verifyRun("SELECT a from " + dbName + ".ptned_src where (b=1 and c=1) ORDER BY a", ptn_data_1);
-    verifyRun("SELECT a from " + dbName + ".ptned_src where (b=2 and c=2) ORDER BY a", ptn_data_2);
-    verifyRun("SELECT a from " + dbName + ".ptned_src where (b=2 and c=3) ORDER BY a", ptn_data_2);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=1 and c=1) ORDER BY a", ptn_data_1);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=2) ORDER BY a", ptn_data_2);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=3) ORDER BY a", ptn_data_2);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=1 and c=1)", empty);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=2)", empty);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=3)", empty);
-
-    // Exchange single partitions using complete partition-spec (all partition columns)
-    run("ALTER TABLE " + dbName + ".ptned_dest EXCHANGE PARTITION (b=1, c=1) WITH TABLE " + dbName + ".ptned_src");
-    verifySetup("SELECT a from " + dbName + ".ptned_src where (b=1 and c=1)", empty);
-    verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=2) ORDER BY a", ptn_data_2);
-    verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=3) ORDER BY a", ptn_data_2);
-    verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=1 and c=1) ORDER BY a", ptn_data_1);
-    verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=2 and c=2)", empty);
-    verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=2 and c=3)", empty);
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName + " FROM " + replDumpId);
-    String incrementalDumpLocn = getResult(0, 0);
-    String incrementalDumpId = getResult(0, 1, true);
-    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
-    replDumpId = incrementalDumpId;
-    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=1 and c=1)", empty);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=2) ORDER BY a", ptn_data_2);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=3) ORDER BY a", ptn_data_2);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=1 and c=1) ORDER BY a", ptn_data_1);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=2)", empty);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=3)", empty);
-
-    // Exchange multiple partitions using partial partition-spec (only one partition column)
-    run("ALTER TABLE " + dbName + ".ptned_dest EXCHANGE PARTITION (b=2) WITH TABLE " + dbName + ".ptned_src");
-    verifySetup("SELECT a from " + dbName + ".ptned_src where (b=1 and c=1)", empty);
-    verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=2)", empty);
-    verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=3)", empty);
-    verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=1 and c=1) ORDER BY a", ptn_data_1);
-    verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=2 and c=2) ORDER BY a", ptn_data_2);
-    verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=2 and c=3) ORDER BY a", ptn_data_2);
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName + " FROM " + replDumpId);
-    incrementalDumpLocn = getResult(0, 0);
-    incrementalDumpId = getResult(0, 1, true);
-    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
-    replDumpId = incrementalDumpId;
-    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=1 and c=1)", empty);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=2)", empty);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=3)", empty);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=1 and c=1) ORDER BY a", ptn_data_1);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=2) ORDER BY a", ptn_data_2);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=3) ORDER BY a", ptn_data_2);
-  }
-
-  @Test
-  public void testTruncateTable() throws IOException {
-    String testName = "truncateTable";
-    LOG.info("Testing " + testName);
-    String dbName = testName + "_" + tid;
-
-    run("CREATE DATABASE " + dbName);
-    run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName);
-    String replDumpLocn = getResult(0, 0);
-    String replDumpId = getResult(0, 1, true);
-    LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId);
-    run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
-
-    String[] unptn_data = new String[] { "eleven", "twelve" };
-    String[] empty = new String[] {};
-    run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')");
-    run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')");
-    verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data);
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName + " FROM " + replDumpId);
-    String incrementalDumpLocn = getResult(0, 0);
-    String incrementalDumpId = getResult(0, 1, true);
-    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
-    replDumpId = incrementalDumpId;
     run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
     printOutput();
     run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-    verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data);
-    verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data);
 
-    run("TRUNCATE TABLE " + dbName + ".unptned");
-    verifySetup("SELECT a from " + dbName + ".unptned", empty);
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName + " FROM " + replDumpId);
-    incrementalDumpLocn = getResult(0, 0);
-    incrementalDumpId = getResult(0, 1, true);
-    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
-    replDumpId = incrementalDumpId;
-    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-    verifyRun("SELECT a from " + dbName + ".unptned", empty);
-    verifyRun("SELECT a from " + dbName + "_dupe.unptned", empty);
-
-    String[] unptn_data_after_ins = new String[] { "thirteen" };
-    run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data_after_ins[0] + "')");
-    verifySetup("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data_after_ins);
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName + " FROM " + replDumpId);
-    incrementalDumpLocn = getResult(0, 0);
-    incrementalDumpId = getResult(0, 1, true);
-    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
-    replDumpId = incrementalDumpId;
-    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-    verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data_after_ins);
-    verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_after_ins);
-  }
-
-  @Test
-  public void testTruncatePartitionedTable() throws IOException {
-    String testName = "truncatePartitionedTable";
-    LOG.info("Testing " + testName);
-    String dbName = testName + "_" + tid;
-
-    run("CREATE DATABASE " + dbName);
-    run("CREATE TABLE " + dbName + ".ptned_1(a string) PARTITIONED BY (b int) STORED AS TEXTFILE");
-    run("CREATE TABLE " + dbName + ".ptned_2(a string) PARTITIONED BY (b int) STORED AS TEXTFILE");
-
-    String[] ptn_data_1 = new String[] { "fifteen", "fourteen", "thirteen" };
-    String[] ptn_data_2 = new String[] { "fifteen", "seventeen", "sixteen" };
-    String[] empty = new String[] {};
-    run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=1) values('" + ptn_data_1[0] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=1) values('" + ptn_data_1[1] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=1) values('" + ptn_data_1[2] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=2) values('" + ptn_data_2[0] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=2) values('" + ptn_data_2[1] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=2) values('" + ptn_data_2[2] + "')");
-
-    run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=10) values('" + ptn_data_1[0] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=10) values('" + ptn_data_1[1] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=10) values('" + ptn_data_1[2] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=20) values('" + ptn_data_2[0] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=20) values('" + ptn_data_2[1] + "')");
-    run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=20) values('" + ptn_data_2[2] + "')");
-
-    verifyRun("SELECT a from " + dbName + ".ptned_1 where (b=1) ORDER BY a", ptn_data_1);
-    verifyRun("SELECT a from " + dbName + ".ptned_1 where (b=2) ORDER BY a", ptn_data_2);
-    verifyRun("SELECT a from " + dbName + ".ptned_2 where (b=10) ORDER BY a", ptn_data_1);
-    verifyRun("SELECT a from " + dbName + ".ptned_2 where (b=20) ORDER BY a", ptn_data_2);
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName);
-    String replDumpLocn = getResult(0, 0);
-    String replDumpId = getResult(0, 1, true);
-    LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId);
-    run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_1 where (b=1) ORDER BY a", ptn_data_1);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_1 where (b=2) ORDER BY a", ptn_data_2);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_2 where (b=10) ORDER BY a", ptn_data_1);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned_2 where (b=20) ORDER BY a", ptn_data_2);
-
-    run("TRUNCATE TABLE " + dbName + ".ptned_1 PARTITION(b=2)");
-    verifySetup("SELECT a from " + dbName + ".ptned_1 where (b=1) ORDER BY a", ptn_data_1);
-    verifySetup("SELECT a from " + dbName + ".ptned_1 where (b=2)", empty);
-
-    run("TRUNCATE TABLE " + dbName + ".ptned_2");
-    verifySetup("SELECT a from " + dbName + ".ptned_2 where (b=10)", empty);
-    verifySetup("SELECT a from " + dbName + ".ptned_2 where (b=20)", empty);
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName + " FROM " + replDumpId);
-    String incrementalDumpLocn = getResult(0, 0);
-    String incrementalDumpId = getResult(0, 1, true);
-    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
-    replDumpId = incrementalDumpId;
-    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-    verifySetup("SELECT a from " + dbName + "_dupe.ptned_1 where (b=1) ORDER BY a", ptn_data_1);
-    verifySetup("SELECT a from " + dbName + "_dupe.ptned_1 where (b=2)", empty);
-    verifySetup("SELECT a from " + dbName + "_dupe.ptned_2 where (b=10)", empty);
-    verifySetup("SELECT a from " + dbName + "_dupe.ptned_2 where (b=20)", empty);
-  }
-
-  @Test
-  public void testTruncateWithCM() throws IOException {
-    String testName = "truncateWithCM";
-    LOG.info("Testing " + testName);
-    String dbName = testName + "_" + tid;
-
-    run("CREATE DATABASE " + dbName);
-    run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
-
-    advanceDumpDir();
-    run("REPL DUMP " + dbName);
-    String replDumpLocn = getResult(0, 0);
-    String replDumpId = getResult(0, 1, true);
-    LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId);
-
-    String[] empty = new String[] {};
-    String[] unptn_data = new String[] { "eleven", "thirteen" };
-    String[] unptn_data_load1 = new String[] { "eleven" };
-    String[] unptn_data_load2 = new String[] { "eleven", "thirteen" };
-
-    // 3 events to insert, last repl ID: replDumpId+3
-    run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')");
-    // 3 events to insert, last repl ID: replDumpId+6
-    run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')");
-    verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data);
-    // 1 event to truncate, last repl ID: replDumpId+8
-    run("TRUNCATE TABLE " + dbName + ".unptned");
-    verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", empty);
-    // 3 events to insert, last repl ID: replDumpId+11
-    run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data_load1[0] + "')");
-    verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data_load1);
-
-    run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
-
-    // Dump and load only first insert (1 record)
-    advanceDumpDir();
-    run("REPL DUMP " + dbName + " FROM " + replDumpId + " LIMIT 3");
-    String incrementalDumpLocn = getResult(0, 0);
-    String incrementalDumpId = getResult(0, 1, true);
-    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
-    replDumpId = incrementalDumpId;
-
-    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-    verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data_load1);
-    verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_load1);
-
-    // Dump and load only second insert (2 records)
-    advanceDumpDir();
-    Integer lastReplID = Integer.valueOf(replDumpId);
-    lastReplID += 1000;
-    String toReplID = String.valueOf(lastReplID);
-
-    run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + toReplID + " LIMIT 3");
-    incrementalDumpLocn = getResult(0, 0);
-    incrementalDumpId = getResult(0, 1, true);
-    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
-    replDumpId = incrementalDumpId;
-    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-
-    verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_load2);
-
-    // Dump and load only truncate (0 records)
-    advanceDumpDir();
-    run("REPL DUMP " + dbName + " FROM " + replDumpId + " LIMIT 2");
-    incrementalDumpLocn = getResult(0, 0);
-    incrementalDumpId = getResult(0, 1, true);
-    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
-    replDumpId = incrementalDumpId;
-    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-
-    verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", empty);
-
-    // Dump and load insert after truncate (1 record)
-    advanceDumpDir();
-    run("REPL DUMP " + dbName + " FROM " + replDumpId);
-    incrementalDumpLocn = getResult(0, 0);
-    incrementalDumpId = getResult(0, 1, true);
-    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
-    replDumpId = incrementalDumpId;
-    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
-
-    verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_load1);
+    verifyRun("SELECT a from " + dbName + "_dupe.unptned_late", unptn_data_after_ins);
   }
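
The inline comments above (in the truncate tests this revert removes) count how many notification events each statement generates and then replay bounded slices of that event stream with "REPL DUMP <db> FROM <id> [TO <id>] [LIMIT n]". A minimal sketch of how those bounded incremental dump commands are composed, using a hypothetical helper that is not part of TestReplicationScenarios:

    // Hypothetical helper (not part of TestReplicationScenarios): builds the
    // bounded incremental dump command form exercised in the tests above.
    final class ReplDumpCommandSketch {
      static String incrementalDump(String dbName, String fromEventId, String toEventId, Integer limit) {
        StringBuilder cmd = new StringBuilder("REPL DUMP ").append(dbName)
            .append(" FROM ").append(fromEventId);
        if (toEventId != null) {
          cmd.append(" TO ").append(toEventId);   // optional upper bound on replayed event ids
        }
        if (limit != null) {
          cmd.append(" LIMIT ").append(limit);    // optional cap on the number of replayed events
        }
        return cmd.toString();
      }
    }

For example, incrementalDump(dbName, replDumpId, null, 3) yields the "REPL DUMP <db> FROM <id> LIMIT 3" form used above to ship only the first insert's events.
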
 
   @Test
@@ -1523,8 +953,11 @@ public class TestReplicationScenarios {
 
     // Now, to actually testing status - first, we bootstrap.
 
-    String name = testName.getMethodName();
-    String dbName = createDB(name);
+    String testName = "incrementalStatus";
+    LOG.info("Testing " + testName);
+    String dbName = testName + "_" + tid;
+
+    run("CREATE DATABASE " + dbName);
     advanceDumpDir();
     run("REPL DUMP " + dbName);
     String lastReplDumpLocn = getResult(0, 0);
@@ -1579,13 +1012,6 @@ public class TestReplicationScenarios {
 
   }
 
-  private static String createDB(String name) {
-    LOG.info("Testing " + name);
-    String dbName = name + "_" + tid;
-    run("CREATE DATABASE " + dbName);
-    return dbName;
-  }
-
   @Test
   public void testEventFilters(){
     // Test testing that the filters introduced by EventUtils are working correctly.
@@ -1605,8 +1031,8 @@ public class TestReplicationScenarios {
     // events to those that match the dbname and tblname provided to the filter.
     // If the tblname passed in to the filter is null, then it restricts itself
     // to dbname-matching alone.
-    IMetaStoreClient.NotificationFilter dbTblFilter = new DatabaseAndTableFilter(dbname,tblname);
-    IMetaStoreClient.NotificationFilter dbFilter = new DatabaseAndTableFilter(dbname,null);
+    IMetaStoreClient.NotificationFilter dbTblFilter = EventUtils.getDbTblNotificationFilter(dbname,tblname);
+    IMetaStoreClient.NotificationFilter dbFilter = EventUtils.getDbTblNotificationFilter(dbname,null);
 
     assertFalse(dbTblFilter.accept(null));
     assertTrue(dbTblFilter.accept(createDummyEvent(dbname, tblname, 0)));
@@ -1623,7 +1049,7 @@ public class TestReplicationScenarios {
     // within a range specified.
     long evBegin = 50;
     long evEnd = 75;
-    IMetaStoreClient.NotificationFilter evRangeFilter = new EventBoundaryFilter(evBegin,evEnd);
+    IMetaStoreClient.NotificationFilter evRangeFilter = EventUtils.getEventBoundaryFilter(evBegin,evEnd);
 
     assertTrue(evBegin < evEnd);
     assertFalse(evRangeFilter.accept(null));
@@ -1639,9 +1065,9 @@ public class TestReplicationScenarios {
     // that match a provided message format
 
     IMetaStoreClient.NotificationFilter restrictByDefaultMessageFormat =
-        new MessageFormatFilter(MessageFactory.getInstance().getMessageFormat());
+        EventUtils.restrictByMessageFormat(MessageFactory.getInstance().getMessageFormat());
     IMetaStoreClient.NotificationFilter restrictByArbitraryMessageFormat =
-        new MessageFormatFilter(MessageFactory.getInstance().getMessageFormat() + "_bogus");
+        EventUtils.restrictByMessageFormat(MessageFactory.getInstance().getMessageFormat() + "_bogus");
     NotificationEvent dummyEvent = createDummyEvent(dbname,tblname,0);
 
     assertEquals(MessageFactory.getInstance().getMessageFormat(),dummyEvent.getMessageFormat());
@@ -1666,19 +1092,21 @@ public class TestReplicationScenarios {
       }
     };
 
-    assertTrue(new AndFilter(yes, yes).accept(dummyEvent));
-    assertFalse(new AndFilter(yes, no).accept(dummyEvent));
-    assertFalse(new AndFilter(no, yes).accept(dummyEvent));
-    assertFalse(new AndFilter(no, no).accept(dummyEvent));
-
-    assertTrue(new AndFilter(yes, yes, yes).accept(dummyEvent));
-    assertFalse(new AndFilter(yes, yes, no).accept(dummyEvent));
-    assertFalse(new AndFilter(yes, no, yes).accept(dummyEvent));
-    assertFalse(new AndFilter(yes, no, no).accept(dummyEvent));
-    assertFalse(new AndFilter(no, yes, yes).accept(dummyEvent));
-    assertFalse(new AndFilter(no, yes, no).accept(dummyEvent));
-    assertFalse(new AndFilter(no, no, yes).accept(dummyEvent));
-    assertFalse(new AndFilter(no, no, no).accept(dummyEvent));
+    assertTrue(EventUtils.andFilter(yes, yes).accept(dummyEvent));
+    assertFalse(EventUtils.andFilter(yes, no).accept(dummyEvent));
+    assertFalse(EventUtils.andFilter(no, yes).accept(dummyEvent));
+    assertFalse(EventUtils.andFilter(no, no).accept(dummyEvent));
+
+    assertTrue(EventUtils.andFilter(yes, yes, yes).accept(dummyEvent));
+    assertFalse(EventUtils.andFilter(yes, yes, no).accept(dummyEvent));
+    assertFalse(EventUtils.andFilter(yes, no, yes).accept(dummyEvent));
+    assertFalse(EventUtils.andFilter(yes, no, no).accept(dummyEvent));
+    assertFalse(EventUtils.andFilter(no, yes, yes).accept(dummyEvent));
+    assertFalse(EventUtils.andFilter(no, yes, no).accept(dummyEvent));
+    assertFalse(EventUtils.andFilter(no, no, yes).accept(dummyEvent));
+    assertFalse(EventUtils.andFilter(no, no, no).accept(dummyEvent));
+
+
   }
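
The comments in testEventFilters above describe filters that restrict replication events by database/table name, by an event-id range, by message format, and by conjunction of other filters. A minimal sketch of that last piece, written against the IMetaStoreClient.NotificationFilter interface the test already uses; this is not the AndFilter/EventUtils.andFilter code referenced in the diff:

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.NotificationEvent;

    // Sketch of the conjunctive filter behaviour asserted above.
    public class AndNotificationFilterSketch implements IMetaStoreClient.NotificationFilter {
      private final IMetaStoreClient.NotificationFilter[] filters;

      public AndNotificationFilterSketch(IMetaStoreClient.NotificationFilter... filters) {
        this.filters = filters;
      }

      @Override
      public boolean accept(NotificationEvent event) {
        // Accept only if every component filter accepts; short-circuit on the first rejection.
        for (IMetaStoreClient.NotificationFilter f : filters) {
          if (!f.accept(event)) {
            return false;
          }
        }
        return true;
      }
    }
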
 
   private NotificationEvent createDummyEvent(String dbname, String tblname, long evid) {
@@ -1709,7 +1137,7 @@ public class TestReplicationScenarios {
     if (tblName != null){
       verifyRun("REPL STATUS " + dbName + "_dupe." + tblName, lastReplDumpId);
     }
-    assertTrue(Long.parseLong(lastReplDumpId) > Long.parseLong(prevReplDumpId));
+    assertTrue(lastReplDumpId.compareTo(prevReplDumpId) > 0);
     return lastReplDumpId;
   }
 
@@ -1724,7 +1152,7 @@ public class TestReplicationScenarios {
     run("REPL LOAD " + dbName + "_dupe." + tblName + " FROM '" + lastDumpLocn + "'");
     verifyRun("REPL STATUS " + dbName + "_dupe", lastDbReplDumpId);
     verifyRun("REPL STATUS " + dbName + "_dupe." + tblName, lastReplDumpId);
-    assertTrue(Long.parseLong(lastReplDumpId) > Long.parseLong(prevReplDumpId));
+    assertTrue(lastReplDumpId.compareTo(prevReplDumpId) > 0);
     return lastReplDumpId;
   }
 
@@ -1746,25 +1174,18 @@ public class TestReplicationScenarios {
     return (lastResults.get(rowNum).split("\\t"))[colNum];
   }
 
-  /**
-   * All the results that are read from the hive output will not preserve
-   * case sensitivity and will all be in lower case, hence we will check against
-   * only lower case data values.
-   * Unless for Null Values it actually returns in UpperCase and hence explicitly lowering case
-   * before assert.
-   */
   private void verifyResults(String[] data) throws IOException {
     List<String> results = getOutput();
-    LOG.info("Expecting {}", data);
-    LOG.info("Got {}", results);
-    assertEquals(data.length, results.size());
-    for (int i = 0; i < data.length; i++) {
-      assertEquals(data[i].toLowerCase(), results.get(i).toLowerCase());
+    LOG.info("Expecting {}",data);
+    LOG.info("Got {}",results);
+    assertEquals(data.length,results.size());
+    for (int i = 0; i < data.length; i++){
+      assertEquals(data[i],results.get(i));
     }
   }
 
   private List<String> getOutput() throws IOException {
-    List<String> results = new ArrayList<>();
+    List<String> results = new ArrayList<String>();
     try {
       driver.getResults(results);
     } catch (CommandNeedRetryException e) {
@@ -1796,18 +1217,6 @@ public class TestReplicationScenarios {
     verifyResults(data);
   }
 
-  private void verifyFail(String cmd) throws RuntimeException {
-    boolean success = false;
-    try {
-      success = run(cmd,false);
-    } catch (AssertionError ae){
-      LOG.warn("AssertionError:",ae);
-      throw new RuntimeException(ae);
-    }
-
-    assertFalse(success);
-  }
-
   private static void run(String cmd) throws RuntimeException {
     try {
     run(cmd,false); // default arg-less run simply runs, and does not care about failure
@@ -1837,7 +1246,7 @@ public class TestReplicationScenarios {
     return success;
   }
 
-  private static void createTestDataFile(String filename, String[] lines) throws IOException {
+  public static void createTestDataFile(String filename, String[] lines) throws IOException {
     FileWriter writer = null;
     try {
       File file = new File(filename);
@@ -1852,4 +1261,5 @@ public class TestReplicationScenarios {
       }
     }
   }
+
 }


[24/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/postgres/hive-txn-schema-2.3.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/hive-txn-schema-2.3.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-txn-schema-2.3.0.postgres.sql
deleted file mode 100644
index 1fa99af..0000000
--- a/metastore/scripts/upgrade/postgres/hive-txn-schema-2.3.0.postgres.sql
+++ /dev/null
@@ -1,133 +0,0 @@
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements.  See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the "License"); you may not use this file except in compliance with
--- the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
---
--- Tables for transaction management
--- 
-
-CREATE TABLE TXNS (
-  TXN_ID bigint PRIMARY KEY,
-  TXN_STATE char(1) NOT NULL,
-  TXN_STARTED bigint NOT NULL,
-  TXN_LAST_HEARTBEAT bigint NOT NULL,
-  TXN_USER varchar(128) NOT NULL,
-  TXN_HOST varchar(128) NOT NULL,
-  TXN_AGENT_INFO varchar(128),
-  TXN_META_INFO varchar(128),
-  TXN_HEARTBEAT_COUNT integer
-);
-
-CREATE TABLE TXN_COMPONENTS (
-  TC_TXNID bigint REFERENCES TXNS (TXN_ID),
-  TC_DATABASE varchar(128) NOT NULL,
-  TC_TABLE varchar(128),
-  TC_PARTITION varchar(767) DEFAULT NULL,
-  TC_OPERATION_TYPE char(1) NOT NULL
-);
-
-CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS USING hash (TC_TXNID);
-
-CREATE TABLE COMPLETED_TXN_COMPONENTS (
-  CTC_TXNID bigint,
-  CTC_DATABASE varchar(128) NOT NULL,
-  CTC_TABLE varchar(256),
-  CTC_PARTITION varchar(767)
-);
-
-CREATE TABLE NEXT_TXN_ID (
-  NTXN_NEXT bigint NOT NULL
-);
-INSERT INTO NEXT_TXN_ID VALUES(1);
-
-CREATE TABLE HIVE_LOCKS (
-  HL_LOCK_EXT_ID bigint NOT NULL,
-  HL_LOCK_INT_ID bigint NOT NULL,
-  HL_TXNID bigint,
-  HL_DB varchar(128) NOT NULL,
-  HL_TABLE varchar(128),
-  HL_PARTITION varchar(767) DEFAULT NULL,
-  HL_LOCK_STATE char(1) NOT NULL,
-  HL_LOCK_TYPE char(1) NOT NULL,
-  HL_LAST_HEARTBEAT bigint NOT NULL,
-  HL_ACQUIRED_AT bigint,
-  HL_USER varchar(128) NOT NULL,
-  HL_HOST varchar(128) NOT NULL,
-  HL_HEARTBEAT_COUNT integer,
-  HL_AGENT_INFO varchar(128),
-  HL_BLOCKEDBY_EXT_ID bigint,
-  HL_BLOCKEDBY_INT_ID bigint,
-  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
-); 
-
-CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS USING hash (HL_TXNID);
-
-CREATE TABLE NEXT_LOCK_ID (
-  NL_NEXT bigint NOT NULL
-);
-INSERT INTO NEXT_LOCK_ID VALUES(1);
-
-CREATE TABLE COMPACTION_QUEUE (
-  CQ_ID bigint PRIMARY KEY,
-  CQ_DATABASE varchar(128) NOT NULL,
-  CQ_TABLE varchar(128) NOT NULL,
-  CQ_PARTITION varchar(767),
-  CQ_STATE char(1) NOT NULL,
-  CQ_TYPE char(1) NOT NULL,
-  CQ_TBLPROPERTIES varchar(2048),
-  CQ_WORKER_ID varchar(128),
-  CQ_START bigint,
-  CQ_RUN_AS varchar(128),
-  CQ_HIGHEST_TXN_ID bigint,
-  CQ_META_INFO bytea,
-  CQ_HADOOP_JOB_ID varchar(32)
-);
-
-CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
-  NCQ_NEXT bigint NOT NULL
-);
-INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
-
-CREATE TABLE COMPLETED_COMPACTIONS (
-  CC_ID bigint PRIMARY KEY,
-  CC_DATABASE varchar(128) NOT NULL,
-  CC_TABLE varchar(128) NOT NULL,
-  CC_PARTITION varchar(767),
-  CC_STATE char(1) NOT NULL,
-  CC_TYPE char(1) NOT NULL,
-  CC_TBLPROPERTIES varchar(2048),
-  CC_WORKER_ID varchar(128),
-  CC_START bigint,
-  CC_END bigint,
-  CC_RUN_AS varchar(128),
-  CC_HIGHEST_TXN_ID bigint,
-  CC_META_INFO bytea,
-  CC_HADOOP_JOB_ID varchar(32)
-);
-
-CREATE TABLE AUX_TABLE (
-  MT_KEY1 varchar(128) NOT NULL,
-  MT_KEY2 bigint NOT NULL,
-  MT_COMMENT varchar(255),
-  PRIMARY KEY(MT_KEY1, MT_KEY2)
-);
-
-CREATE TABLE WRITE_SET (
-  WS_DATABASE varchar(128) NOT NULL,
-  WS_TABLE varchar(128) NOT NULL,
-  WS_PARTITION varchar(767),
-  WS_TXNID bigint NOT NULL,
-  WS_COMMIT_ID bigint NOT NULL,
-  WS_OPERATION_TYPE char(1) NOT NULL
-);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/postgres/hive-txn-schema-3.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/hive-txn-schema-3.0.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-txn-schema-3.0.0.postgres.sql
deleted file mode 100644
index 1fa99af..0000000
--- a/metastore/scripts/upgrade/postgres/hive-txn-schema-3.0.0.postgres.sql
+++ /dev/null
@@ -1,133 +0,0 @@
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements.  See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the "License"); you may not use this file except in compliance with
--- the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
---
--- Tables for transaction management
--- 
-
-CREATE TABLE TXNS (
-  TXN_ID bigint PRIMARY KEY,
-  TXN_STATE char(1) NOT NULL,
-  TXN_STARTED bigint NOT NULL,
-  TXN_LAST_HEARTBEAT bigint NOT NULL,
-  TXN_USER varchar(128) NOT NULL,
-  TXN_HOST varchar(128) NOT NULL,
-  TXN_AGENT_INFO varchar(128),
-  TXN_META_INFO varchar(128),
-  TXN_HEARTBEAT_COUNT integer
-);
-
-CREATE TABLE TXN_COMPONENTS (
-  TC_TXNID bigint REFERENCES TXNS (TXN_ID),
-  TC_DATABASE varchar(128) NOT NULL,
-  TC_TABLE varchar(128),
-  TC_PARTITION varchar(767) DEFAULT NULL,
-  TC_OPERATION_TYPE char(1) NOT NULL
-);
-
-CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS USING hash (TC_TXNID);
-
-CREATE TABLE COMPLETED_TXN_COMPONENTS (
-  CTC_TXNID bigint,
-  CTC_DATABASE varchar(128) NOT NULL,
-  CTC_TABLE varchar(256),
-  CTC_PARTITION varchar(767)
-);
-
-CREATE TABLE NEXT_TXN_ID (
-  NTXN_NEXT bigint NOT NULL
-);
-INSERT INTO NEXT_TXN_ID VALUES(1);
-
-CREATE TABLE HIVE_LOCKS (
-  HL_LOCK_EXT_ID bigint NOT NULL,
-  HL_LOCK_INT_ID bigint NOT NULL,
-  HL_TXNID bigint,
-  HL_DB varchar(128) NOT NULL,
-  HL_TABLE varchar(128),
-  HL_PARTITION varchar(767) DEFAULT NULL,
-  HL_LOCK_STATE char(1) NOT NULL,
-  HL_LOCK_TYPE char(1) NOT NULL,
-  HL_LAST_HEARTBEAT bigint NOT NULL,
-  HL_ACQUIRED_AT bigint,
-  HL_USER varchar(128) NOT NULL,
-  HL_HOST varchar(128) NOT NULL,
-  HL_HEARTBEAT_COUNT integer,
-  HL_AGENT_INFO varchar(128),
-  HL_BLOCKEDBY_EXT_ID bigint,
-  HL_BLOCKEDBY_INT_ID bigint,
-  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
-); 
-
-CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS USING hash (HL_TXNID);
-
-CREATE TABLE NEXT_LOCK_ID (
-  NL_NEXT bigint NOT NULL
-);
-INSERT INTO NEXT_LOCK_ID VALUES(1);
-
-CREATE TABLE COMPACTION_QUEUE (
-  CQ_ID bigint PRIMARY KEY,
-  CQ_DATABASE varchar(128) NOT NULL,
-  CQ_TABLE varchar(128) NOT NULL,
-  CQ_PARTITION varchar(767),
-  CQ_STATE char(1) NOT NULL,
-  CQ_TYPE char(1) NOT NULL,
-  CQ_TBLPROPERTIES varchar(2048),
-  CQ_WORKER_ID varchar(128),
-  CQ_START bigint,
-  CQ_RUN_AS varchar(128),
-  CQ_HIGHEST_TXN_ID bigint,
-  CQ_META_INFO bytea,
-  CQ_HADOOP_JOB_ID varchar(32)
-);
-
-CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
-  NCQ_NEXT bigint NOT NULL
-);
-INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
-
-CREATE TABLE COMPLETED_COMPACTIONS (
-  CC_ID bigint PRIMARY KEY,
-  CC_DATABASE varchar(128) NOT NULL,
-  CC_TABLE varchar(128) NOT NULL,
-  CC_PARTITION varchar(767),
-  CC_STATE char(1) NOT NULL,
-  CC_TYPE char(1) NOT NULL,
-  CC_TBLPROPERTIES varchar(2048),
-  CC_WORKER_ID varchar(128),
-  CC_START bigint,
-  CC_END bigint,
-  CC_RUN_AS varchar(128),
-  CC_HIGHEST_TXN_ID bigint,
-  CC_META_INFO bytea,
-  CC_HADOOP_JOB_ID varchar(32)
-);
-
-CREATE TABLE AUX_TABLE (
-  MT_KEY1 varchar(128) NOT NULL,
-  MT_KEY2 bigint NOT NULL,
-  MT_COMMENT varchar(255),
-  PRIMARY KEY(MT_KEY1, MT_KEY2)
-);
-
-CREATE TABLE WRITE_SET (
-  WS_DATABASE varchar(128) NOT NULL,
-  WS_TABLE varchar(128) NOT NULL,
-  WS_PARTITION varchar(767),
-  WS_TXNID bigint NOT NULL,
-  WS_COMMIT_ID bigint NOT NULL,
-  WS_OPERATION_TYPE char(1) NOT NULL
-);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql b/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql
index e6daeca..ae4adf7 100644
--- a/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql
+++ b/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql
@@ -3,7 +3,6 @@ SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0';
 \i 036-HIVE-14496.postgres.sql;
 \i 037-HIVE-14637.postgres.sql;
 \i 037-HIVE-10562.postgres.sql;
-\i 038-HIVE-12274.postgres.sql;
 
 UPDATE "VERSION" SET "SCHEMA_VERSION"='2.2.0', "VERSION_COMMENT"='Hive release version 2.2.0' where "VER_ID"=1;
 SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0';

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/postgres/upgrade-2.2.0-to-2.3.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/upgrade-2.2.0-to-2.3.0.postgres.sql b/metastore/scripts/upgrade/postgres/upgrade-2.2.0-to-2.3.0.postgres.sql
deleted file mode 100644
index 77358df..0000000
--- a/metastore/scripts/upgrade/postgres/upgrade-2.2.0-to-2.3.0.postgres.sql
+++ /dev/null
@@ -1,7 +0,0 @@
-SELECT 'Upgrading MetaStore schema from 2.2.0 to 2.3.0';
-
-\i 039-HIVE-16399.postgres.sql;
-
-UPDATE "VERSION" SET "SCHEMA_VERSION"='2.3.0', "VERSION_COMMENT"='Hive release version 2.3.0' where "VER_ID"=1;
-SELECT 'Finished upgrading MetaStore schema from 2.2.0 to 2.3.0';
-

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql b/metastore/scripts/upgrade/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
deleted file mode 100644
index 2dd9bb9..0000000
--- a/metastore/scripts/upgrade/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
+++ /dev/null
@@ -1,5 +0,0 @@
-SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0';
-
-UPDATE "VERSION" SET "SCHEMA_VERSION"='3.0.0', "VERSION_COMMENT"='Hive release version 3.0.0' where "VER_ID"=1;
-SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0';
-

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/postgres/upgrade.order.postgres
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/upgrade.order.postgres b/metastore/scripts/upgrade/postgres/upgrade.order.postgres
index d7091b5..420174a 100644
--- a/metastore/scripts/upgrade/postgres/upgrade.order.postgres
+++ b/metastore/scripts/upgrade/postgres/upgrade.order.postgres
@@ -12,5 +12,3 @@
 1.2.0-to-2.0.0
 2.0.0-to-2.1.0
 2.1.0-to-2.2.0
-2.2.0-to-2.3.0
-2.3.0-to-3.0.0


[06/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapperBatch.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapperBatch.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapperBatch.java
index 82e8748..f68228c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapperBatch.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapperBatch.java
@@ -18,15 +18,11 @@
 
 package org.apache.hadoop.hive.ql.exec.vector;
 
-import org.apache.hadoop.hive.ql.exec.vector.VectorHashKeyWrapper.EmptyVectorHashKeyWrapper;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.util.JavaDataModel;
 import org.apache.hadoop.hive.serde2.io.TimestampWritable;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.apache.hadoop.hive.ql.exec.vector.ColumnVector.Type;
 
 /**
  * Class for handling vectorized hash map key wrappers. It evaluates the key columns in a
@@ -63,11 +59,6 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    */
   private int keysFixedSize;
 
-  /**
-   * Shared hashcontext for all keys in this batch
-   */
-  private final VectorHashKeyWrapper.HashContext hashCtx = new VectorHashKeyWrapper.HashContext();
-
    /**
    * Returns the compiled fixed size for the key wrappers.
    * @return
@@ -94,21 +85,12 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * @throws HiveException
    */
   public void evaluateBatch(VectorizedRowBatch batch) throws HiveException {
-
-    if (keyCount == 0) {
-      // all keywrappers must be EmptyVectorHashKeyWrapper
-      return;
+    for(int i = 0; i < keyExpressions.length; ++i) {
+      keyExpressions[i].evaluate(batch);
     }
-
-    for(int i=0;i<batch.size;++i) {
-      vectorHashKeyWrappers[i].clearIsNull();
-    }
-
-    int keyIndex;
-    int columnIndex;
     for(int i = 0; i< longIndices.length; ++i) {
-      keyIndex = longIndices[i];
-      columnIndex = keyExpressions[keyIndex].getOutputColumn();
+      int keyIndex = longIndices[i];
+      int columnIndex = keyExpressions[keyIndex].getOutputColumn();
       LongColumnVector columnVector = (LongColumnVector) batch.cols[columnIndex];
       if (columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
         assignLongNoNullsNoRepeatingNoSelection(i, batch.size, columnVector);
@@ -117,11 +99,11 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
       } else if (columnVector.noNulls && columnVector.isRepeating) {
         assignLongNoNullsRepeating(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignLongNullsNoRepeatingNoSelection(keyIndex, i, batch.size, columnVector);
+        assignLongNullsNoRepeatingNoSelection(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && columnVector.isRepeating) {
-        assignLongNullsRepeating(keyIndex, i, batch.size, columnVector);
+        assignLongNullsRepeating(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignLongNullsNoRepeatingSelection (keyIndex, i, batch.size, columnVector, batch.selected);
+        assignLongNullsNoRepeatingSelection (i, batch.size, columnVector, batch.selected);
       } else {
         throw new HiveException (String.format(
             "Unimplemented Long null/repeat/selected combination %b/%b/%b",
@@ -129,8 +111,8 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
       }
     }
     for(int i=0;i<doubleIndices.length; ++i) {
-      keyIndex = doubleIndices[i];
-      columnIndex = keyExpressions[keyIndex].getOutputColumn();
+      int keyIndex = doubleIndices[i];
+      int columnIndex = keyExpressions[keyIndex].getOutputColumn();
       DoubleColumnVector columnVector = (DoubleColumnVector) batch.cols[columnIndex];
       if (columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
         assignDoubleNoNullsNoRepeatingNoSelection(i, batch.size, columnVector);
@@ -139,11 +121,11 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
       } else if (columnVector.noNulls && columnVector.isRepeating) {
         assignDoubleNoNullsRepeating(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignDoubleNullsNoRepeatingNoSelection(keyIndex, i, batch.size, columnVector);
+        assignDoubleNullsNoRepeatingNoSelection(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && columnVector.isRepeating) {
-        assignDoubleNullsRepeating(keyIndex, i, batch.size, columnVector);
+        assignDoubleNullsRepeating(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignDoubleNullsNoRepeatingSelection(keyIndex, i, batch.size, columnVector, batch.selected);
+        assignDoubleNullsNoRepeatingSelection (i, batch.size, columnVector, batch.selected);
       } else {
         throw new HiveException (String.format(
             "Unimplemented Double null/repeat/selected combination %b/%b/%b",
@@ -151,8 +133,8 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
       }
     }
     for(int i=0;i<stringIndices.length; ++i) {
-      keyIndex = stringIndices[i];
-      columnIndex = keyExpressions[keyIndex].getOutputColumn();
+      int keyIndex = stringIndices[i];
+      int columnIndex = keyExpressions[keyIndex].getOutputColumn();
       BytesColumnVector columnVector = (BytesColumnVector) batch.cols[columnIndex];
       if (columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
         assignStringNoNullsNoRepeatingNoSelection(i, batch.size, columnVector);
@@ -161,11 +143,11 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
       } else if (columnVector.noNulls && columnVector.isRepeating) {
         assignStringNoNullsRepeating(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignStringNullsNoRepeatingNoSelection(keyIndex, i, batch.size, columnVector);
+        assignStringNullsNoRepeatingNoSelection(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && columnVector.isRepeating) {
-        assignStringNullsRepeating(keyIndex, i, batch.size, columnVector);
+        assignStringNullsRepeating(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignStringNullsNoRepeatingSelection(keyIndex, i, batch.size, columnVector, batch.selected);
+        assignStringNullsNoRepeatingSelection (i, batch.size, columnVector, batch.selected);
       } else {
         throw new HiveException (String.format(
             "Unimplemented String null/repeat/selected combination %b/%b/%b",
@@ -173,8 +155,8 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
       }
     }
     for(int i=0;i<decimalIndices.length; ++i) {
-      keyIndex = decimalIndices[i];
-      columnIndex = keyExpressions[keyIndex].getOutputColumn();
+      int keyIndex = decimalIndices[i];
+      int columnIndex = keyExpressions[keyIndex].getOutputColumn();
       DecimalColumnVector columnVector = (DecimalColumnVector) batch.cols[columnIndex];
       if (columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
         assignDecimalNoNullsNoRepeatingNoSelection(i, batch.size, columnVector);
@@ -183,11 +165,11 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
       } else if (columnVector.noNulls && columnVector.isRepeating) {
         assignDecimalNoNullsRepeating(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignDecimalNullsNoRepeatingNoSelection(keyIndex, i, batch.size, columnVector);
+        assignDecimalNullsNoRepeatingNoSelection(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && columnVector.isRepeating) {
-        assignDecimalNullsRepeating(keyIndex, i, batch.size, columnVector);
+        assignDecimalNullsRepeating(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignDecimalNullsNoRepeatingSelection(keyIndex, i, batch.size, columnVector, batch.selected);
+        assignDecimalNullsNoRepeatingSelection (i, batch.size, columnVector, batch.selected);
       } else {
         throw new HiveException (String.format(
             "Unimplemented Decimal null/repeat/selected combination %b/%b/%b",
@@ -195,8 +177,8 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
       }
     }
     for(int i=0;i<timestampIndices.length; ++i) {
-      keyIndex = timestampIndices[i];
-      columnIndex = keyExpressions[keyIndex].getOutputColumn();
+      int keyIndex = timestampIndices[i];
+      int columnIndex = keyExpressions[keyIndex].getOutputColumn();
       TimestampColumnVector columnVector = (TimestampColumnVector) batch.cols[columnIndex];
       if (columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
         assignTimestampNoNullsNoRepeatingNoSelection(i, batch.size, columnVector);
@@ -205,11 +187,11 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
       } else if (columnVector.noNulls && columnVector.isRepeating) {
         assignTimestampNoNullsRepeating(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignTimestampNullsNoRepeatingNoSelection(keyIndex, i, batch.size, columnVector);
+        assignTimestampNullsNoRepeatingNoSelection(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && columnVector.isRepeating) {
-        assignTimestampNullsRepeating(keyIndex, i, batch.size, columnVector);
+        assignTimestampNullsRepeating(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignTimestampNullsNoRepeatingSelection(keyIndex, i, batch.size, columnVector, batch.selected);
+        assignTimestampNullsNoRepeatingSelection (i, batch.size, columnVector, batch.selected);
       } else {
         throw new HiveException (String.format(
             "Unimplemented timestamp null/repeat/selected combination %b/%b/%b",
@@ -217,8 +199,8 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
       }
     }
     for(int i=0;i<intervalDayTimeIndices.length; ++i) {
-      keyIndex = intervalDayTimeIndices[i];
-      columnIndex = keyExpressions[keyIndex].getOutputColumn();
+      int keyIndex = intervalDayTimeIndices[i];
+      int columnIndex = keyExpressions[keyIndex].getOutputColumn();
       IntervalDayTimeColumnVector columnVector = (IntervalDayTimeColumnVector) batch.cols[columnIndex];
       if (columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
         assignIntervalDayTimeNoNullsNoRepeatingNoSelection(i, batch.size, columnVector);
@@ -227,198 +209,11 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
       } else if (columnVector.noNulls && columnVector.isRepeating) {
         assignIntervalDayTimeNoNullsRepeating(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignIntervalDayTimeNullsNoRepeatingNoSelection(keyIndex, i, batch.size, columnVector);
+        assignIntervalDayTimeNullsNoRepeatingNoSelection(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && columnVector.isRepeating) {
-        assignIntervalDayTimeNullsRepeating(keyIndex, i, batch.size, columnVector);
+        assignIntervalDayTimeNullsRepeating(i, batch.size, columnVector);
       } else if (!columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignIntervalDayTimeNullsNoRepeatingSelection(keyIndex, i, batch.size, columnVector, batch.selected);
-      } else {
-        throw new HiveException (String.format(
-            "Unimplemented intervalDayTime null/repeat/selected combination %b/%b/%b",
-            columnVector.noNulls, columnVector.isRepeating, batch.selectedInUse));
-      }
-    }
-    for(int i=0;i<batch.size;++i) {
-      vectorHashKeyWrappers[i].setHashKey();
-    }
-  }
-
-  public void evaluateBatchGroupingSets(VectorizedRowBatch batch,
-      boolean[] groupingSetsOverrideIsNulls) throws HiveException {
-
-    for(int i=0;i<batch.size;++i) {
-      vectorHashKeyWrappers[i].clearIsNull();
-    }
-    int keyIndex;
-    int columnIndex;
-    for(int i = 0; i< longIndices.length; ++i) {
-      keyIndex = longIndices[i];
-      if (groupingSetsOverrideIsNulls[keyIndex]) {
-        final int batchSize = batch.size;
-        for(int r = 0; r < batchSize; ++r) {
-          vectorHashKeyWrappers[r].assignNullLong(keyIndex, i);
-        }
-        continue;
-      }
-      columnIndex = keyExpressions[keyIndex].getOutputColumn();
-      LongColumnVector columnVector = (LongColumnVector) batch.cols[columnIndex];
-      if (columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignLongNoNullsNoRepeatingNoSelection(i, batch.size, columnVector);
-      } else if (columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignLongNoNullsNoRepeatingSelection(i, batch.size, columnVector, batch.selected);
-      } else if (columnVector.noNulls && columnVector.isRepeating) {
-        assignLongNoNullsRepeating(i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignLongNullsNoRepeatingNoSelection(keyIndex, i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && columnVector.isRepeating) {
-        assignLongNullsRepeating(keyIndex, i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignLongNullsNoRepeatingSelection(keyIndex, i, batch.size, columnVector, batch.selected);
-      } else {
-        throw new HiveException (String.format(
-            "Unimplemented Long null/repeat/selected combination %b/%b/%b",
-            columnVector.noNulls, columnVector.isRepeating, batch.selectedInUse));
-      }
-    }
-    for(int i=0;i<doubleIndices.length; ++i) {
-      keyIndex = doubleIndices[i];
-      if (groupingSetsOverrideIsNulls[keyIndex]) {
-        final int batchSize = batch.size;
-        for(int r = 0; r < batchSize; ++r) {
-          vectorHashKeyWrappers[r].assignNullDouble(keyIndex, i);
-        }
-        continue;
-      }
-      columnIndex = keyExpressions[keyIndex].getOutputColumn();
-      DoubleColumnVector columnVector = (DoubleColumnVector) batch.cols[columnIndex];
-      if (columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignDoubleNoNullsNoRepeatingNoSelection(i, batch.size, columnVector);
-      } else if (columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignDoubleNoNullsNoRepeatingSelection(i, batch.size, columnVector, batch.selected);
-      } else if (columnVector.noNulls && columnVector.isRepeating) {
-        assignDoubleNoNullsRepeating(i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignDoubleNullsNoRepeatingNoSelection(keyIndex, i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && columnVector.isRepeating) {
-        assignDoubleNullsRepeating(keyIndex, i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignDoubleNullsNoRepeatingSelection(keyIndex, i, batch.size, columnVector, batch.selected);
-      } else {
-        throw new HiveException (String.format(
-            "Unimplemented Double null/repeat/selected combination %b/%b/%b",
-            columnVector.noNulls, columnVector.isRepeating, batch.selectedInUse));
-      }
-    }
-    for(int i=0;i<stringIndices.length; ++i) {
-      keyIndex = stringIndices[i];
-      if (groupingSetsOverrideIsNulls[keyIndex]) {
-        final int batchSize = batch.size;
-        for(int r = 0; r < batchSize; ++r) {
-          vectorHashKeyWrappers[r].assignNullString(keyIndex, i);
-        }
-        continue;
-      }
-      columnIndex = keyExpressions[keyIndex].getOutputColumn();
-      BytesColumnVector columnVector = (BytesColumnVector) batch.cols[columnIndex];
-      if (columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignStringNoNullsNoRepeatingNoSelection(i, batch.size, columnVector);
-      } else if (columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignStringNoNullsNoRepeatingSelection(i, batch.size, columnVector, batch.selected);
-      } else if (columnVector.noNulls && columnVector.isRepeating) {
-        assignStringNoNullsRepeating(i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignStringNullsNoRepeatingNoSelection(keyIndex, i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && columnVector.isRepeating) {
-        assignStringNullsRepeating(keyIndex, i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignStringNullsNoRepeatingSelection(keyIndex, i, batch.size, columnVector, batch.selected);
-      } else {
-        throw new HiveException (String.format(
-            "Unimplemented String null/repeat/selected combination %b/%b/%b",
-            columnVector.noNulls, columnVector.isRepeating, batch.selectedInUse));
-      }
-    }
-    for(int i=0;i<decimalIndices.length; ++i) {
-      keyIndex = decimalIndices[i];
-      if (groupingSetsOverrideIsNulls[keyIndex]) {
-        final int batchSize = batch.size;
-        for(int r = 0; r < batchSize; ++r) {
-          vectorHashKeyWrappers[r].assignNullDecimal(keyIndex, i);
-        }
-        continue;
-      }
-      columnIndex = keyExpressions[keyIndex].getOutputColumn();
-      DecimalColumnVector columnVector = (DecimalColumnVector) batch.cols[columnIndex];
-      if (columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignDecimalNoNullsNoRepeatingNoSelection(i, batch.size, columnVector);
-      } else if (columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignDecimalNoNullsNoRepeatingSelection(i, batch.size, columnVector, batch.selected);
-      } else if (columnVector.noNulls && columnVector.isRepeating) {
-        assignDecimalNoNullsRepeating(i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignDecimalNullsNoRepeatingNoSelection(keyIndex, i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && columnVector.isRepeating) {
-        assignDecimalNullsRepeating(keyIndex, i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignDecimalNullsNoRepeatingSelection(keyIndex, i, batch.size, columnVector, batch.selected);
-      } else {
-        throw new HiveException (String.format(
-            "Unimplemented Decimal null/repeat/selected combination %b/%b/%b",
-            columnVector.noNulls, columnVector.isRepeating, batch.selectedInUse));
-      }
-    }
-    for(int i=0;i<timestampIndices.length; ++i) {
-      keyIndex = timestampIndices[i];
-      if (groupingSetsOverrideIsNulls[keyIndex]) {
-        final int batchSize = batch.size;
-        for(int r = 0; r < batchSize; ++r) {
-          vectorHashKeyWrappers[r].assignNullTimestamp(keyIndex, i);
-        }
-        continue;
-      }
-      columnIndex = keyExpressions[keyIndex].getOutputColumn();
-      TimestampColumnVector columnVector = (TimestampColumnVector) batch.cols[columnIndex];
-      if (columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignTimestampNoNullsNoRepeatingNoSelection(i, batch.size, columnVector);
-      } else if (columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignTimestampNoNullsNoRepeatingSelection(i, batch.size, columnVector, batch.selected);
-      } else if (columnVector.noNulls && columnVector.isRepeating) {
-        assignTimestampNoNullsRepeating(i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignTimestampNullsNoRepeatingNoSelection(keyIndex, i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && columnVector.isRepeating) {
-        assignTimestampNullsRepeating(keyIndex, i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignTimestampNullsNoRepeatingSelection(keyIndex, i, batch.size, columnVector, batch.selected);
-      } else {
-        throw new HiveException (String.format(
-            "Unimplemented timestamp null/repeat/selected combination %b/%b/%b",
-            columnVector.noNulls, columnVector.isRepeating, batch.selectedInUse));
-      }
-    }
-    for(int i=0;i<intervalDayTimeIndices.length; ++i) {
-      keyIndex = intervalDayTimeIndices[i];
-      if (groupingSetsOverrideIsNulls[keyIndex]) {
-        final int batchSize = batch.size;
-        for(int r = 0; r < batchSize; ++r) {
-          vectorHashKeyWrappers[r].assignNullIntervalDayTime(keyIndex, i);
-        }
-        continue;
-      }
-      columnIndex = keyExpressions[keyIndex].getOutputColumn();
-      IntervalDayTimeColumnVector columnVector = (IntervalDayTimeColumnVector) batch.cols[columnIndex];
-      if (columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignIntervalDayTimeNoNullsNoRepeatingNoSelection(i, batch.size, columnVector);
-      } else if (columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignIntervalDayTimeNoNullsNoRepeatingSelection(i, batch.size, columnVector, batch.selected);
-      } else if (columnVector.noNulls && columnVector.isRepeating) {
-        assignIntervalDayTimeNoNullsRepeating(i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && !columnVector.isRepeating && !batch.selectedInUse) {
-        assignIntervalDayTimeNullsNoRepeatingNoSelection(keyIndex, i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && columnVector.isRepeating) {
-        assignIntervalDayTimeNullsRepeating(keyIndex, i, batch.size, columnVector);
-      } else if (!columnVector.noNulls && !columnVector.isRepeating && batch.selectedInUse) {
-        assignIntervalDayTimeNullsNoRepeatingSelection(keyIndex, i, batch.size, columnVector, batch.selected);
+        assignIntervalDayTimeNullsNoRepeatingSelection (i, batch.size, columnVector, batch.selected);
       } else {
         throw new HiveException (String.format(
             "Unimplemented intervalDayTime null/repeat/selected combination %b/%b/%b",
@@ -434,15 +229,14 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for string type, possible nulls, no repeat values, batch selection vector.
    */
-  private void assignStringNullsNoRepeatingSelection(int keyIndex, int index, int size,
+  private void assignStringNullsNoRepeatingSelection(int index, int size,
       BytesColumnVector columnVector, int[] selected) {
     for(int i=0; i<size; ++i) {
       int row = selected[i];
       if (columnVector.isNull[row]) {
-        vectorHashKeyWrappers[i].assignNullString(keyIndex, index);
+        vectorHashKeyWrappers[i].assignNullString(index);
       } else {
-        vectorHashKeyWrappers[i].assignString(
-            index,
+        vectorHashKeyWrappers[i].assignString(index,
             columnVector.vector[row],
             columnVector.start[row],
             columnVector.length[row]);
@@ -454,15 +248,14 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for double type, possible nulls, repeat values.
    */
-  private void assignStringNullsRepeating(int keyIndex, int index, int size, BytesColumnVector columnVector) {
+  private void assignStringNullsRepeating(int index, int size, BytesColumnVector columnVector) {
     if (columnVector.isNull[0]) {
       for(int i = 0; i < size; ++i) {
-        vectorHashKeyWrappers[i].assignNullString(keyIndex, index);
+        vectorHashKeyWrappers[i].assignNullString(index);
       }
     } else {
       for(int i = 0; i < size; ++i) {
-        vectorHashKeyWrappers[i].assignString(
-            index,
+        vectorHashKeyWrappers[i].assignString(index,
             columnVector.vector[0],
             columnVector.start[0],
             columnVector.length[0]);
@@ -474,14 +267,13 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for string type, possible nulls, no repeat values, no selection vector.
    */
-  private void assignStringNullsNoRepeatingNoSelection(int keyIndex, int index, int size,
+  private void assignStringNullsNoRepeatingNoSelection(int index, int size,
       BytesColumnVector columnVector) {
     for(int i=0; i<size; ++i) {
       if (columnVector.isNull[i]) {
-        vectorHashKeyWrappers[i].assignNullString(keyIndex, index);
+        vectorHashKeyWrappers[i].assignNullString(index);
       } else {
-        vectorHashKeyWrappers[i].assignString(
-            index,
+        vectorHashKeyWrappers[i].assignString(index,
             columnVector.vector[i],
             columnVector.start[i],
             columnVector.length[i]);
@@ -493,11 +285,9 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for double type, no nulls, repeat values, no selection vector.
    */
-  private void assignStringNoNullsRepeating(int index, int size,
-      BytesColumnVector columnVector) {
+  private void assignStringNoNullsRepeating(int index, int size, BytesColumnVector columnVector) {
     for(int i = 0; i < size; ++i) {
-      vectorHashKeyWrappers[i].assignString(
-          index,
+      vectorHashKeyWrappers[i].assignString(index,
           columnVector.vector[0],
           columnVector.start[0],
           columnVector.length[0]);
@@ -512,8 +302,7 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
       BytesColumnVector columnVector, int[] selected) {
     for(int i=0; i<size; ++i) {
       int row = selected[i];
-      vectorHashKeyWrappers[i].assignString(
-          index,
+      vectorHashKeyWrappers[i].assignString(index,
           columnVector.vector[row],
           columnVector.start[row],
           columnVector.length[row]);
@@ -527,8 +316,7 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
   private void assignStringNoNullsNoRepeatingNoSelection(int index, int size,
       BytesColumnVector columnVector) {
     for(int i=0; i<size; ++i) {
-      vectorHashKeyWrappers[i].assignString(
-          index,
+      vectorHashKeyWrappers[i].assignString(index,
           columnVector.vector[i],
           columnVector.start[i],
           columnVector.length[i]);
@@ -539,14 +327,14 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for double type, possible nulls, no repeat values, batch selection vector.
    */
-  private void assignDoubleNullsNoRepeatingSelection(int keyIndex, int index, int size,
+  private void assignDoubleNullsNoRepeatingSelection(int index, int size,
       DoubleColumnVector columnVector, int[] selected) {
     for(int i = 0; i < size; ++i) {
       int row = selected[i];
       if (!columnVector.isNull[row]) {
         vectorHashKeyWrappers[i].assignDouble(index, columnVector.vector[row]);
       } else {
-        vectorHashKeyWrappers[i].assignNullDouble(keyIndex, index);
+        vectorHashKeyWrappers[i].assignNullDouble(index);
       }
     }
   }
@@ -555,10 +343,10 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for Double type, repeat null values.
    */
-  private void assignDoubleNullsRepeating(int keyIndex, int index, int size,
+  private void assignDoubleNullsRepeating(int index, int size,
       DoubleColumnVector columnVector) {
     for(int r = 0; r < size; ++r) {
-      vectorHashKeyWrappers[r].assignNullDouble(keyIndex, index);
+      vectorHashKeyWrappers[r].assignNullDouble(index);
     }
   }
 
@@ -566,13 +354,13 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for Double type, possible nulls, repeat values.
    */
-  private void assignDoubleNullsNoRepeatingNoSelection(int keyIndex, int index, int size,
+  private void assignDoubleNullsNoRepeatingNoSelection(int index, int size,
       DoubleColumnVector columnVector) {
     for(int r = 0; r < size; ++r) {
       if (!columnVector.isNull[r]) {
         vectorHashKeyWrappers[r].assignDouble(index, columnVector.vector[r]);
       } else {
-        vectorHashKeyWrappers[r].assignNullDouble(keyIndex, index);
+        vectorHashKeyWrappers[r].assignNullDouble(index);
       }
     }
   }
@@ -613,14 +401,14 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for long type, possible nulls, no repeat values, batch selection vector.
    */
-  private void assignLongNullsNoRepeatingSelection(int keyIndex, int index, int size,
+  private void assignLongNullsNoRepeatingSelection(int index, int size,
       LongColumnVector columnVector, int[] selected) {
     for(int i = 0; i < size; ++i) {
       int row = selected[i];
       if (!columnVector.isNull[row]) {
         vectorHashKeyWrappers[i].assignLong(index, columnVector.vector[row]);
       } else {
-        vectorHashKeyWrappers[i].assignNullLong(keyIndex, index);
+        vectorHashKeyWrappers[i].assignNullLong(index);
       }
     }
   }
@@ -629,10 +417,10 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for long type, repeating nulls.
    */
-  private void assignLongNullsRepeating(int keyIndex, int index, int size,
+  private void assignLongNullsRepeating(int index, int size,
       LongColumnVector columnVector) {
     for(int r = 0; r < size; ++r) {
-      vectorHashKeyWrappers[r].assignNullLong(keyIndex, index);
+      vectorHashKeyWrappers[r].assignNullLong(index);
     }
   }
 
@@ -640,13 +428,13 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for long type, possible nulls, no repeat values, no selection vector.
    */
-  private void assignLongNullsNoRepeatingNoSelection(int keyIndex, int index, int size,
+  private void assignLongNullsNoRepeatingNoSelection(int index, int size,
       LongColumnVector columnVector) {
     for(int r = 0; r < size; ++r) {
       if (!columnVector.isNull[r]) {
         vectorHashKeyWrappers[r].assignLong(index, columnVector.vector[r]);
       } else {
-        vectorHashKeyWrappers[r].assignNullLong(keyIndex, index);
+        vectorHashKeyWrappers[r].assignNullLong(index);
       }
     }
   }
@@ -687,14 +475,14 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for Decimal type, possible nulls, no repeat values, batch selection vector.
    */
-  private void assignDecimalNullsNoRepeatingSelection(int keyIndex, int index, int size,
+  private void assignDecimalNullsNoRepeatingSelection(int index, int size,
       DecimalColumnVector columnVector, int[] selected) {
     for(int i = 0; i < size; ++i) {
       int row = selected[i];
       if (!columnVector.isNull[row]) {
         vectorHashKeyWrappers[i].assignDecimal(index, columnVector.vector[row]);
       } else {
-        vectorHashKeyWrappers[i].assignNullDecimal(keyIndex, index);
+        vectorHashKeyWrappers[i].assignNullDecimal(index);
       }
     }
   }
@@ -703,10 +491,10 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for Decimal type, repeat null values.
    */
-  private void assignDecimalNullsRepeating(int keyIndex, int index, int size,
+  private void assignDecimalNullsRepeating(int index, int size,
       DecimalColumnVector columnVector) {
     for(int r = 0; r < size; ++r) {
-      vectorHashKeyWrappers[r].assignNullDecimal(keyIndex, index);
+      vectorHashKeyWrappers[r].assignNullDecimal(index);
     }
   }
 
@@ -714,13 +502,13 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for Decimal type, possible nulls, no repeat values, no selection vector.
    */
-  private void assignDecimalNullsNoRepeatingNoSelection(int keyIndex, int index, int size,
+  private void assignDecimalNullsNoRepeatingNoSelection(int index, int size,
       DecimalColumnVector columnVector) {
     for(int r = 0; r < size; ++r) {
       if (!columnVector.isNull[r]) {
         vectorHashKeyWrappers[r].assignDecimal(index, columnVector.vector[r]);
       } else {
-        vectorHashKeyWrappers[r].assignNullDecimal(keyIndex, index);
+        vectorHashKeyWrappers[r].assignNullDecimal(index);
       }
     }
   }
@@ -761,14 +549,14 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for Timestamp type, possible nulls, no repeat values, batch selection vector.
    */
-  private void assignTimestampNullsNoRepeatingSelection(int keyIndex, int index, int size,
+  private void assignTimestampNullsNoRepeatingSelection(int index, int size,
       TimestampColumnVector columnVector, int[] selected) {
     for(int i = 0; i < size; ++i) {
       int row = selected[i];
       if (!columnVector.isNull[row]) {
         vectorHashKeyWrappers[i].assignTimestamp(index, columnVector, row);
       } else {
-        vectorHashKeyWrappers[i].assignNullTimestamp(keyIndex, index);
+        vectorHashKeyWrappers[i].assignNullTimestamp(index);
       }
     }
   }
@@ -777,10 +565,10 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for Timestamp type, repeat null values.
    */
-  private void assignTimestampNullsRepeating(int keyIndex, int index, int size,
+  private void assignTimestampNullsRepeating(int index, int size,
       TimestampColumnVector columnVector) {
     for(int r = 0; r < size; ++r) {
-      vectorHashKeyWrappers[r].assignNullTimestamp(keyIndex, index);
+      vectorHashKeyWrappers[r].assignNullTimestamp(index);
     }
   }
 
@@ -788,13 +576,13 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for Timestamp type, possible nulls, no repeat values, no selection vector.
    */
-  private void assignTimestampNullsNoRepeatingNoSelection(int keyIndex, int index, int size,
+  private void assignTimestampNullsNoRepeatingNoSelection(int index, int size,
       TimestampColumnVector columnVector) {
     for(int r = 0; r < size; ++r) {
       if (!columnVector.isNull[r]) {
         vectorHashKeyWrappers[r].assignTimestamp(index, columnVector, r);
       } else {
-        vectorHashKeyWrappers[r].assignNullTimestamp(keyIndex, index);
+        vectorHashKeyWrappers[r].assignNullTimestamp(index);
       }
     }
   }
@@ -835,14 +623,14 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for IntervalDayTime type, possible nulls, no repeat values, batch selection vector.
    */
-  private void assignIntervalDayTimeNullsNoRepeatingSelection(int keyIndex, int index, int size,
+  private void assignIntervalDayTimeNullsNoRepeatingSelection(int index, int size,
       IntervalDayTimeColumnVector columnVector, int[] selected) {
     for(int i = 0; i < size; ++i) {
       int row = selected[i];
       if (!columnVector.isNull[row]) {
         vectorHashKeyWrappers[i].assignIntervalDayTime(index, columnVector, row);
       } else {
-        vectorHashKeyWrappers[i].assignNullIntervalDayTime(keyIndex, index);
+        vectorHashKeyWrappers[i].assignNullIntervalDayTime(index);
       }
     }
   }
@@ -851,10 +639,10 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for IntervalDayTime type, repeat null values.
    */
-  private void assignIntervalDayTimeNullsRepeating(int keyIndex, int index, int size,
+  private void assignIntervalDayTimeNullsRepeating(int index, int size,
       IntervalDayTimeColumnVector columnVector) {
     for(int r = 0; r < size; ++r) {
-      vectorHashKeyWrappers[r].assignNullIntervalDayTime(keyIndex, index);
+      vectorHashKeyWrappers[r].assignNullIntervalDayTime(index);
     }
   }
 
@@ -862,13 +650,13 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
    * Helper method to assign values from a vector column into the key wrapper.
    * Optimized for IntervalDayTime type, possible nulls, no repeat values, no selection vector.
    */
-  private void assignIntervalDayTimeNullsNoRepeatingNoSelection(int keyIndex, int index, int size,
+  private void assignIntervalDayTimeNullsNoRepeatingNoSelection(int index, int size,
       IntervalDayTimeColumnVector columnVector) {
     for(int r = 0; r < size; ++r) {
       if (!columnVector.isNull[r]) {
         vectorHashKeyWrappers[r].assignIntervalDayTime(index, columnVector, r);
       } else {
-        vectorHashKeyWrappers[r].assignNullIntervalDayTime(keyIndex, index);
+        vectorHashKeyWrappers[r].assignNullIntervalDayTime(index);
       }
     }
   }
@@ -905,28 +693,13 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
     }
   }
 
-  public static VectorHashKeyWrapperBatch compileKeyWrapperBatch(VectorExpression[] keyExpressions)
-      throws HiveException
-  {
-
-    final int size = keyExpressions.length;
-    ColumnVector.Type[] columnVectorTypes = new ColumnVector.Type[size];
-    for (int i = 0; i < size; i++) {
-      String typeName = VectorizationContext.mapTypeNameSynonyms(keyExpressions[i].getOutputType());
-      TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeName);
-      columnVectorTypes[i] = VectorizationContext.getColumnVectorTypeFromTypeInfo(typeInfo);
-    }
-    return compileKeyWrapperBatch(keyExpressions, columnVectorTypes);
-  }
-
   /**
    * Prepares a VectorHashKeyWrapperBatch to work for a specific set of keys.
    * Computes the fast access lookup indices, preallocates all needed internal arrays.
    * This step is done only once per query, not once per batch. The information computed now
    * will be used to generate proper individual VectorKeyHashWrapper objects.
    */
-  public static VectorHashKeyWrapperBatch compileKeyWrapperBatch(VectorExpression[] keyExpressions,
-      ColumnVector.Type[] columnVectorTypes)
+  public static VectorHashKeyWrapperBatch compileKeyWrapperBatch(VectorExpression[] keyExpressions)
     throws HiveException {
     VectorHashKeyWrapperBatch compiledKeyWrapperBatch = new VectorHashKeyWrapperBatch(keyExpressions.length);
     compiledKeyWrapperBatch.keyExpressions = keyExpressions;
@@ -934,8 +707,8 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
     compiledKeyWrapperBatch.keysFixedSize = 0;
 
     // Inspect the output type of each key expression.
-    for(int i=0; i < columnVectorTypes.length; ++i) {
-      compiledKeyWrapperBatch.addKey(columnVectorTypes[i]);
+    for(int i=0; i < keyExpressions.length; ++i) {
+      compiledKeyWrapperBatch.addKey(keyExpressions[i].getOutputType());
     }
     compiledKeyWrapperBatch.finishAdding();
 
@@ -971,54 +744,49 @@ public class VectorHashKeyWrapperBatch extends VectorColumnSetInfo {
   }
 
   public VectorHashKeyWrapper allocateKeyWrapper() {
-    return VectorHashKeyWrapper.allocate(hashCtx,
-        longIndices.length,
-        doubleIndices.length,
-        stringIndices.length,
-        decimalIndices.length,
-        timestampIndices.length,
-        intervalDayTimeIndices.length,
-        keyCount);
+    return VectorHashKeyWrapper.allocate(longIndices.length, doubleIndices.length,
+        stringIndices.length, decimalIndices.length, timestampIndices.length,
+        intervalDayTimeIndices.length);
   }
 
   /**
    * Get the row-mode writable object value of a key from a key wrapper
    * @param keyOutputWriter
    */
-  public Object getWritableKeyValue(VectorHashKeyWrapper kw, int keyIndex,
+  public Object getWritableKeyValue(VectorHashKeyWrapper kw, int i,
       VectorExpressionWriter keyOutputWriter)
     throws HiveException {
 
-    if (kw.isNull(keyIndex)) {
-      return null;
-    }
-
-    ColumnVector.Type columnVectorType = columnVectorTypes[keyIndex];
-    int columnTypeSpecificIndex = columnTypeSpecificIndices[keyIndex];
-
-    switch (columnVectorType) {
-    case LONG:
-      return keyOutputWriter.writeValue(
-          kw.getLongValue(columnTypeSpecificIndex));
-    case DOUBLE:
-      return keyOutputWriter.writeValue(
-          kw.getDoubleValue(columnTypeSpecificIndex));
-    case BYTES:
-      return keyOutputWriter.writeValue(
-          kw.getBytes(columnTypeSpecificIndex),
-          kw.getByteStart(columnTypeSpecificIndex),
-          kw.getByteLength(columnTypeSpecificIndex));
-    case DECIMAL:
-      return keyOutputWriter.writeValue(
-          kw.getDecimal(columnTypeSpecificIndex));
-    case TIMESTAMP:
-      return keyOutputWriter.writeValue(
-          kw.getTimestamp(columnTypeSpecificIndex));
-    case INTERVAL_DAY_TIME:
-      return keyOutputWriter.writeValue(
-          kw.getIntervalDayTime(columnTypeSpecificIndex));
-    default:
-      throw new HiveException("Unexpected column vector type " + columnVectorType);
+    KeyLookupHelper klh = indexLookup[i];
+    if (klh.longIndex >= 0) {
+      return kw.getIsLongNull(klh.longIndex) ? null :
+        keyOutputWriter.writeValue(kw.getLongValue(klh.longIndex));
+    } else if (klh.doubleIndex >= 0) {
+      return kw.getIsDoubleNull(klh.doubleIndex) ? null :
+          keyOutputWriter.writeValue(kw.getDoubleValue(klh.doubleIndex));
+    } else if (klh.stringIndex >= 0) {
+      return kw.getIsBytesNull(klh.stringIndex) ? null :
+          keyOutputWriter.writeValue(
+              kw.getBytes(klh.stringIndex),
+                kw.getByteStart(klh.stringIndex),
+                kw.getByteLength(klh.stringIndex));
+    } else if (klh.decimalIndex >= 0) {
+      return kw.getIsDecimalNull(klh.decimalIndex)? null :
+          keyOutputWriter.writeValue(
+                kw.getDecimal(klh.decimalIndex));
+    } else if (klh.timestampIndex >= 0) {
+      return kw.getIsTimestampNull(klh.timestampIndex)? null :
+          keyOutputWriter.writeValue(
+                kw.getTimestamp(klh.timestampIndex));
+    } else if (klh.intervalDayTimeIndex >= 0) {
+      return kw.getIsIntervalDayTimeNull(klh.intervalDayTimeIndex)? null :
+        keyOutputWriter.writeValue(
+              kw.getIntervalDayTime(klh.intervalDayTimeIndex));
+    } else {
+      throw new HiveException(String.format(
+          "Internal inconsistent KeyLookupHelper at index [%d]:%d %d %d %d %d %d",
+          i, klh.longIndex, klh.doubleIndex, klh.stringIndex, klh.decimalIndex,
+          klh.timestampIndex, klh.intervalDayTimeIndex));
     }
   }
 

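The assign*Nulls* hunks above all follow the same shape: the loop body is specialized for whether the column is repeating, whether the batch uses a selection vector, and whether nulls are possible, and a null cell is recorded on the key wrapper instead of a value. A minimal standalone sketch of that pattern, using hypothetical SimpleColumn and KeyWrapper types rather than the real Hive classes:

    // Hypothetical, simplified stand-ins for a column vector and a per-row key wrapper.
    final class SimpleColumn {
        long[] vector;        // cell values; only vector[0] matters when isRepeating
        boolean[] isNull;     // per-row null flags
        boolean isRepeating;  // all rows share row 0
    }

    final class KeyWrapper {
        Long key;             // null stands for SQL NULL in this sketch
    }

    public class AssignSketch {

        // Nulls possible, selection vector in use (cf. assignLongNullsNoRepeatingSelection).
        static void assignWithSelection(KeyWrapper[] keys, int size, SimpleColumn col, int[] selected) {
            for (int i = 0; i < size; ++i) {
                int row = selected[i];
                keys[i].key = col.isNull[row] ? null : col.vector[row];
            }
        }

        // Repeating null column: every key wrapper gets NULL (cf. assignLongNullsRepeating).
        static void assignRepeatingNull(KeyWrapper[] keys, int size) {
            for (int r = 0; r < size; ++r) {
                keys[r].key = null;
            }
        }

        // Nulls possible, no selection vector: straight scan (cf. assignLongNullsNoRepeatingNoSelection).
        static void assignNoSelection(KeyWrapper[] keys, int size, SimpleColumn col) {
            for (int r = 0; r < size; ++r) {
                keys[r].key = col.isNull[r] ? null : col.vector[r];
            }
        }

        public static void main(String[] args) {
            SimpleColumn col = new SimpleColumn();
            col.vector = new long[] {10, 20, 30, 40};
            col.isNull = new boolean[] {false, true, false, false};
            KeyWrapper[] keys = {new KeyWrapper(), new KeyWrapper()};
            assignWithSelection(keys, 2, col, new int[] {1, 3});
            System.out.println(keys[0].key + " " + keys[1].key);  // prints: null 40
        }
    }
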
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
index 4e05fa3..848fc8e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
@@ -215,9 +215,6 @@ public class VectorMapJoinOperator extends VectorMapJoinBaseOperator {
       }
     }
 
-    for (VectorExpression ve : keyExpressions) {
-      ve.evaluate(inBatch);
-    }
     keyWrapperBatch.evaluateBatch(inBatch);
     keyValues = keyWrapperBatch.getVectorHashKeyWrappers();
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java
index f8c4223..ac3363e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java
@@ -257,9 +257,6 @@ public class VectorSMBMapJoinOperator extends SMBMapJoinOperator implements Vect
         }
       }
 
-      for (VectorExpression ve : keyExpressions) {
-        ve.evaluate(inBatch);
-      }
       keyWrapperBatch.evaluateBatch(inBatch);
       keyValues = keyWrapperBatch.getVectorHashKeyWrappers();
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index c3940cb..5b0c2bf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -1359,7 +1359,7 @@ public class VectorizationContext {
     return "arguments: " + Arrays.toString(args) + ", argument classes: " + argClasses.toString();
   }
 
-  private static final int STACK_LENGTH_LIMIT = 15;
+  private static int STACK_LENGTH_LIMIT = 15;
 
   public static String getStackTraceAsSingleLine(Throwable e) {
     StringBuilder sb = new StringBuilder();
@@ -1461,8 +1461,6 @@ public class VectorizationContext {
       ve = getBetweenFilterExpression(childExpr, mode, returnType);
     } else if (udf instanceof GenericUDFIn) {
       ve = getInExpression(childExpr, mode, returnType);
-    } else if (udf instanceof GenericUDFWhen) {
-      ve = getWhenExpression(childExpr, mode, returnType);
     } else if (udf instanceof GenericUDFOPPositive) {
       ve = getIdentityExpression(childExpr);
     } else if (udf instanceof GenericUDFCoalesce || udf instanceof GenericUDFNvl) {
@@ -2322,54 +2320,6 @@ public class VectorizationContext {
     return createVectorExpression(cl, childrenAfterNot, VectorExpressionDescriptor.Mode.PROJECTION, returnType);
   }
 
-  private boolean isColumnOrNonNullConst(ExprNodeDesc exprNodeDesc) {
-    if (exprNodeDesc instanceof ExprNodeColumnDesc) {
-      return true;
-    }
-    if (exprNodeDesc instanceof ExprNodeConstantDesc) {
-      String typeString = exprNodeDesc.getTypeString();
-      if (!typeString.equalsIgnoreCase("void")) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  private VectorExpression getWhenExpression(List<ExprNodeDesc> childExpr,
-      VectorExpressionDescriptor.Mode mode, TypeInfo returnType) throws HiveException {
-
-    if (mode != VectorExpressionDescriptor.Mode.PROJECTION) {
-      return null;
-    }
-    if (childExpr.size() != 3) {
-      // For now, we only optimize the 2 value case.
-      return null;
-    }
-
-    /*
-     * When we have 2 simple values:
-     *                          CASE WHEN boolExpr THEN column | const ELSE column | const END
-     * then we can convert to:        IF (boolExpr THEN column | const ELSE column | const)
-     */
-    // CONSIDER: Adding a version of IfExpr* that can handle a non-column/const expression in the
-    //           THEN or ELSE.
-    ExprNodeDesc exprNodeDesc1 = childExpr.get(1);
-    ExprNodeDesc exprNodeDesc2 = childExpr.get(2);
-    if (isColumnOrNonNullConst(exprNodeDesc1) &&
-        isColumnOrNonNullConst(exprNodeDesc2)) {
-      // Yes.
-      GenericUDFIf genericUDFIf = new GenericUDFIf();
-      return
-          getVectorExpressionForUdf(
-            genericUDFIf,
-            GenericUDFIf.class,
-            childExpr,
-            mode,
-            returnType);
-    }
-    return null;   // Not handled by vector classes yet.
-  }
-
   /*
    * Return vector expression for a custom (i.e. not built-in) UDF.
    */

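The getWhenExpression block removed above rewrites the two-branch CASE WHEN boolExpr THEN a ELSE b END into IF(boolExpr, a, b), but only when both branches are column references or non-null constants. A sketch of that eligibility check over a toy expression model (hypothetical Expr/ColumnRef/Const/Call classes, not Hive's ExprNodeDesc hierarchy):

    // Hypothetical, minimal expression model; not Hive's ExprNodeDesc classes.
    abstract class Expr {}
    final class ColumnRef extends Expr { final String name; ColumnRef(String n) { name = n; } }
    final class Const extends Expr { final Object value; Const(Object v) { value = v; } }
    final class Call extends Expr {
        final String fn; final java.util.List<Expr> args;
        Call(String fn, java.util.List<Expr> args) { this.fn = fn; this.args = args; }
    }

    public class WhenToIfSketch {

        // A branch is "simple" when it is a column reference or a non-null constant,
        // mirroring the isColumnOrNonNullConst test in the removed code.
        static boolean isColumnOrNonNullConst(Expr e) {
            if (e instanceof ColumnRef) return true;
            return e instanceof Const && ((Const) e).value != null;
        }

        // Rewrite CASE WHEN cond THEN a ELSE b END into IF(cond, a, b) when both
        // branches are simple; otherwise leave the expression unchanged.
        static Expr rewriteWhen(Call caseWhen) {
            if (!"when".equals(caseWhen.fn) || caseWhen.args.size() != 3) return caseWhen;
            Expr thenBranch = caseWhen.args.get(1);
            Expr elseBranch = caseWhen.args.get(2);
            if (isColumnOrNonNullConst(thenBranch) && isColumnOrNonNullConst(elseBranch)) {
                return new Call("if", caseWhen.args);
            }
            return caseWhen;
        }

        public static void main(String[] args) {
            Call c = new Call("when", java.util.Arrays.asList(
                new ColumnRef("flag"), new ColumnRef("a"), new Const(0)));
            System.out.println(((Call) rewriteWhen(c)).fn);  // prints: if
        }
    }
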
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToLong.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToLong.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToLong.java
deleted file mode 100644
index 5a8a825..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToLong.java
+++ /dev/null
@@ -1,271 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector.expressions;
-
-import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
-import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
-import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
-import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
-import org.apache.hadoop.hive.serde2.lazy.LazyByte;
-import org.apache.hadoop.hive.serde2.lazy.LazyInteger;
-import org.apache.hadoop.hive.serde2.lazy.LazyLong;
-import org.apache.hadoop.hive.serde2.lazy.LazyShort;
-import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
-import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
-import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-
-/**
- * Cast a string to a long.
- *
- * If other functions besides cast need to take a string in and produce a long,
- * you can subclass this class or convert it to a superclass, and
- * implement different "func()" methods for each operation.
- */
-public class CastStringToLong extends VectorExpression {
-  private static final long serialVersionUID = 1L;
-  int inputColumn;
-  int outputColumn;
-
-  private transient boolean integerPrimitiveCategoryKnown = false;
-  protected transient PrimitiveCategory integerPrimitiveCategory;
-
-  public CastStringToLong(int inputColumn, int outputColumn) {
-    super();
-    this.inputColumn = inputColumn;
-    this.outputColumn = outputColumn;
-  }
-
-  public CastStringToLong() {
-    super();
-  }
-
-  /**
-   * Convert input string to a long, at position i in the respective vectors.
-   */
-  protected void func(LongColumnVector outV, BytesColumnVector inV, int batchIndex) {
-
-    byte[] bytes = inV.vector[batchIndex];
-    final int start = inV.start[batchIndex];
-    final int length = inV.length[batchIndex];
-    try {
-
-      switch (integerPrimitiveCategory) {
-      case BOOLEAN:
-        {
-          boolean booleanValue;
-          int i = start;
-          if (length == 4) {
-            if ((bytes[i] == 'T' || bytes[i] == 't') &&
-                (bytes[i + 1] == 'R' || bytes[i + 1] == 'r') &&
-                (bytes[i + 2] == 'U' || bytes[i + 2] == 'u') &&
-                (bytes[i + 3] == 'E' || bytes[i + 3] == 'e')) {
-              booleanValue = true;
-            } else {
-              // No boolean value match for 4 char field.
-              outV.noNulls = false;
-              outV.isNull[batchIndex] = true;
-              return;
-            }
-          } else if (length == 5) {
-            if ((bytes[i] == 'F' || bytes[i] == 'f') &&
-                (bytes[i + 1] == 'A' || bytes[i + 1] == 'a') &&
-                (bytes[i + 2] == 'L' || bytes[i + 2] == 'l') &&
-                (bytes[i + 3] == 'S' || bytes[i + 3] == 's') &&
-                (bytes[i + 4] == 'E' || bytes[i + 4] == 'e')) {
-              booleanValue = false;
-            } else {
-              // No boolean value match for 5 char field.
-              outV.noNulls = false;
-              outV.isNull[batchIndex] = true;
-              return;
-            }
-          } else if (length == 1) {
-            byte b = bytes[start];
-            if (b == '1' || b == 't' || b == 'T') {
-              booleanValue = true;
-            } else if (b == '0' || b == 'f' || b == 'F') {
-              booleanValue = false;
-            } else {
-              // No boolean value match for extended 1 char field.
-              outV.noNulls = false;
-              outV.isNull[batchIndex] = true;
-              return;
-            }
-          } else {
-            // No boolean value match for other lengths.
-            outV.noNulls = false;
-            outV.isNull[batchIndex] = true;
-            return;
-          }
-          outV.vector[batchIndex] = (booleanValue ? 1 : 0);
-        }
-        break;
-      case BYTE:
-        if (!LazyUtils.isNumberMaybe(bytes, start, length)) {
-          outV.noNulls = false;
-          outV.isNull[batchIndex] = true;
-          return;
-        }
-        outV.vector[batchIndex] = LazyByte.parseByte(bytes, start, length, 10);
-        break;
-      case SHORT:
-        if (!LazyUtils.isNumberMaybe(bytes, start, length)) {
-          outV.noNulls = false;
-          outV.isNull[batchIndex] = true;
-          return;
-        }
-        outV.vector[batchIndex] = LazyShort.parseShort(bytes, start, length, 10);
-        break;
-      case INT:
-        if (!LazyUtils.isNumberMaybe(bytes, start, length)) {
-          outV.noNulls = false;
-          outV.isNull[batchIndex] = true;
-          return;
-        }
-        outV.vector[batchIndex] = LazyInteger.parseInt(bytes, start, length, 10);
-        break;
-      case LONG:
-        if (!LazyUtils.isNumberMaybe(bytes, start, length)) {
-          outV.noNulls = false;
-          outV.isNull[batchIndex] = true;
-          return;
-        }
-        outV.vector[batchIndex] = LazyLong.parseLong(bytes, start, length, 10);
-        break;
-      default:
-        throw new Error("Unexpected primitive category " + integerPrimitiveCategory);
-      }
-    } catch (Exception e) {
-
-      // for any exception in conversion to integer, produce NULL
-      outV.noNulls = false;
-      outV.isNull[batchIndex] = true;
-    }
-  }
-
-  @Override
-  public void evaluate(VectorizedRowBatch batch) {
-
-    if (!integerPrimitiveCategoryKnown) {
-      String typeName = getOutputType().toLowerCase();
-      TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeName);
-      integerPrimitiveCategory = ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory();
-      integerPrimitiveCategoryKnown = true;
-    }
-
-    if (childExpressions != null) {
-      super.evaluateChildren(batch);
-    }
-
-    BytesColumnVector inV = (BytesColumnVector) batch.cols[inputColumn];
-    int[] sel = batch.selected;
-    int n = batch.size;
-    LongColumnVector outV = (LongColumnVector) batch.cols[outputColumn];
-
-    if (n == 0) {
-
-      // Nothing to do
-      return;
-    }
-
-    if (inV.noNulls) {
-      outV.noNulls = true;
-      if (inV.isRepeating) {
-        outV.isRepeating = true;
-        func(outV, inV, 0);
-      } else if (batch.selectedInUse) {
-        for(int j = 0; j != n; j++) {
-          int i = sel[j];
-          func(outV, inV, i);
-        }
-        outV.isRepeating = false;
-      } else {
-        for(int i = 0; i != n; i++) {
-          func(outV, inV, i);
-        }
-        outV.isRepeating = false;
-      }
-    } else {
-
-      // Handle case with nulls. Don't do function if the value is null,
-      // because the data may be undefined for a null value.
-      outV.noNulls = false;
-      if (inV.isRepeating) {
-        outV.isRepeating = true;
-        outV.isNull[0] = inV.isNull[0];
-        if (!inV.isNull[0]) {
-          func(outV, inV, 0);
-        }
-      } else if (batch.selectedInUse) {
-        for(int j = 0; j != n; j++) {
-          int i = sel[j];
-          outV.isNull[i] = inV.isNull[i];
-          if (!inV.isNull[i]) {
-            func(outV, inV, i);
-          }
-        }
-        outV.isRepeating = false;
-      } else {
-        System.arraycopy(inV.isNull, 0, outV.isNull, 0, n);
-        for(int i = 0; i != n; i++) {
-          if (!inV.isNull[i]) {
-            func(outV, inV, i);
-          }
-        }
-        outV.isRepeating = false;
-      }
-    }
-  }
-
-  @Override
-  public int getOutputColumn() {
-    return outputColumn;
-  }
-
-  public void setOutputColumn(int outputColumn) {
-    this.outputColumn = outputColumn;
-  }
-
-  public int getInputColumn() {
-    return inputColumn;
-  }
-
-  public void setInputColumn(int inputColumn) {
-    this.inputColumn = inputColumn;
-  }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
-  @Override
-  public VectorExpressionDescriptor.Descriptor getDescriptor() {
-    VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
-    b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)
-        .setNumArguments(1)
-        .setArgumentTypes(
-            VectorExpressionDescriptor.ArgumentType.STRING_FAMILY)
-        .setInputExpressionTypes(
-            VectorExpressionDescriptor.InputExpressionType.COLUMN);
-    return b.build();
-  }
-}
\ No newline at end of file

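The deleted CastStringToLong turns any string that fails to parse into a NULL output cell rather than throwing, with the boolean case accepting TRUE/FALSE and the one-character forms 1/0/t/f. A row-at-a-time sketch of that contract in plain JDK code (no LazyLong/LazyInteger helpers, and no vectorization):

    // Stand-alone sketch of the "NULL on parse failure" contract of the deleted
    // CastStringToLong; plain JDK parsing instead of the LazyLong/LazyInteger helpers.
    public class LenientStringToLong {

        // Where the vectorized kernel would set isNull[batchIndex] = true, this returns null.
        static Long castToLong(String s) {
            if (s == null) return null;
            try {
                return Long.parseLong(s);
            } catch (NumberFormatException e) {
                return null;   // any conversion error becomes SQL NULL, not an exception
            }
        }

        // Boolean casts accept TRUE/FALSE (any case) and the one-character forms 1/0/t/f/T/F.
        static Long castToBooleanLong(String s) {
            if (s == null) return null;
            if (s.equalsIgnoreCase("true"))  return 1L;
            if (s.equalsIgnoreCase("false")) return 0L;
            if (s.length() == 1) {
                char c = s.charAt(0);
                if (c == '1' || c == 't' || c == 'T') return 1L;
                if (c == '0' || c == 'f' || c == 'F') return 0L;
            }
            return null;
        }

        public static void main(String[] args) {
            System.out.println(castToLong("42"));          // 42
            System.out.println(castToLong("4x2"));         // null
            System.out.println(castToBooleanLong("TrUe")); // 1
        }
    }
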
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java
index 266365e..6383e8a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java
@@ -39,8 +39,8 @@ public class CuckooSetBytes {
   private int salt = 0;
   private Random gen = new Random(676983475);
   private int rehashCount = 0;
-  private static final long INT_MASK  = 0x00000000ffffffffL;
-  private static final long BYTE_MASK = 0x00000000000000ffL;
+  private static long INT_MASK  = 0x00000000ffffffffL;
+  private static long BYTE_MASK = 0x00000000000000ffL;
 
   /**
    * Allocate a new set to hold expectedSize values. Re-allocation to expand

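The two masks touched above (INT_MASK, BYTE_MASK) are the usual trick for treating an int or byte as unsigned inside 64-bit hash arithmetic: widen to long, then AND away the sign-extension bits. A small demo of the masking itself, unrelated to CuckooSetBytes' actual hash function:

    public class UnsignedMaskDemo {
        private static final long INT_MASK  = 0x00000000ffffffffL;
        private static final long BYTE_MASK = 0x00000000000000ffL;

        public static void main(String[] args) {
            int negative = -2;
            byte b = (byte) 0xF0;
            // Widening alone sign-extends; the mask drops the extended bits.
            System.out.println(((long) negative) & INT_MASK);      // 4294967294
            System.out.println(Integer.toUnsignedLong(negative));  // same value via the JDK helper
            System.out.println(((long) b) & BYTE_MASK);            // 240
        }
    }
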
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/OctetLength.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/OctetLength.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/OctetLength.java
deleted file mode 100644
index 3b41ed4..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/OctetLength.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector.expressions;
-
-import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
-import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
-import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
-import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
-
-public class OctetLength extends VectorExpression {
-  private static final long serialVersionUID = 1L;
-  private int colNum;
-  private int outputColumn;
-
-  public OctetLength(int colNum, int outputColumn) {
-    this();
-    this.colNum = colNum;
-    this.outputColumn = outputColumn;
-  }
-
-  public OctetLength() {
-    super();
-  }
-
-  // Calculate the length of the UTF-8 strings in input vector and place results in output vector.
-  @Override
-  public void evaluate(VectorizedRowBatch batch) {
-
-    if (childExpressions != null) {
-      super.evaluateChildren(batch);
-    }
-
-    BytesColumnVector inputColVector = (BytesColumnVector) batch.cols[colNum];
-    LongColumnVector outV = (LongColumnVector) batch.cols[outputColumn];
-    int[] sel = batch.selected;
-    int n = batch.size;
-    int [] length = inputColVector.length;
-    long[] resultLen = outV.vector;
-
-    if (n == 0) {
-      //Nothing to do
-      return;
-    }
-
-    if (inputColVector.noNulls) {
-      outV.noNulls = true;
-      if (inputColVector.isRepeating) {
-        outV.isRepeating = true;
-        resultLen[0] = length[0];
-      } else if (batch.selectedInUse) {
-        for(int j = 0; j != n; j++) {
-          int i = sel[j];
-          resultLen[i] = length[i];
-        }
-        outV.isRepeating = false;
-      } else {
-        for(int i = 0; i != n; i++) {
-          resultLen[i] = length[i];
-        }
-        outV.isRepeating = false;
-      }
-    } else {
-
-      /*
-       * Handle case with nulls. Don't do function if the value is null, to save time,
-       * because calling the function can be expensive.
-       */
-      outV.noNulls = false;
-      if (inputColVector.isRepeating) {
-        outV.isRepeating = true;
-        outV.isNull[0] = inputColVector.isNull[0];
-        if (!inputColVector.isNull[0]) {
-          resultLen[0] = length[0];
-        }
-      } else if (batch.selectedInUse) {
-        for(int j = 0; j != n; j++) {
-          int i = sel[j];
-          if (!inputColVector.isNull[i]) {
-            resultLen[i] = length[i];
-          }
-          outV.isNull[i] = inputColVector.isNull[i];
-        }
-        outV.isRepeating = false;
-      } else {
-        for(int i = 0; i != n; i++) {
-          if (!inputColVector.isNull[i]) {
-            resultLen[i] = length[i];
-          }
-          outV.isNull[i] = inputColVector.isNull[i];
-        }
-        outV.isRepeating = false;
-      }
-    }
-  }
-
-  @Override
-  public int getOutputColumn() {
-    return outputColumn;
-  }
-
-  @Override
-  public String getOutputType() {
-    return "Long";
-  }
-
-  public int getColNum() {
-    return colNum;
-  }
-
-  public void setColNum(int colNum) {
-    this.colNum = colNum;
-  }
-
-  public void setOutputColumn(int outputColumn) {
-    this.outputColumn = outputColumn;
-  }
-
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-  @Override
-  public VectorExpressionDescriptor.Descriptor getDescriptor() {
-    VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
-    b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)
-        .setNumArguments(1)
-        .setArgumentTypes(
-            VectorExpressionDescriptor.ArgumentType.STRING_FAMILY)
-        .setInputExpressionTypes(
-            VectorExpressionDescriptor.InputExpressionType.COLUMN);
-    return b.build();
-  }
-}

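The deleted OctetLength kernel copies BytesColumnVector.length[i] straight into the output, i.e. it reports the byte length of the UTF-8 value, which differs from the character length for multi-byte text. A row-at-a-time illustration of that distinction:

    import java.nio.charset.StandardCharsets;

    public class OctetLengthDemo {

        // Octet length = number of bytes in the UTF-8 encoding of the string.
        static long octetLength(String s) {
            return s.getBytes(StandardCharsets.UTF_8).length;
        }

        public static void main(String[] args) {
            String ascii = "hive";
            String accented = "café";
            System.out.println(ascii.length() + " chars, " + octetLength(ascii) + " bytes");       // 4 chars, 4 bytes
            System.out.println(accented.length() + " chars, " + octetLength(accented) + " bytes"); // 4 chars, 5 bytes
        }
    }
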
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorAggregateExpression.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorAggregateExpression.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorAggregateExpression.java
index 7ab4473..0866f63 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorAggregateExpression.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorAggregateExpression.java
@@ -52,7 +52,7 @@ public abstract class VectorAggregateExpression  implements Serializable {
   public abstract Object evaluateOutput(AggregationBuffer agg) throws HiveException;
 
   public abstract ObjectInspector getOutputObjectInspector();
-  public abstract long getAggregationBufferFixedSize();
+  public abstract int getAggregationBufferFixedSize();
   public boolean hasVariableSize() {
     return false;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgDecimal.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgDecimal.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgDecimal.java
index 4aac9d3..74e25ae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgDecimal.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgDecimal.java
@@ -492,7 +492,7 @@ public class VectorUDAFAvgDecimal extends VectorAggregateExpression {
   }
 
   @Override
-  public long getAggregationBufferFixedSize() {
+  public int getAggregationBufferFixedSize() {
     JavaDataModel model = JavaDataModel.get();
     return JavaDataModel.alignUp(
       model.object() +

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgTimestamp.java
index 365dcf6..483d9dc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgTimestamp.java
@@ -464,7 +464,7 @@ public class VectorUDAFAvgTimestamp extends VectorAggregateExpression {
   }
 
   @Override
-  public long getAggregationBufferFixedSize() {
+  public int getAggregationBufferFixedSize() {
     JavaDataModel model = JavaDataModel.get();
     return JavaDataModel.alignUp(
       model.object() +

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilter.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilter.java
index 52b05ca..2139eae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilter.java
@@ -383,7 +383,7 @@ public class VectorUDAFBloomFilter extends VectorAggregateExpression {
   }
 
   @Override
-  public long getAggregationBufferFixedSize() {
+  public int getAggregationBufferFixedSize() {
     if (bitSetSize < 0) {
       // Not pretty, but we need a way to get the size
       try {
@@ -396,7 +396,7 @@ public class VectorUDAFBloomFilter extends VectorAggregateExpression {
 
     // BloomFilter: object(BitSet: object(data: long[]), numBits: int, numHashFunctions: int)
     JavaDataModel model = JavaDataModel.get();
-    long bloomFilterSize = JavaDataModel.alignUp(model.object() + model.lengthForLongArrayOfSize(bitSetSize),
+    int bloomFilterSize = JavaDataModel.alignUp(model.object() + model.lengthForLongArrayOfSize(bitSetSize),
         model.memoryAlign());
     return JavaDataModel.alignUp(
         model.object() + bloomFilterSize + model.primitive1() + model.primitive1(),

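Each getAggregationBufferFixedSize() above finishes with JavaDataModel.alignUp(rawSize, memoryAlign), i.e. rounding the estimated buffer size up to the JVM's object alignment (typically 8 bytes). A generic sketch of that rounding step, not Hive's JavaDataModel itself:

    public class AlignUpSketch {

        // Round size up to the next multiple of align; align must be a power of two.
        static int alignUp(int size, int align) {
            return (size + align - 1) & ~(align - 1);
        }

        public static void main(String[] args) {
            System.out.println(alignUp(13, 8));  // 16
            System.out.println(alignUp(16, 8));  // 16 (already aligned)
            System.out.println(alignUp(17, 8));  // 24
        }
    }
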
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilterMerge.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilterMerge.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilterMerge.java
index b986eb4..d2446d5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilterMerge.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilterMerge.java
@@ -339,7 +339,7 @@ public class VectorUDAFBloomFilterMerge extends VectorAggregateExpression {
   }
 
   @Override
-  public long getAggregationBufferFixedSize() {
+  public int getAggregationBufferFixedSize() {
     if (aggBufferSize < 0) {
       // Not pretty, but we need a way to get the size
       try {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCount.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCount.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCount.java
index cadb6dd..494febc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCount.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCount.java
@@ -259,7 +259,7 @@ public class VectorUDAFCount extends VectorAggregateExpression {
     }
 
     @Override
-    public long getAggregationBufferFixedSize() {
+    public int getAggregationBufferFixedSize() {
       JavaDataModel model = JavaDataModel.get();
       return JavaDataModel.alignUp(
         model.object() +

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java
index c489f8f..dec88cb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java
@@ -385,7 +385,7 @@ public class VectorUDAFCountMerge extends VectorAggregateExpression {
     }
 
     @Override
-    public long getAggregationBufferFixedSize() {
+    public int getAggregationBufferFixedSize() {
       JavaDataModel model = JavaDataModel.get();
       return JavaDataModel.alignUp(
         model.object() +

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountStar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountStar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountStar.java
index 3b66030..337ba0a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountStar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountStar.java
@@ -142,7 +142,7 @@ public class VectorUDAFCountStar extends VectorAggregateExpression {
     }
 
     @Override
-    public long getAggregationBufferFixedSize() {
+    public int getAggregationBufferFixedSize() {
       JavaDataModel model = JavaDataModel.get();
       return JavaDataModel.alignUp(
         model.object() +

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdPopTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdPopTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdPopTimestamp.java
index 5388d37..8cd3506 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdPopTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdPopTimestamp.java
@@ -508,7 +508,7 @@ public class VectorUDAFStdPopTimestamp extends VectorAggregateExpression {
     }
 
   @Override
-  public long getAggregationBufferFixedSize() {
+  public int getAggregationBufferFixedSize() {
       JavaDataModel model = JavaDataModel.get();
       return JavaDataModel.alignUp(
         model.object() +

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdSampTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdSampTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdSampTimestamp.java
index 1769dc0..61d6977 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdSampTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdSampTimestamp.java
@@ -508,7 +508,7 @@ public class VectorUDAFStdSampTimestamp extends VectorAggregateExpression {
     }
 
   @Override
-  public long getAggregationBufferFixedSize() {
+  public int getAggregationBufferFixedSize() {
       JavaDataModel model = JavaDataModel.get();
       return JavaDataModel.alignUp(
         model.object() +

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal.java
index a37e3f6..b10f66f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal.java
@@ -431,7 +431,7 @@ public class VectorUDAFSumDecimal extends VectorAggregateExpression {
     }
 
   @Override
-  public long getAggregationBufferFixedSize() {
+  public int getAggregationBufferFixedSize() {
       JavaDataModel model = JavaDataModel.get();
       return JavaDataModel.alignUp(
         model.object(),

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarPopTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarPopTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarPopTimestamp.java
index 61cdeaa..2709b07 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarPopTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarPopTimestamp.java
@@ -508,7 +508,7 @@ public class VectorUDAFVarPopTimestamp extends VectorAggregateExpression {
     }
 
   @Override
-  public long getAggregationBufferFixedSize() {
+  public int getAggregationBufferFixedSize() {
       JavaDataModel model = JavaDataModel.get();
       return JavaDataModel.alignUp(
         model.object() +

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarSampTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarSampTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarSampTimestamp.java
index c375461..03dce1e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarSampTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarSampTimestamp.java
@@ -508,7 +508,7 @@ public class VectorUDAFVarSampTimestamp extends VectorAggregateExpression {
     }
 
   @Override
-  public long getAggregationBufferFixedSize() {
+  public int getAggregationBufferFixedSize() {
       JavaDataModel model = JavaDataModel.get();
       return JavaDataModel.alignUp(
         model.object() +

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
index c4d5113..cb30413 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
@@ -531,8 +531,6 @@ public abstract class VectorMapJoinGenerateResultOperator extends VectorMapJoinC
   protected void reloadHashTable(byte pos, int partitionId)
           throws IOException, HiveException, SerDeException, ClassNotFoundException {
 
-    this.vectorMapJoinHashTable = null;
-
     // The super method will reload a hash table partition of one of the small tables.
     // Currently, for native vector map join it will only be one small table.
     super.reloadHashTable(pos, partitionId);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMap.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMap.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMap.java
index b5eab8b..6242daf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMap.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMap.java
@@ -107,9 +107,4 @@ public abstract class VectorMapJoinFastBytesHashMap
     // Share the same write buffers with our value store.
     keyStore = new VectorMapJoinFastKeyStore(valueStore.writeBuffers());
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    return super.getEstimatedMemorySize() + valueStore.getEstimatedMemorySize() + keyStore.getEstimatedMemorySize();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMultiSet.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMultiSet.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMultiSet.java
index e779762..1a41961 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMultiSet.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMultiSet.java
@@ -97,9 +97,4 @@ public abstract class VectorMapJoinFastBytesHashMultiSet
 
     keyStore = new VectorMapJoinFastKeyStore(writeBuffersSize);
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    return super.getEstimatedMemorySize() + keyStore.getEstimatedMemorySize();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashSet.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashSet.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashSet.java
index d493319..331867c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashSet.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashSet.java
@@ -84,9 +84,4 @@ public abstract class VectorMapJoinFastBytesHashSet
 
     keyStore = new VectorMapJoinFastKeyStore(writeBuffersSize);
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    return super.getEstimatedMemorySize() + keyStore.getEstimatedMemorySize();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java
index 10bc902..b93e977 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast;
 
 import java.io.IOException;
 
-import org.apache.hadoop.hive.ql.util.JavaDataModel;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashTable;
@@ -219,9 +218,4 @@ public abstract class VectorMapJoinFastBytesHashTable
     super(initialCapacity, loadFactor, writeBuffersSize, estimatedKeyCount);
     allocateBucketArray();
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    return super.getEstimatedMemorySize() + JavaDataModel.get().lengthForLongArrayOfSize(slotTriples.length);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTable.java
index 1f182ee..9030e5f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTable.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast;
 
-import org.apache.hadoop.hive.ql.util.JavaDataModel;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTable;
@@ -41,10 +40,10 @@ public abstract class VectorMapJoinFastHashTable implements VectorMapJoinHashTab
   protected int metricExpands;
 
   // 2^30 (we cannot use Integer.MAX_VALUE which is 2^31-1).
-  public static final int HIGHEST_INT_POWER_OF_2 = 1073741824;
+  public static int HIGHEST_INT_POWER_OF_2 = 1073741824;
 
-  public static final int ONE_QUARTER_LIMIT = HIGHEST_INT_POWER_OF_2 / 4;
-  public static final int ONE_SIXTH_LIMIT = HIGHEST_INT_POWER_OF_2 / 6;
+  public static int ONE_QUARTER_LIMIT = HIGHEST_INT_POWER_OF_2 / 4;
+  public static int ONE_SIXTH_LIMIT = HIGHEST_INT_POWER_OF_2 / 6;
 
   public void throwExpandError(int limit, String dataTypeName) {
     throw new RuntimeException(
@@ -89,10 +88,4 @@ public abstract class VectorMapJoinFastHashTable implements VectorMapJoinHashTab
   public int size() {
     return keysAssigned;
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    JavaDataModel jdm = JavaDataModel.get();
-    return JavaDataModel.alignUp(10L * jdm.primitive1() + jdm.primitive2(), jdm.memoryAlign());
-  }
 }
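
Note on the three Java hunks above: they drop the getEstimatedMemorySize() overrides (and the now-unused JavaDataModel imports) from the fast vector map-join hash table classes. The removed overrides build their estimate from JavaDataModel: a fixed, alignment-rounded cost for the primitive fields plus the cost of the backing long[] slot array. A minimal sketch of that pattern follows; JavaDataModel and the calls on it are the real Hive utility seen in the removed lines, while the class and its slotTriples field are placeholders, not the actual hash table.

  import org.apache.hadoop.hive.ql.util.JavaDataModel;

  // Placeholder class illustrating the estimation pattern from the removed overrides.
  public class EstimatedMemorySketch {
    // Stand-in for the slot array a bytes hash table keeps (3 longs per slot here).
    private final long[] slotTriples = new long[3 * 1024];

    public long getEstimatedMemorySize() {
      JavaDataModel jdm = JavaDataModel.get();
      // Fixed cost of the object's primitive fields, rounded up to the JVM object alignment.
      long base = JavaDataModel.alignUp(10L * jdm.primitive1() + jdm.primitive2(), jdm.memoryAlign());
      // Cost of the backing long[]: array header plus 8 bytes per element.
      long slots = jdm.lengthForLongArrayOfSize(slotTriples.length);
      return base + slots;
    }

    public static void main(String[] args) {
      System.out.println(new EstimatedMemorySketch().getEstimatedMemorySize());
    }
  }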


[21/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index cfa2e49..96c2b0b 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -12027,7 +12027,7 @@ void GetOpenTxnsResponse::__set_txn_high_water_mark(const int64_t val) {
   this->txn_high_water_mark = val;
 }
 
-void GetOpenTxnsResponse::__set_open_txns(const std::vector<int64_t> & val) {
+void GetOpenTxnsResponse::__set_open_txns(const std::set<int64_t> & val) {
   this->open_txns = val;
 }
 
@@ -12036,10 +12036,6 @@ void GetOpenTxnsResponse::__set_min_open_txn(const int64_t val) {
 __isset.min_open_txn = true;
 }
 
-void GetOpenTxnsResponse::__set_abortedBits(const std::string& val) {
-  this->abortedBits = val;
-}
-
 uint32_t GetOpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -12054,7 +12050,6 @@ uint32_t GetOpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot)
 
   bool isset_txn_high_water_mark = false;
   bool isset_open_txns = false;
-  bool isset_abortedBits = false;
 
   while (true)
   {
@@ -12073,19 +12068,20 @@ uint32_t GetOpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot)
         }
         break;
       case 2:
-        if (ftype == ::apache::thrift::protocol::T_LIST) {
+        if (ftype == ::apache::thrift::protocol::T_SET) {
           {
             this->open_txns.clear();
             uint32_t _size517;
             ::apache::thrift::protocol::TType _etype520;
-            xfer += iprot->readListBegin(_etype520, _size517);
-            this->open_txns.resize(_size517);
+            xfer += iprot->readSetBegin(_etype520, _size517);
             uint32_t _i521;
             for (_i521 = 0; _i521 < _size517; ++_i521)
             {
-              xfer += iprot->readI64(this->open_txns[_i521]);
+              int64_t _elem522;
+              xfer += iprot->readI64(_elem522);
+              this->open_txns.insert(_elem522);
             }
-            xfer += iprot->readListEnd();
+            xfer += iprot->readSetEnd();
           }
           isset_open_txns = true;
         } else {
@@ -12100,14 +12096,6 @@ uint32_t GetOpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot)
           xfer += iprot->skip(ftype);
         }
         break;
-      case 4:
-        if (ftype == ::apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readBinary(this->abortedBits);
-          isset_abortedBits = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -12121,8 +12109,6 @@ uint32_t GetOpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot)
     throw TProtocolException(TProtocolException::INVALID_DATA);
   if (!isset_open_txns)
     throw TProtocolException(TProtocolException::INVALID_DATA);
-  if (!isset_abortedBits)
-    throw TProtocolException(TProtocolException::INVALID_DATA);
   return xfer;
 }
 
@@ -12135,15 +12121,15 @@ uint32_t GetOpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot
   xfer += oprot->writeI64(this->txn_high_water_mark);
   xfer += oprot->writeFieldEnd();
 
-  xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_LIST, 2);
+  xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_SET, 2);
   {
-    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->open_txns.size()));
-    std::vector<int64_t> ::const_iterator _iter522;
-    for (_iter522 = this->open_txns.begin(); _iter522 != this->open_txns.end(); ++_iter522)
+    xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->open_txns.size()));
+    std::set<int64_t> ::const_iterator _iter523;
+    for (_iter523 = this->open_txns.begin(); _iter523 != this->open_txns.end(); ++_iter523)
     {
-      xfer += oprot->writeI64((*_iter522));
+      xfer += oprot->writeI64((*_iter523));
     }
-    xfer += oprot->writeListEnd();
+    xfer += oprot->writeSetEnd();
   }
   xfer += oprot->writeFieldEnd();
 
@@ -12152,10 +12138,6 @@ uint32_t GetOpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot
     xfer += oprot->writeI64(this->min_open_txn);
     xfer += oprot->writeFieldEnd();
   }
-  xfer += oprot->writeFieldBegin("abortedBits", ::apache::thrift::protocol::T_STRING, 4);
-  xfer += oprot->writeBinary(this->abortedBits);
-  xfer += oprot->writeFieldEnd();
-
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -12166,23 +12148,20 @@ void swap(GetOpenTxnsResponse &a, GetOpenTxnsResponse &b) {
   swap(a.txn_high_water_mark, b.txn_high_water_mark);
   swap(a.open_txns, b.open_txns);
   swap(a.min_open_txn, b.min_open_txn);
-  swap(a.abortedBits, b.abortedBits);
   swap(a.__isset, b.__isset);
 }
 
-GetOpenTxnsResponse::GetOpenTxnsResponse(const GetOpenTxnsResponse& other523) {
-  txn_high_water_mark = other523.txn_high_water_mark;
-  open_txns = other523.open_txns;
-  min_open_txn = other523.min_open_txn;
-  abortedBits = other523.abortedBits;
-  __isset = other523.__isset;
-}
-GetOpenTxnsResponse& GetOpenTxnsResponse::operator=(const GetOpenTxnsResponse& other524) {
+GetOpenTxnsResponse::GetOpenTxnsResponse(const GetOpenTxnsResponse& other524) {
   txn_high_water_mark = other524.txn_high_water_mark;
   open_txns = other524.open_txns;
   min_open_txn = other524.min_open_txn;
-  abortedBits = other524.abortedBits;
   __isset = other524.__isset;
+}
+GetOpenTxnsResponse& GetOpenTxnsResponse::operator=(const GetOpenTxnsResponse& other525) {
+  txn_high_water_mark = other525.txn_high_water_mark;
+  open_txns = other525.open_txns;
+  min_open_txn = other525.min_open_txn;
+  __isset = other525.__isset;
   return *this;
 }
 void GetOpenTxnsResponse::printTo(std::ostream& out) const {
@@ -12191,7 +12170,6 @@ void GetOpenTxnsResponse::printTo(std::ostream& out) const {
   out << "txn_high_water_mark=" << to_string(txn_high_water_mark);
   out << ", " << "open_txns=" << to_string(open_txns);
   out << ", " << "min_open_txn="; (__isset.min_open_txn ? (out << to_string(min_open_txn)) : (out << "<null>"));
-  out << ", " << "abortedBits=" << to_string(abortedBits);
   out << ")";
 }
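
Note on the GetOpenTxnsResponse hunks above: the generated code goes back to open_txns as a Thrift set<i64> and loses the abortedBits binary field; the pre-revert shape was a list<i64> paired with a bitmap so aborted transactions could be flagged without a second collection. The sketch below contrasts the two shapes with plain Java stand-ins (these classes are illustrative, not the generated metastore types; the convention that bit i marks txnIds.get(i) as aborted is an assumption about the list+bitmap form).

  import java.util.ArrayList;
  import java.util.BitSet;
  import java.util.List;
  import java.util.Set;
  import java.util.TreeSet;

  // Illustrative stand-ins for the two snapshot shapes; not the generated Thrift classes.
  public class OpenTxnSnapshotSketch {

    // Pre-revert shape: ordered txn ids plus a parallel bitmap of aborted flags
    // (assumed convention: bit i set means txnIds.get(i) is aborted, not open).
    static final class ListWithAbortedBits {
      final List<Long> txnIds = new ArrayList<>();
      final BitSet abortedBits = new BitSet();
    }

    // Reverted shape: only the set of open txn ids; no per-txn aborted flag.
    static final class PlainSet {
      final Set<Long> openTxns = new TreeSet<>();
    }

    public static void main(String[] args) {
      ListWithAbortedBits a = new ListWithAbortedBits();
      a.txnIds.add(7L);
      a.txnIds.add(9L);
      a.abortedBits.set(1); // marks txn 9 as aborted in the list+bitmap shape

      PlainSet b = new PlainSet();
      b.openTxns.add(7L);
      b.openTxns.add(9L); // indistinguishable from any other open txn here

      System.out.println(a.txnIds + " aborted=" + a.abortedBits + " vs " + b.openTxns);
    }
  }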
 
@@ -12327,19 +12305,19 @@ void swap(OpenTxnRequest &a, OpenTxnRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-OpenTxnRequest::OpenTxnRequest(const OpenTxnRequest& other525) {
-  num_txns = other525.num_txns;
-  user = other525.user;
-  hostname = other525.hostname;
-  agentInfo = other525.agentInfo;
-  __isset = other525.__isset;
-}
-OpenTxnRequest& OpenTxnRequest::operator=(const OpenTxnRequest& other526) {
+OpenTxnRequest::OpenTxnRequest(const OpenTxnRequest& other526) {
   num_txns = other526.num_txns;
   user = other526.user;
   hostname = other526.hostname;
   agentInfo = other526.agentInfo;
   __isset = other526.__isset;
+}
+OpenTxnRequest& OpenTxnRequest::operator=(const OpenTxnRequest& other527) {
+  num_txns = other527.num_txns;
+  user = other527.user;
+  hostname = other527.hostname;
+  agentInfo = other527.agentInfo;
+  __isset = other527.__isset;
   return *this;
 }
 void OpenTxnRequest::printTo(std::ostream& out) const {
@@ -12387,14 +12365,14 @@ uint32_t OpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->txn_ids.clear();
-            uint32_t _size527;
-            ::apache::thrift::protocol::TType _etype530;
-            xfer += iprot->readListBegin(_etype530, _size527);
-            this->txn_ids.resize(_size527);
-            uint32_t _i531;
-            for (_i531 = 0; _i531 < _size527; ++_i531)
+            uint32_t _size528;
+            ::apache::thrift::protocol::TType _etype531;
+            xfer += iprot->readListBegin(_etype531, _size528);
+            this->txn_ids.resize(_size528);
+            uint32_t _i532;
+            for (_i532 = 0; _i532 < _size528; ++_i532)
             {
-              xfer += iprot->readI64(this->txn_ids[_i531]);
+              xfer += iprot->readI64(this->txn_ids[_i532]);
             }
             xfer += iprot->readListEnd();
           }
@@ -12425,10 +12403,10 @@ uint32_t OpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot) c
   xfer += oprot->writeFieldBegin("txn_ids", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->txn_ids.size()));
-    std::vector<int64_t> ::const_iterator _iter532;
-    for (_iter532 = this->txn_ids.begin(); _iter532 != this->txn_ids.end(); ++_iter532)
+    std::vector<int64_t> ::const_iterator _iter533;
+    for (_iter533 = this->txn_ids.begin(); _iter533 != this->txn_ids.end(); ++_iter533)
     {
-      xfer += oprot->writeI64((*_iter532));
+      xfer += oprot->writeI64((*_iter533));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12444,11 +12422,11 @@ void swap(OpenTxnsResponse &a, OpenTxnsResponse &b) {
   swap(a.txn_ids, b.txn_ids);
 }
 
-OpenTxnsResponse::OpenTxnsResponse(const OpenTxnsResponse& other533) {
-  txn_ids = other533.txn_ids;
-}
-OpenTxnsResponse& OpenTxnsResponse::operator=(const OpenTxnsResponse& other534) {
+OpenTxnsResponse::OpenTxnsResponse(const OpenTxnsResponse& other534) {
   txn_ids = other534.txn_ids;
+}
+OpenTxnsResponse& OpenTxnsResponse::operator=(const OpenTxnsResponse& other535) {
+  txn_ids = other535.txn_ids;
   return *this;
 }
 void OpenTxnsResponse::printTo(std::ostream& out) const {
@@ -12530,11 +12508,11 @@ void swap(AbortTxnRequest &a, AbortTxnRequest &b) {
   swap(a.txnid, b.txnid);
 }
 
-AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other535) {
-  txnid = other535.txnid;
-}
-AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other536) {
+AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other536) {
   txnid = other536.txnid;
+}
+AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other537) {
+  txnid = other537.txnid;
   return *this;
 }
 void AbortTxnRequest::printTo(std::ostream& out) const {
@@ -12579,14 +12557,14 @@ uint32_t AbortTxnsRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->txn_ids.clear();
-            uint32_t _size537;
-            ::apache::thrift::protocol::TType _etype540;
-            xfer += iprot->readListBegin(_etype540, _size537);
-            this->txn_ids.resize(_size537);
-            uint32_t _i541;
-            for (_i541 = 0; _i541 < _size537; ++_i541)
+            uint32_t _size538;
+            ::apache::thrift::protocol::TType _etype541;
+            xfer += iprot->readListBegin(_etype541, _size538);
+            this->txn_ids.resize(_size538);
+            uint32_t _i542;
+            for (_i542 = 0; _i542 < _size538; ++_i542)
             {
-              xfer += iprot->readI64(this->txn_ids[_i541]);
+              xfer += iprot->readI64(this->txn_ids[_i542]);
             }
             xfer += iprot->readListEnd();
           }
@@ -12617,10 +12595,10 @@ uint32_t AbortTxnsRequest::write(::apache::thrift::protocol::TProtocol* oprot) c
   xfer += oprot->writeFieldBegin("txn_ids", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->txn_ids.size()));
-    std::vector<int64_t> ::const_iterator _iter542;
-    for (_iter542 = this->txn_ids.begin(); _iter542 != this->txn_ids.end(); ++_iter542)
+    std::vector<int64_t> ::const_iterator _iter543;
+    for (_iter543 = this->txn_ids.begin(); _iter543 != this->txn_ids.end(); ++_iter543)
     {
-      xfer += oprot->writeI64((*_iter542));
+      xfer += oprot->writeI64((*_iter543));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12636,11 +12614,11 @@ void swap(AbortTxnsRequest &a, AbortTxnsRequest &b) {
   swap(a.txn_ids, b.txn_ids);
 }
 
-AbortTxnsRequest::AbortTxnsRequest(const AbortTxnsRequest& other543) {
-  txn_ids = other543.txn_ids;
-}
-AbortTxnsRequest& AbortTxnsRequest::operator=(const AbortTxnsRequest& other544) {
+AbortTxnsRequest::AbortTxnsRequest(const AbortTxnsRequest& other544) {
   txn_ids = other544.txn_ids;
+}
+AbortTxnsRequest& AbortTxnsRequest::operator=(const AbortTxnsRequest& other545) {
+  txn_ids = other545.txn_ids;
   return *this;
 }
 void AbortTxnsRequest::printTo(std::ostream& out) const {
@@ -12722,11 +12700,11 @@ void swap(CommitTxnRequest &a, CommitTxnRequest &b) {
   swap(a.txnid, b.txnid);
 }
 
-CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other545) {
-  txnid = other545.txnid;
-}
-CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& other546) {
+CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other546) {
   txnid = other546.txnid;
+}
+CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& other547) {
+  txnid = other547.txnid;
   return *this;
 }
 void CommitTxnRequest::printTo(std::ostream& out) const {
@@ -12804,9 +12782,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) {
     {
       case 1:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast547;
-          xfer += iprot->readI32(ecast547);
-          this->type = (LockType::type)ecast547;
+          int32_t ecast548;
+          xfer += iprot->readI32(ecast548);
+          this->type = (LockType::type)ecast548;
           isset_type = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -12814,9 +12792,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) {
         break;
       case 2:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast548;
-          xfer += iprot->readI32(ecast548);
-          this->level = (LockLevel::type)ecast548;
+          int32_t ecast549;
+          xfer += iprot->readI32(ecast549);
+          this->level = (LockLevel::type)ecast549;
           isset_level = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -12848,9 +12826,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) {
         break;
       case 6:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast549;
-          xfer += iprot->readI32(ecast549);
-          this->operationType = (DataOperationType::type)ecast549;
+          int32_t ecast550;
+          xfer += iprot->readI32(ecast550);
+          this->operationType = (DataOperationType::type)ecast550;
           this->__isset.operationType = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -12950,18 +12928,7 @@ void swap(LockComponent &a, LockComponent &b) {
   swap(a.__isset, b.__isset);
 }
 
-LockComponent::LockComponent(const LockComponent& other550) {
-  type = other550.type;
-  level = other550.level;
-  dbname = other550.dbname;
-  tablename = other550.tablename;
-  partitionname = other550.partitionname;
-  operationType = other550.operationType;
-  isAcid = other550.isAcid;
-  isDynamicPartitionWrite = other550.isDynamicPartitionWrite;
-  __isset = other550.__isset;
-}
-LockComponent& LockComponent::operator=(const LockComponent& other551) {
+LockComponent::LockComponent(const LockComponent& other551) {
   type = other551.type;
   level = other551.level;
   dbname = other551.dbname;
@@ -12971,6 +12938,17 @@ LockComponent& LockComponent::operator=(const LockComponent& other551) {
   isAcid = other551.isAcid;
   isDynamicPartitionWrite = other551.isDynamicPartitionWrite;
   __isset = other551.__isset;
+}
+LockComponent& LockComponent::operator=(const LockComponent& other552) {
+  type = other552.type;
+  level = other552.level;
+  dbname = other552.dbname;
+  tablename = other552.tablename;
+  partitionname = other552.partitionname;
+  operationType = other552.operationType;
+  isAcid = other552.isAcid;
+  isDynamicPartitionWrite = other552.isDynamicPartitionWrite;
+  __isset = other552.__isset;
   return *this;
 }
 void LockComponent::printTo(std::ostream& out) const {
@@ -13042,14 +13020,14 @@ uint32_t LockRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->component.clear();
-            uint32_t _size552;
-            ::apache::thrift::protocol::TType _etype555;
-            xfer += iprot->readListBegin(_etype555, _size552);
-            this->component.resize(_size552);
-            uint32_t _i556;
-            for (_i556 = 0; _i556 < _size552; ++_i556)
+            uint32_t _size553;
+            ::apache::thrift::protocol::TType _etype556;
+            xfer += iprot->readListBegin(_etype556, _size553);
+            this->component.resize(_size553);
+            uint32_t _i557;
+            for (_i557 = 0; _i557 < _size553; ++_i557)
             {
-              xfer += this->component[_i556].read(iprot);
+              xfer += this->component[_i557].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -13116,10 +13094,10 @@ uint32_t LockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const
   xfer += oprot->writeFieldBegin("component", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->component.size()));
-    std::vector<LockComponent> ::const_iterator _iter557;
-    for (_iter557 = this->component.begin(); _iter557 != this->component.end(); ++_iter557)
+    std::vector<LockComponent> ::const_iterator _iter558;
+    for (_iter558 = this->component.begin(); _iter558 != this->component.end(); ++_iter558)
     {
-      xfer += (*_iter557).write(oprot);
+      xfer += (*_iter558).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -13158,21 +13136,21 @@ void swap(LockRequest &a, LockRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-LockRequest::LockRequest(const LockRequest& other558) {
-  component = other558.component;
-  txnid = other558.txnid;
-  user = other558.user;
-  hostname = other558.hostname;
-  agentInfo = other558.agentInfo;
-  __isset = other558.__isset;
-}
-LockRequest& LockRequest::operator=(const LockRequest& other559) {
+LockRequest::LockRequest(const LockRequest& other559) {
   component = other559.component;
   txnid = other559.txnid;
   user = other559.user;
   hostname = other559.hostname;
   agentInfo = other559.agentInfo;
   __isset = other559.__isset;
+}
+LockRequest& LockRequest::operator=(const LockRequest& other560) {
+  component = other560.component;
+  txnid = other560.txnid;
+  user = other560.user;
+  hostname = other560.hostname;
+  agentInfo = other560.agentInfo;
+  __isset = other560.__isset;
   return *this;
 }
 void LockRequest::printTo(std::ostream& out) const {
@@ -13232,9 +13210,9 @@ uint32_t LockResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
         break;
       case 2:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast560;
-          xfer += iprot->readI32(ecast560);
-          this->state = (LockState::type)ecast560;
+          int32_t ecast561;
+          xfer += iprot->readI32(ecast561);
+          this->state = (LockState::type)ecast561;
           isset_state = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -13280,13 +13258,13 @@ void swap(LockResponse &a, LockResponse &b) {
   swap(a.state, b.state);
 }
 
-LockResponse::LockResponse(const LockResponse& other561) {
-  lockid = other561.lockid;
-  state = other561.state;
-}
-LockResponse& LockResponse::operator=(const LockResponse& other562) {
+LockResponse::LockResponse(const LockResponse& other562) {
   lockid = other562.lockid;
   state = other562.state;
+}
+LockResponse& LockResponse::operator=(const LockResponse& other563) {
+  lockid = other563.lockid;
+  state = other563.state;
   return *this;
 }
 void LockResponse::printTo(std::ostream& out) const {
@@ -13408,17 +13386,17 @@ void swap(CheckLockRequest &a, CheckLockRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-CheckLockRequest::CheckLockRequest(const CheckLockRequest& other563) {
-  lockid = other563.lockid;
-  txnid = other563.txnid;
-  elapsed_ms = other563.elapsed_ms;
-  __isset = other563.__isset;
-}
-CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other564) {
+CheckLockRequest::CheckLockRequest(const CheckLockRequest& other564) {
   lockid = other564.lockid;
   txnid = other564.txnid;
   elapsed_ms = other564.elapsed_ms;
   __isset = other564.__isset;
+}
+CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other565) {
+  lockid = other565.lockid;
+  txnid = other565.txnid;
+  elapsed_ms = other565.elapsed_ms;
+  __isset = other565.__isset;
   return *this;
 }
 void CheckLockRequest::printTo(std::ostream& out) const {
@@ -13502,11 +13480,11 @@ void swap(UnlockRequest &a, UnlockRequest &b) {
   swap(a.lockid, b.lockid);
 }
 
-UnlockRequest::UnlockRequest(const UnlockRequest& other565) {
-  lockid = other565.lockid;
-}
-UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other566) {
+UnlockRequest::UnlockRequest(const UnlockRequest& other566) {
   lockid = other566.lockid;
+}
+UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other567) {
+  lockid = other567.lockid;
   return *this;
 }
 void UnlockRequest::printTo(std::ostream& out) const {
@@ -13645,19 +13623,19 @@ void swap(ShowLocksRequest &a, ShowLocksRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other567) {
-  dbname = other567.dbname;
-  tablename = other567.tablename;
-  partname = other567.partname;
-  isExtended = other567.isExtended;
-  __isset = other567.__isset;
-}
-ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other568) {
+ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other568) {
   dbname = other568.dbname;
   tablename = other568.tablename;
   partname = other568.partname;
   isExtended = other568.isExtended;
   __isset = other568.__isset;
+}
+ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other569) {
+  dbname = other569.dbname;
+  tablename = other569.tablename;
+  partname = other569.partname;
+  isExtended = other569.isExtended;
+  __isset = other569.__isset;
   return *this;
 }
 void ShowLocksRequest::printTo(std::ostream& out) const {
@@ -13810,9 +13788,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i
         break;
       case 5:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast569;
-          xfer += iprot->readI32(ecast569);
-          this->state = (LockState::type)ecast569;
+          int32_t ecast570;
+          xfer += iprot->readI32(ecast570);
+          this->state = (LockState::type)ecast570;
           isset_state = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -13820,9 +13798,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i
         break;
       case 6:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast570;
-          xfer += iprot->readI32(ecast570);
-          this->type = (LockType::type)ecast570;
+          int32_t ecast571;
+          xfer += iprot->readI32(ecast571);
+          this->type = (LockType::type)ecast571;
           isset_type = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -14038,26 +14016,7 @@ void swap(ShowLocksResponseElement &a, ShowLocksResponseElement &b) {
   swap(a.__isset, b.__isset);
 }
 
-ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other571) {
-  lockid = other571.lockid;
-  dbname = other571.dbname;
-  tablename = other571.tablename;
-  partname = other571.partname;
-  state = other571.state;
-  type = other571.type;
-  txnid = other571.txnid;
-  lastheartbeat = other571.lastheartbeat;
-  acquiredat = other571.acquiredat;
-  user = other571.user;
-  hostname = other571.hostname;
-  heartbeatCount = other571.heartbeatCount;
-  agentInfo = other571.agentInfo;
-  blockedByExtId = other571.blockedByExtId;
-  blockedByIntId = other571.blockedByIntId;
-  lockIdInternal = other571.lockIdInternal;
-  __isset = other571.__isset;
-}
-ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other572) {
+ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other572) {
   lockid = other572.lockid;
   dbname = other572.dbname;
   tablename = other572.tablename;
@@ -14075,6 +14034,25 @@ ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksRes
   blockedByIntId = other572.blockedByIntId;
   lockIdInternal = other572.lockIdInternal;
   __isset = other572.__isset;
+}
+ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other573) {
+  lockid = other573.lockid;
+  dbname = other573.dbname;
+  tablename = other573.tablename;
+  partname = other573.partname;
+  state = other573.state;
+  type = other573.type;
+  txnid = other573.txnid;
+  lastheartbeat = other573.lastheartbeat;
+  acquiredat = other573.acquiredat;
+  user = other573.user;
+  hostname = other573.hostname;
+  heartbeatCount = other573.heartbeatCount;
+  agentInfo = other573.agentInfo;
+  blockedByExtId = other573.blockedByExtId;
+  blockedByIntId = other573.blockedByIntId;
+  lockIdInternal = other573.lockIdInternal;
+  __isset = other573.__isset;
   return *this;
 }
 void ShowLocksResponseElement::printTo(std::ostream& out) const {
@@ -14133,14 +14111,14 @@ uint32_t ShowLocksResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->locks.clear();
-            uint32_t _size573;
-            ::apache::thrift::protocol::TType _etype576;
-            xfer += iprot->readListBegin(_etype576, _size573);
-            this->locks.resize(_size573);
-            uint32_t _i577;
-            for (_i577 = 0; _i577 < _size573; ++_i577)
+            uint32_t _size574;
+            ::apache::thrift::protocol::TType _etype577;
+            xfer += iprot->readListBegin(_etype577, _size574);
+            this->locks.resize(_size574);
+            uint32_t _i578;
+            for (_i578 = 0; _i578 < _size574; ++_i578)
             {
-              xfer += this->locks[_i577].read(iprot);
+              xfer += this->locks[_i578].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -14169,10 +14147,10 @@ uint32_t ShowLocksResponse::write(::apache::thrift::protocol::TProtocol* oprot)
   xfer += oprot->writeFieldBegin("locks", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->locks.size()));
-    std::vector<ShowLocksResponseElement> ::const_iterator _iter578;
-    for (_iter578 = this->locks.begin(); _iter578 != this->locks.end(); ++_iter578)
+    std::vector<ShowLocksResponseElement> ::const_iterator _iter579;
+    for (_iter579 = this->locks.begin(); _iter579 != this->locks.end(); ++_iter579)
     {
-      xfer += (*_iter578).write(oprot);
+      xfer += (*_iter579).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -14189,13 +14167,13 @@ void swap(ShowLocksResponse &a, ShowLocksResponse &b) {
   swap(a.__isset, b.__isset);
 }
 
-ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other579) {
-  locks = other579.locks;
-  __isset = other579.__isset;
-}
-ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other580) {
+ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other580) {
   locks = other580.locks;
   __isset = other580.__isset;
+}
+ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other581) {
+  locks = other581.locks;
+  __isset = other581.__isset;
   return *this;
 }
 void ShowLocksResponse::printTo(std::ostream& out) const {
@@ -14296,15 +14274,15 @@ void swap(HeartbeatRequest &a, HeartbeatRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other581) {
-  lockid = other581.lockid;
-  txnid = other581.txnid;
-  __isset = other581.__isset;
-}
-HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other582) {
+HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other582) {
   lockid = other582.lockid;
   txnid = other582.txnid;
   __isset = other582.__isset;
+}
+HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other583) {
+  lockid = other583.lockid;
+  txnid = other583.txnid;
+  __isset = other583.__isset;
   return *this;
 }
 void HeartbeatRequest::printTo(std::ostream& out) const {
@@ -14407,13 +14385,13 @@ void swap(HeartbeatTxnRangeRequest &a, HeartbeatTxnRangeRequest &b) {
   swap(a.max, b.max);
 }
 
-HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other583) {
-  min = other583.min;
-  max = other583.max;
-}
-HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other584) {
+HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other584) {
   min = other584.min;
   max = other584.max;
+}
+HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other585) {
+  min = other585.min;
+  max = other585.max;
   return *this;
 }
 void HeartbeatTxnRangeRequest::printTo(std::ostream& out) const {
@@ -14464,15 +14442,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol*
         if (ftype == ::apache::thrift::protocol::T_SET) {
           {
             this->aborted.clear();
-            uint32_t _size585;
-            ::apache::thrift::protocol::TType _etype588;
-            xfer += iprot->readSetBegin(_etype588, _size585);
-            uint32_t _i589;
-            for (_i589 = 0; _i589 < _size585; ++_i589)
+            uint32_t _size586;
+            ::apache::thrift::protocol::TType _etype589;
+            xfer += iprot->readSetBegin(_etype589, _size586);
+            uint32_t _i590;
+            for (_i590 = 0; _i590 < _size586; ++_i590)
             {
-              int64_t _elem590;
-              xfer += iprot->readI64(_elem590);
-              this->aborted.insert(_elem590);
+              int64_t _elem591;
+              xfer += iprot->readI64(_elem591);
+              this->aborted.insert(_elem591);
             }
             xfer += iprot->readSetEnd();
           }
@@ -14485,15 +14463,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol*
         if (ftype == ::apache::thrift::protocol::T_SET) {
           {
             this->nosuch.clear();
-            uint32_t _size591;
-            ::apache::thrift::protocol::TType _etype594;
-            xfer += iprot->readSetBegin(_etype594, _size591);
-            uint32_t _i595;
-            for (_i595 = 0; _i595 < _size591; ++_i595)
+            uint32_t _size592;
+            ::apache::thrift::protocol::TType _etype595;
+            xfer += iprot->readSetBegin(_etype595, _size592);
+            uint32_t _i596;
+            for (_i596 = 0; _i596 < _size592; ++_i596)
             {
-              int64_t _elem596;
-              xfer += iprot->readI64(_elem596);
-              this->nosuch.insert(_elem596);
+              int64_t _elem597;
+              xfer += iprot->readI64(_elem597);
+              this->nosuch.insert(_elem597);
             }
             xfer += iprot->readSetEnd();
           }
@@ -14526,10 +14504,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol*
   xfer += oprot->writeFieldBegin("aborted", ::apache::thrift::protocol::T_SET, 1);
   {
     xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->aborted.size()));
-    std::set<int64_t> ::const_iterator _iter597;
-    for (_iter597 = this->aborted.begin(); _iter597 != this->aborted.end(); ++_iter597)
+    std::set<int64_t> ::const_iterator _iter598;
+    for (_iter598 = this->aborted.begin(); _iter598 != this->aborted.end(); ++_iter598)
     {
-      xfer += oprot->writeI64((*_iter597));
+      xfer += oprot->writeI64((*_iter598));
     }
     xfer += oprot->writeSetEnd();
   }
@@ -14538,10 +14516,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol*
   xfer += oprot->writeFieldBegin("nosuch", ::apache::thrift::protocol::T_SET, 2);
   {
     xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->nosuch.size()));
-    std::set<int64_t> ::const_iterator _iter598;
-    for (_iter598 = this->nosuch.begin(); _iter598 != this->nosuch.end(); ++_iter598)
+    std::set<int64_t> ::const_iterator _iter599;
+    for (_iter599 = this->nosuch.begin(); _iter599 != this->nosuch.end(); ++_iter599)
     {
-      xfer += oprot->writeI64((*_iter598));
+      xfer += oprot->writeI64((*_iter599));
     }
     xfer += oprot->writeSetEnd();
   }
@@ -14558,13 +14536,13 @@ void swap(HeartbeatTxnRangeResponse &a, HeartbeatTxnRangeResponse &b) {
   swap(a.nosuch, b.nosuch);
 }
 
-HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other599) {
-  aborted = other599.aborted;
-  nosuch = other599.nosuch;
-}
-HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other600) {
+HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other600) {
   aborted = other600.aborted;
   nosuch = other600.nosuch;
+}
+HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other601) {
+  aborted = other601.aborted;
+  nosuch = other601.nosuch;
   return *this;
 }
 void HeartbeatTxnRangeResponse::printTo(std::ostream& out) const {
@@ -14657,9 +14635,9 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
         break;
       case 4:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast601;
-          xfer += iprot->readI32(ecast601);
-          this->type = (CompactionType::type)ecast601;
+          int32_t ecast602;
+          xfer += iprot->readI32(ecast602);
+          this->type = (CompactionType::type)ecast602;
           isset_type = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -14677,17 +14655,17 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->properties.clear();
-            uint32_t _size602;
-            ::apache::thrift::protocol::TType _ktype603;
-            ::apache::thrift::protocol::TType _vtype604;
-            xfer += iprot->readMapBegin(_ktype603, _vtype604, _size602);
-            uint32_t _i606;
-            for (_i606 = 0; _i606 < _size602; ++_i606)
+            uint32_t _size603;
+            ::apache::thrift::protocol::TType _ktype604;
+            ::apache::thrift::protocol::TType _vtype605;
+            xfer += iprot->readMapBegin(_ktype604, _vtype605, _size603);
+            uint32_t _i607;
+            for (_i607 = 0; _i607 < _size603; ++_i607)
             {
-              std::string _key607;
-              xfer += iprot->readString(_key607);
-              std::string& _val608 = this->properties[_key607];
-              xfer += iprot->readString(_val608);
+              std::string _key608;
+              xfer += iprot->readString(_key608);
+              std::string& _val609 = this->properties[_key608];
+              xfer += iprot->readString(_val609);
             }
             xfer += iprot->readMapEnd();
           }
@@ -14745,11 +14723,11 @@ uint32_t CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot)
     xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 6);
     {
       xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->properties.size()));
-      std::map<std::string, std::string> ::const_iterator _iter609;
-      for (_iter609 = this->properties.begin(); _iter609 != this->properties.end(); ++_iter609)
+      std::map<std::string, std::string> ::const_iterator _iter610;
+      for (_iter610 = this->properties.begin(); _iter610 != this->properties.end(); ++_iter610)
       {
-        xfer += oprot->writeString(_iter609->first);
-        xfer += oprot->writeString(_iter609->second);
+        xfer += oprot->writeString(_iter610->first);
+        xfer += oprot->writeString(_iter610->second);
       }
       xfer += oprot->writeMapEnd();
     }
@@ -14771,16 +14749,7 @@ void swap(CompactionRequest &a, CompactionRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-CompactionRequest::CompactionRequest(const CompactionRequest& other610) {
-  dbname = other610.dbname;
-  tablename = other610.tablename;
-  partitionname = other610.partitionname;
-  type = other610.type;
-  runas = other610.runas;
-  properties = other610.properties;
-  __isset = other610.__isset;
-}
-CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other611) {
+CompactionRequest::CompactionRequest(const CompactionRequest& other611) {
   dbname = other611.dbname;
   tablename = other611.tablename;
   partitionname = other611.partitionname;
@@ -14788,6 +14757,15 @@ CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other61
   runas = other611.runas;
   properties = other611.properties;
   __isset = other611.__isset;
+}
+CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other612) {
+  dbname = other612.dbname;
+  tablename = other612.tablename;
+  partitionname = other612.partitionname;
+  type = other612.type;
+  runas = other612.runas;
+  properties = other612.properties;
+  __isset = other612.__isset;
   return *this;
 }
 void CompactionRequest::printTo(std::ostream& out) const {
@@ -14914,15 +14892,15 @@ void swap(CompactionResponse &a, CompactionResponse &b) {
   swap(a.accepted, b.accepted);
 }
 
-CompactionResponse::CompactionResponse(const CompactionResponse& other612) {
-  id = other612.id;
-  state = other612.state;
-  accepted = other612.accepted;
-}
-CompactionResponse& CompactionResponse::operator=(const CompactionResponse& other613) {
+CompactionResponse::CompactionResponse(const CompactionResponse& other613) {
   id = other613.id;
   state = other613.state;
   accepted = other613.accepted;
+}
+CompactionResponse& CompactionResponse::operator=(const CompactionResponse& other614) {
+  id = other614.id;
+  state = other614.state;
+  accepted = other614.accepted;
   return *this;
 }
 void CompactionResponse::printTo(std::ostream& out) const {
@@ -14983,11 +14961,11 @@ void swap(ShowCompactRequest &a, ShowCompactRequest &b) {
   (void) b;
 }
 
-ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other614) {
-  (void) other614;
-}
-ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other615) {
+ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other615) {
   (void) other615;
+}
+ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other616) {
+  (void) other616;
   return *this;
 }
 void ShowCompactRequest::printTo(std::ostream& out) const {
@@ -15113,9 +15091,9 @@ uint32_t ShowCompactResponseElement::read(::apache::thrift::protocol::TProtocol*
         break;
       case 4:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast616;
-          xfer += iprot->readI32(ecast616);
-          this->type = (CompactionType::type)ecast616;
+          int32_t ecast617;
+          xfer += iprot->readI32(ecast617);
+          this->type = (CompactionType::type)ecast617;
           isset_type = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -15302,23 +15280,7 @@ void swap(ShowCompactResponseElement &a, ShowCompactResponseElement &b) {
   swap(a.__isset, b.__isset);
 }
 
-ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other617) {
-  dbname = other617.dbname;
-  tablename = other617.tablename;
-  partitionname = other617.partitionname;
-  type = other617.type;
-  state = other617.state;
-  workerid = other617.workerid;
-  start = other617.start;
-  runAs = other617.runAs;
-  hightestTxnId = other617.hightestTxnId;
-  metaInfo = other617.metaInfo;
-  endTime = other617.endTime;
-  hadoopJobId = other617.hadoopJobId;
-  id = other617.id;
-  __isset = other617.__isset;
-}
-ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other618) {
+ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other618) {
   dbname = other618.dbname;
   tablename = other618.tablename;
   partitionname = other618.partitionname;
@@ -15333,6 +15295,22 @@ ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowComp
   hadoopJobId = other618.hadoopJobId;
   id = other618.id;
   __isset = other618.__isset;
+}
+ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other619) {
+  dbname = other619.dbname;
+  tablename = other619.tablename;
+  partitionname = other619.partitionname;
+  type = other619.type;
+  state = other619.state;
+  workerid = other619.workerid;
+  start = other619.start;
+  runAs = other619.runAs;
+  hightestTxnId = other619.hightestTxnId;
+  metaInfo = other619.metaInfo;
+  endTime = other619.endTime;
+  hadoopJobId = other619.hadoopJobId;
+  id = other619.id;
+  __isset = other619.__isset;
   return *this;
 }
 void ShowCompactResponseElement::printTo(std::ostream& out) const {
@@ -15389,14 +15367,14 @@ uint32_t ShowCompactResponse::read(::apache::thrift::protocol::TProtocol* iprot)
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->compacts.clear();
-            uint32_t _size619;
-            ::apache::thrift::protocol::TType _etype622;
-            xfer += iprot->readListBegin(_etype622, _size619);
-            this->compacts.resize(_size619);
-            uint32_t _i623;
-            for (_i623 = 0; _i623 < _size619; ++_i623)
+            uint32_t _size620;
+            ::apache::thrift::protocol::TType _etype623;
+            xfer += iprot->readListBegin(_etype623, _size620);
+            this->compacts.resize(_size620);
+            uint32_t _i624;
+            for (_i624 = 0; _i624 < _size620; ++_i624)
             {
-              xfer += this->compacts[_i623].read(iprot);
+              xfer += this->compacts[_i624].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -15427,10 +15405,10 @@ uint32_t ShowCompactResponse::write(::apache::thrift::protocol::TProtocol* oprot
   xfer += oprot->writeFieldBegin("compacts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->compacts.size()));
-    std::vector<ShowCompactResponseElement> ::const_iterator _iter624;
-    for (_iter624 = this->compacts.begin(); _iter624 != this->compacts.end(); ++_iter624)
+    std::vector<ShowCompactResponseElement> ::const_iterator _iter625;
+    for (_iter625 = this->compacts.begin(); _iter625 != this->compacts.end(); ++_iter625)
     {
-      xfer += (*_iter624).write(oprot);
+      xfer += (*_iter625).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -15446,11 +15424,11 @@ void swap(ShowCompactResponse &a, ShowCompactResponse &b) {
   swap(a.compacts, b.compacts);
 }
 
-ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other625) {
-  compacts = other625.compacts;
-}
-ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other626) {
+ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other626) {
   compacts = other626.compacts;
+}
+ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other627) {
+  compacts = other627.compacts;
   return *this;
 }
 void ShowCompactResponse::printTo(std::ostream& out) const {
@@ -15539,14 +15517,14 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->partitionnames.clear();
-            uint32_t _size627;
-            ::apache::thrift::protocol::TType _etype630;
-            xfer += iprot->readListBegin(_etype630, _size627);
-            this->partitionnames.resize(_size627);
-            uint32_t _i631;
-            for (_i631 = 0; _i631 < _size627; ++_i631)
+            uint32_t _size628;
+            ::apache::thrift::protocol::TType _etype631;
+            xfer += iprot->readListBegin(_etype631, _size628);
+            this->partitionnames.resize(_size628);
+            uint32_t _i632;
+            for (_i632 = 0; _i632 < _size628; ++_i632)
             {
-              xfer += iprot->readString(this->partitionnames[_i631]);
+              xfer += iprot->readString(this->partitionnames[_i632]);
             }
             xfer += iprot->readListEnd();
           }
@@ -15557,9 +15535,9 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot
         break;
       case 5:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast632;
-          xfer += iprot->readI32(ecast632);
-          this->operationType = (DataOperationType::type)ecast632;
+          int32_t ecast633;
+          xfer += iprot->readI32(ecast633);
+          this->operationType = (DataOperationType::type)ecast633;
           this->__isset.operationType = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -15605,10 +15583,10 @@ uint32_t AddDynamicPartitions::write(::apache::thrift::protocol::TProtocol* opro
   xfer += oprot->writeFieldBegin("partitionnames", ::apache::thrift::protocol::T_LIST, 4);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionnames.size()));
-    std::vector<std::string> ::const_iterator _iter633;
-    for (_iter633 = this->partitionnames.begin(); _iter633 != this->partitionnames.end(); ++_iter633)
+    std::vector<std::string> ::const_iterator _iter634;
+    for (_iter634 = this->partitionnames.begin(); _iter634 != this->partitionnames.end(); ++_iter634)
     {
-      xfer += oprot->writeString((*_iter633));
+      xfer += oprot->writeString((*_iter634));
     }
     xfer += oprot->writeListEnd();
   }
@@ -15634,21 +15612,21 @@ void swap(AddDynamicPartitions &a, AddDynamicPartitions &b) {
   swap(a.__isset, b.__isset);
 }
 
-AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other634) {
-  txnid = other634.txnid;
-  dbname = other634.dbname;
-  tablename = other634.tablename;
-  partitionnames = other634.partitionnames;
-  operationType = other634.operationType;
-  __isset = other634.__isset;
-}
-AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other635) {
+AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other635) {
   txnid = other635.txnid;
   dbname = other635.dbname;
   tablename = other635.tablename;
   partitionnames = other635.partitionnames;
   operationType = other635.operationType;
   __isset = other635.__isset;
+}
+AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other636) {
+  txnid = other636.txnid;
+  dbname = other636.dbname;
+  tablename = other636.tablename;
+  partitionnames = other636.partitionnames;
+  operationType = other636.operationType;
+  __isset = other636.__isset;
   return *this;
 }
 void AddDynamicPartitions::printTo(std::ostream& out) const {
@@ -15754,15 +15732,15 @@ void swap(NotificationEventRequest &a, NotificationEventRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other636) {
-  lastEvent = other636.lastEvent;
-  maxEvents = other636.maxEvents;
-  __isset = other636.__isset;
-}
-NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other637) {
+NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other637) {
   lastEvent = other637.lastEvent;
   maxEvents = other637.maxEvents;
   __isset = other637.__isset;
+}
+NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other638) {
+  lastEvent = other638.lastEvent;
+  maxEvents = other638.maxEvents;
+  __isset = other638.__isset;
   return *this;
 }
 void NotificationEventRequest::printTo(std::ostream& out) const {
@@ -15963,17 +15941,7 @@ void swap(NotificationEvent &a, NotificationEvent &b) {
   swap(a.__isset, b.__isset);
 }
 
-NotificationEvent::NotificationEvent(const NotificationEvent& other638) {
-  eventId = other638.eventId;
-  eventTime = other638.eventTime;
-  eventType = other638.eventType;
-  dbName = other638.dbName;
-  tableName = other638.tableName;
-  message = other638.message;
-  messageFormat = other638.messageFormat;
-  __isset = other638.__isset;
-}
-NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other639) {
+NotificationEvent::NotificationEvent(const NotificationEvent& other639) {
   eventId = other639.eventId;
   eventTime = other639.eventTime;
   eventType = other639.eventType;
@@ -15982,6 +15950,16 @@ NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other63
   message = other639.message;
   messageFormat = other639.messageFormat;
   __isset = other639.__isset;
+}
+NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other640) {
+  eventId = other640.eventId;
+  eventTime = other640.eventTime;
+  eventType = other640.eventType;
+  dbName = other640.dbName;
+  tableName = other640.tableName;
+  message = other640.message;
+  messageFormat = other640.messageFormat;
+  __isset = other640.__isset;
   return *this;
 }
 void NotificationEvent::printTo(std::ostream& out) const {
@@ -16032,14 +16010,14 @@ uint32_t NotificationEventResponse::read(::apache::thrift::protocol::TProtocol*
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->events.clear();
-            uint32_t _size640;
-            ::apache::thrift::protocol::TType _etype643;
-            xfer += iprot->readListBegin(_etype643, _size640);
-            this->events.resize(_size640);
-            uint32_t _i644;
-            for (_i644 = 0; _i644 < _size640; ++_i644)
+            uint32_t _size641;
+            ::apache::thrift::protocol::TType _etype644;
+            xfer += iprot->readListBegin(_etype644, _size641);
+            this->events.resize(_size641);
+            uint32_t _i645;
+            for (_i645 = 0; _i645 < _size641; ++_i645)
             {
-              xfer += this->events[_i644].read(iprot);
+              xfer += this->events[_i645].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -16070,10 +16048,10 @@ uint32_t NotificationEventResponse::write(::apache::thrift::protocol::TProtocol*
   xfer += oprot->writeFieldBegin("events", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->events.size()));
-    std::vector<NotificationEvent> ::const_iterator _iter645;
-    for (_iter645 = this->events.begin(); _iter645 != this->events.end(); ++_iter645)
+    std::vector<NotificationEvent> ::const_iterator _iter646;
+    for (_iter646 = this->events.begin(); _iter646 != this->events.end(); ++_iter646)
     {
-      xfer += (*_iter645).write(oprot);
+      xfer += (*_iter646).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -16089,11 +16067,11 @@ void swap(NotificationEventResponse &a, NotificationEventResponse &b) {
   swap(a.events, b.events);
 }
 
-NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other646) {
-  events = other646.events;
-}
-NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other647) {
+NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other647) {
   events = other647.events;
+}
+NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other648) {
+  events = other648.events;
   return *this;
 }
 void NotificationEventResponse::printTo(std::ostream& out) const {
@@ -16175,11 +16153,11 @@ void swap(CurrentNotificationEventId &a, CurrentNotificationEventId &b) {
   swap(a.eventId, b.eventId);
 }
 
-CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& other648) {
-  eventId = other648.eventId;
-}
-CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other649) {
+CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& other649) {
   eventId = other649.eventId;
+}
+CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other650) {
+  eventId = other650.eventId;
   return *this;
 }
 void CurrentNotificationEventId::printTo(std::ostream& out) const {
@@ -16194,11 +16172,6 @@ InsertEventRequestData::~InsertEventRequestData() throw() {
 }
 
 
-void InsertEventRequestData::__set_replace(const bool val) {
-  this->replace = val;
-__isset.replace = true;
-}
-
 void InsertEventRequestData::__set_filesAdded(const std::vector<std::string> & val) {
   this->filesAdded = val;
 }
@@ -16231,25 +16204,17 @@ uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr
     switch (fid)
     {
       case 1:
-        if (ftype == ::apache::thrift::protocol::T_BOOL) {
-          xfer += iprot->readBool(this->replace);
-          this->__isset.replace = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->filesAdded.clear();
-            uint32_t _size650;
-            ::apache::thrift::protocol::TType _etype653;
-            xfer += iprot->readListBegin(_etype653, _size650);
-            this->filesAdded.resize(_size650);
-            uint32_t _i654;
-            for (_i654 = 0; _i654 < _size650; ++_i654)
+            uint32_t _size651;
+            ::apache::thrift::protocol::TType _etype654;
+            xfer += iprot->readListBegin(_etype654, _size651);
+            this->filesAdded.resize(_size651);
+            uint32_t _i655;
+            for (_i655 = 0; _i655 < _size651; ++_i655)
             {
-              xfer += iprot->readString(this->filesAdded[_i654]);
+              xfer += iprot->readString(this->filesAdded[_i655]);
             }
             xfer += iprot->readListEnd();
           }
@@ -16258,18 +16223,18 @@ uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr
           xfer += iprot->skip(ftype);
         }
         break;
-      case 3:
+      case 2:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->filesAddedChecksum.clear();
-            uint32_t _size655;
-            ::apache::thrift::protocol::TType _etype658;
-            xfer += iprot->readListBegin(_etype658, _size655);
-            this->filesAddedChecksum.resize(_size655);
-            uint32_t _i659;
-            for (_i659 = 0; _i659 < _size655; ++_i659)
+            uint32_t _size656;
+            ::apache::thrift::protocol::TType _etype659;
+            xfer += iprot->readListBegin(_etype659, _size656);
+            this->filesAddedChecksum.resize(_size656);
+            uint32_t _i660;
+            for (_i660 = 0; _i660 < _size656; ++_i660)
             {
-              xfer += iprot->readString(this->filesAddedChecksum[_i659]);
+              xfer += iprot->readString(this->filesAddedChecksum[_i660]);
             }
             xfer += iprot->readListEnd();
           }
@@ -16297,31 +16262,26 @@ uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
   xfer += oprot->writeStructBegin("InsertEventRequestData");
 
-  if (this->__isset.replace) {
-    xfer += oprot->writeFieldBegin("replace", ::apache::thrift::protocol::T_BOOL, 1);
-    xfer += oprot->writeBool(this->replace);
-    xfer += oprot->writeFieldEnd();
-  }
-  xfer += oprot->writeFieldBegin("filesAdded", ::apache::thrift::protocol::T_LIST, 2);
+  xfer += oprot->writeFieldBegin("filesAdded", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->filesAdded.size()));
-    std::vector<std::string> ::const_iterator _iter660;
-    for (_iter660 = this->filesAdded.begin(); _iter660 != this->filesAdded.end(); ++_iter660)
+    std::vector<std::string> ::const_iterator _iter661;
+    for (_iter661 = this->filesAdded.begin(); _iter661 != this->filesAdded.end(); ++_iter661)
     {
-      xfer += oprot->writeString((*_iter660));
+      xfer += oprot->writeString((*_iter661));
     }
     xfer += oprot->writeListEnd();
   }
   xfer += oprot->writeFieldEnd();
 
   if (this->__isset.filesAddedChecksum) {
-    xfer += oprot->writeFieldBegin("filesAddedChecksum", ::apache::thrift::protocol::T_LIST, 3);
+    xfer += oprot->writeFieldBegin("filesAddedChecksum", ::apache::thrift::protocol::T_LIST, 2);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->filesAddedChecksum.size()));
-      std::vector<std::string> ::const_iterator _iter661;
-      for (_iter661 = this->filesAddedChecksum.begin(); _iter661 != this->filesAddedChecksum.end(); ++_iter661)
+      std::vector<std::string> ::const_iterator _iter662;
+      for (_iter662 = this->filesAddedChecksum.begin(); _iter662 != this->filesAddedChecksum.end(); ++_iter662)
       {
-        xfer += oprot->writeString((*_iter661));
+        xfer += oprot->writeString((*_iter662));
       }
       xfer += oprot->writeListEnd();
     }
@@ -16334,30 +16294,26 @@ uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op
 
 void swap(InsertEventRequestData &a, InsertEventRequestData &b) {
   using ::std::swap;
-  swap(a.replace, b.replace);
   swap(a.filesAdded, b.filesAdded);
   swap(a.filesAddedChecksum, b.filesAddedChecksum);
   swap(a.__isset, b.__isset);
 }
 
-InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& other662) {
-  replace = other662.replace;
-  filesAdded = other662.filesAdded;
-  filesAddedChecksum = other662.filesAddedChecksum;
-  __isset = other662.__isset;
-}
-InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other663) {
-  replace = other663.replace;
+InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& other663) {
   filesAdded = other663.filesAdded;
   filesAddedChecksum = other663.filesAddedChecksum;
   __isset = other663.__isset;
+}
+InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other664) {
+  filesAdded = other664.filesAdded;
+  filesAddedChecksum = other664.filesAddedChecksum;
+  __isset = other664.__isset;
   return *this;
 }
 void InsertEventRequestData::printTo(std::ostream& out) const {
   using ::apache::thrift::to_string;
   out << "InsertEventRequestData(";
-  out << "replace="; (__isset.replace ? (out << to_string(replace)) : (out << "<null>"));
-  out << ", " << "filesAdded=" << to_string(filesAdded);
+  out << "filesAdded=" << to_string(filesAdded);
   out << ", " << "filesAddedChecksum="; (__isset.filesAddedChecksum ? (out << to_string(filesAddedChecksum)) : (out << "<null>"));
   out << ")";
 }
@@ -16432,13 +16388,13 @@ void swap(FireEventRequestData &a, FireEventRequestData &b) {
   swap(a.__isset, b.__isset);
 }
 
-FireEventRequestData::FireEventRequestData(const FireEventRequestData& other664) {
-  insertData = other664.insertData;
-  __isset = other664.__isset;
-}
-FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other665) {
+FireEventRequestData::FireEventRequestData(const FireEventRequestData& other665) {
   insertData = other665.insertData;
   __isset = other665.__isset;
+}
+FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other666) {
+  insertData = other666.insertData;
+  __isset = other666.__isset;
   return *this;
 }
 void FireEventRequestData::printTo(std::ostream& out) const {
@@ -16535,14 +16491,14 @@ uint32_t FireEventRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->partitionVals.clear();
-            uint32_t _size666;
-            ::apache::thrift::protocol::TType _etype669;
-            xfer += iprot->readListBegin(_etype669, _size666);
-            this->partitionVals.resize(_size666);
-            uint32_t _i670;
-            for (_i670 = 0; _i670 < _size666; ++_i670)
+            uint32_t _size667;
+            ::apache::thrift::protocol::TType _etype670;
+            xfer += iprot->readListBegin(_etype670, _size667);
+            this->partitionVals.resize(_size667);
+            uint32_t _i671;
+            for (_i671 = 0; _i671 < _size667; ++_i671)
             {
-              xfer += iprot->readString(this->partitionVals[_i670]);
+              xfer += iprot->readString(this->partitionVals[_i671]);
             }
             xfer += iprot->readListEnd();
           }
@@ -16594,10 +16550,10 @@ uint32_t FireEventRequest::write(::apache::thrift::protocol::TProtocol* oprot) c
     xfer += oprot->writeFieldBegin("partitionVals", ::apache::thrift::protocol::T_LIST, 5);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionVals.size()));
-      std::vector<std::string> ::const_iterator _iter671;
-      for (_iter671 = this->partitionVals.begin(); _iter671 != this->partitionVals.end(); ++_iter671)
+      std::vector<std::string> ::const_iterator _iter672;
+      for (_iter672 = this->partitionVals.begin(); _iter672 != this->partitionVals.end(); ++_iter672)
       {
-        xfer += oprot->writeString((*_iter671));
+        xfer += oprot->writeString((*_iter672));
       }
       xfer += oprot->writeListEnd();
     }
@@ -16618,21 +16574,21 @@ void swap(FireEventRequest &a, FireEventRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-FireEventRequest::FireEventRequest(const FireEventRequest& other672) {
-  successful = other672.successful;
-  data = other672.data;
-  dbName = other672.dbName;
-  tableName = other672.tableName;
-  partitionVals = other672.partitionVals;
-  __isset = other672.__isset;
-}
-FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other673) {
+FireEventRequest::FireEventRequest(const FireEventRequest& other673) {
   successful = other673.successful;
   data = other673.data;
   dbName = other673.dbName;
   tableName = other673.tableName;
   partitionVals = other673.partitionVals;
   __isset = other673.__isset;
+}
+FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other674) {
+  successful = other674.successful;
+  data = other674.data;
+  dbName = other674.dbName;
+  tableName = other674.tableName;
+  partitionVals = other674.partitionVals;
+  __isset = other674.__isset;
   return *this;
 }
 void FireEventRequest::printTo(std::ostream& out) const {
@@ -16695,11 +16651,11 @@ void swap(FireEventResponse &a, FireEventResponse &b) {
   (void) b;
 }
 
-FireEventResponse::FireEventResponse(const FireEventResponse& other674) {
-  (void) other674;
-}
-FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other675) {
+FireEventResponse::FireEventResponse(const FireEventResponse& other675) {
   (void) other675;
+}
+FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other676) {
+  (void) other676;
   return *this;
 }
 void FireEventResponse::printTo(std::ostream& out) const {
@@ -16799,15 +16755,15 @@ void swap(MetadataPpdResult &a, MetadataPpdResult &b) {
   swap(a.__isset, b.__isset);
 }
 
-MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other676) {
-  metadata = other676.metadata;
-  includeBitset = other676.includeBitset;
-  __isset = other676.__isset;
-}
-MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other677) {
+MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other677) {
   metadata = other677.metadata;
   includeBitset = other677.includeBitset;
   __isset = other677.__isset;
+}
+MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other678) {
+  metadata = other678.metadata;
+  includeBitset = other678.includeBitset;
+  __isset = other678.__isset;
   return *this;
 }
 void MetadataPpdResult::printTo(std::ostream& out) const {
@@ -16858,17 +16814,17 @@ uint32_t GetFileMetadataByExprResult::read(::apache::thrift::protocol::TProtocol
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->metadata.clear();
-            uint32_t _size678;
-            ::apache::thrift::protocol::TType _ktype679;
-            ::apache::thrift::protocol::TType _vtype680;
-            xfer += iprot->readMapBegin(_ktype679, _vtype680, _size678);
-            uint32_t _i682;
-            for (_i682 = 0; _i682 < _size678; ++_i682)
+            uint32_t _size679;
+            ::apache::thrift::protocol::TType _ktype680;
+            ::apache::thrift::protocol::TType _vtype681;
+            xfer += iprot->readMapBegin(_ktype680, _vtype681, _size679);
+            uint32_t _i683;
+            for (_i683 = 0; _i683 < _size679; ++_i683)
             {
-              int64_t _key683;
-              xfer += iprot->readI64(_key683);
-              MetadataPpdResult& _val684 = this->metadata[_key683];
-              xfer += _val684.read(iprot);
+              int64_t _key684;
+              xfer += iprot->readI64(_key684);
+              MetadataPpdResult& _val685 = this->metadata[_key684];
+              xfer += _val685.read(iprot);
             }
             xfer += iprot->readMapEnd();
           }
@@ -16909,11 +16865,11 @@ uint32_t GetFileMetadataByExprResult::write(::apache::thrift::protocol::TProtoco
   xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->metadata.size()));
-    std::map<int64_t, MetadataPpdResult> ::const_iterator _iter685;
-    for (_iter685 = this->metadata.begin(); _iter685 != this->metadata.end(); ++_iter685)
+    std::map<int64_t, MetadataPpdResult> ::const_iterator _iter686;
+    for (_iter686 = this->metadata.begin(); _iter686 != this->metadata.end(); ++_iter686)
     {
-      xfer += oprot->writeI64(_iter685->first);
-      xfer += _iter685->second.write(oprot);
+      xfer += oprot->writeI64(_iter686->first);
+      xfer += _iter686->second.write(oprot);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -16934,13 +16890,13 @@ void swap(GetFileMetadataByExprResult &a, GetFileMetadataByExprResult &b) {
   swap(a.isSupported, b.isSupported);
 }
 
-GetFileMetadataByExprResult::GetFileMetadataByExprResult(const GetFileMetadataByExprResult& other686) {
-  metadata = other686.metadata;
-  isSupported = other686.isSupported;
-}
-GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other687) {
+GetFileMetadataByExprResult::GetFileMetadataByExprResult(const GetFileMetadataByExprResult& other687) {
   metadata = other687.metadata;
   isSupported = other687.isSupported;
+}
+GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other688) {
+  metadata = other688.metadata;
+  isSupported = other688.isSupported;
   return *this;
 }
 void GetFileMetadataByExprResult::printTo(std::ostream& out) const {
@@ -17001,14 +16957,14 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->fileIds.clear();
-            uint32_t _size688;
-            ::apache::thrift::protocol::TType _etype691;
-            xfer += iprot->readListBegin(_etype691, _size688);
-            this->fileIds.resize(_size688);
-            uint32_t _i692;
-            for (_i692 = 0; _i692 < _size688; ++_i692)
+            uint32_t _size689;
+            ::apache::thrift::protocol::TType _etype692;
+            xfer += iprot->readListBegin(_etype692, _size689);
+            this->fileIds.resize(_size689);
+            uint32_t _i693;
+            for (_i693 = 0; _i693 < _size689; ++_i693)
             {
-              xfer += iprot->readI64(this->fileIds[_i692]);
+              xfer += iprot->readI64(this->fileIds[_i693]);
             }
             xfer += iprot->readListEnd();
           }
@@ -17035,9 +16991,9 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco
         break;
       case 4:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast693;
-          xfer += iprot->readI32(ecast693);
-          this->type = (FileMetadataExprType::type)ecast693;
+          int32_t ecast694;
+          xfer += iprot->readI32(ecast694);
+          this->type = (FileMetadataExprType::type)ecast694;
           this->__isset.type = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -17067,10 +17023,10 @@ uint32_t GetFileMetadataByExprRequest::write(::apache::thrift::protocol::TProtoc
   xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->fileIds.size()));
-    std::vector<int64_t> ::const_iterator _iter694;
-    for (_iter694 = this->fileIds.begin(); _iter694 != this->fileIds.end(); ++_iter694)
+    std::vector<int64_t> ::const_iterator _iter695;
+    for (_iter695 = this->fileIds.begin(); _iter695 != this->fileIds.end(); ++_iter695)
     {
-      xfer += oprot->writeI64((*_iter694));
+      xfer += oprot->writeI64((*_iter695));
     }
     xfer += oprot->writeListEnd();
   }
@@ -17104,19 +17060,19 @@ void swap(GetFileMetadataByExprRequest &a, GetFileMetadataByExprRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other695) {
-  fileIds = other695.fileIds;
-  expr = other695.expr;
-  doGetFooters = other695.doGetFooters;
-  type = other695.type;
-  __isset = other695.__isset;
-}
-GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other696) {
+GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other696) {
   fileIds = other696.fileIds;
   expr = other696.expr;
   doGetFooters = other696.doGetFooters;
   type = other696.type;
   __isset = other696.__isset;
+}
+GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other697) {
+  fileIds = other697.fileIds;
+  expr = other697.expr;
+  doGetFooters = other697.doGetFooters;
+  type = other697.type;
+  __isset = other697.__isset;
   return *this;
 }
 void GetFileMetadataByExprRequest::printTo(std::ostream& out) const {
@@ -17169,17 +17125,17 @@ uint32_t GetFileMetadataResult::read(::apache::thrift::protocol::TProtocol* ipro
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->metadata.clear();
-            uint32_t _size697;
-            ::apache::thrift::protocol::TType _ktype698;
-            ::apache::thrift::protocol::TType _vtype699;
-            xfer += iprot->readMapBegin(_ktype698, _vtype699, _size697);
-            uint32_t _i701;
-            for (_i701 = 0; _i701 < _size697; ++_i701)
+            uint32_t _size698;
+            ::apache::thrift::protocol::TType _ktype699;
+            ::apache::thrift::protocol::TType _vtype700;
+            xfer += iprot->readMapBegin(_ktype699, _vtype700, _size698);
+            uint32_t _i702;
+            for (_i702 = 0; _i702 < _size698; ++_i702)
             {
-              int64_t _key702;
-              xfer += iprot->readI64(_key702);
-              std::string& _val703 = this->metadata[_key702];
-              xfer += iprot->readBinary(_val703);
+              int64_t _key703;
+              xfer += iprot->readI64(_key703);
+              std::string& _val704 = this->metadata[_key703];
+              xfer += iprot->readBinary(_val704);
             }
             xfer += iprot->readMapEnd();
           }
@@ -17220,11 +17176,11 @@ uint32_t GetFileMetadataResult::write(::apache::thrift::protocol::TProtocol* opr
   xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->metadata.size()));
-    std::map<int64_t, std::string> ::const_iterator _iter704;
-    for (_iter704 = this->metadata.begin(); _iter704 != this->metadata.end(); ++_iter704)
+    std::map<int64_t, std::string> ::const_iterator _iter705;
+    for (_iter705 = this->metadata.begin(); _iter705 != this->metadata.end(); ++_iter705)
     {
-      xfer += oprot->writeI64(_iter704->first);
-      xfer += oprot->writeBinary(_iter704->second);
+      xfer += oprot->writeI64(_iter705->first);
+      xfer += oprot->writeBinary(_iter705->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -17245,13 +17201,13 @@ void swap(GetFileMetadataResult &a, GetFileMetadataResult &b) {
   swap(a.isSupported, b.isSupported);
 }
 
-GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other705) {
-  metadata = other705.metadata;
-  isSupported = other705.isSupported;
-}
-GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other706) {
+GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other706) {
   metadata = other706.metadata;
   isSupported = other706.isSupported;
+}
+GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other707) {
+  metadata = other707.metadata;
+  isSupported = other707.isSupported;
   return *this;
 }
 void GetFileMetadataResult::printTo(std::ostream& out) const {
@@ -17297,14 +17253,14 @@ uint32_t GetFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->fileIds.clear();
-            uint32_t _size707;
-            ::apache::thrift::protocol::TType _etype710;
-            xfer += iprot->readListBegin(_etype710, _size707);
-            this->fileIds.resize(_size707);
-            uint32_t _i711;
-            for (_i711 = 0; _i711 < _size707; ++_i711)
+            uint32_t _size708;
+            ::apache::thrift::protocol::TType _etype711;
+            xfer += iprot->readListBegin(_etype711, _size708);
+            this->fileIds.resize(_size708);
+            uint32_t _i712;
+            for (_i712 = 0; _i712 < _size708; ++_i712)
             {
-              xfer += iprot->readI64(this->fileIds[_i711]);
+              xfer += iprot->readI64(this->fileIds[_i712]);
             }
             xfer += iprot->readListEnd();
           }
@@ -17335,10 +17291,10 @@ uint32_t GetFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op
   xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->fileIds.size()));
-    std::vector<int64_t> ::const_iterator _iter712;
-    for (_iter712 = this->fileIds.begin(); _iter712 != this->fileIds.end(); ++_iter712)
+    std::vector<int64_t> ::const_iterator _iter713;
+    for (_iter713 = this->fileIds.begin(); _iter713 != this->fileIds.end(); ++_iter713)
     {
-      xfer += oprot->writeI64((*_iter712));
+      xfer += oprot->writeI64((*_iter713));
     }
     xfer += oprot->writeListEnd();
   }
@@ -17354,11 +17310,11 @@ void swap(GetFileMetadataRequest &a, GetFileMetadataRequest &b) {
   swap(a.fileIds, b.fileIds);
 }
 
-GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other713) {
-  fileIds = other713.fileIds;
-}
-GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other714) {
+GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other714) {
   fileIds = other714.fileIds;
+}
+GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other715) {
+  fileIds = other715.fileIds;
   return *this;
 }
 void GetFileMetadataRequest::printTo(std::ostream& out) const {
@@ -17417,11 +17373,11 @@ void swap(PutFileMetadataResult &a, PutFileMetadataResult &b) {
   (void) b;
 }
 
-PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other715) {
-  (void) other715;
-}
-PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other716) {
+PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other716) {
   (void) other716;
+}
+PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other717) {
+  (void) other717;
   return *this;
 }
 void PutFileMetadataResult::printTo(std::ostream& out) const {
@@ -17475,14 +17431,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->fileIds.clear();
-            uint32_t _size717;
-            ::apache::thrift::protocol::TType _etype720;
-            xfer += iprot->readListBegin(_etype720, _size717);
-            this->fileIds.resize(_size717);
-            uint32_t _i721;
-            for (_i721 = 0; _i721 < _size717; ++_i721)
+            uint32_t _size718;
+            ::apache::thrift::protocol::TType _etype721;
+            xfer += iprot->readListBegin(_etype721, _size718);
+            this->fileIds.resize(_size718);
+            uint32_t _i722;
+            for (_i722 = 0; _i722 < _size718; ++_i722)
             {
-              xfer += iprot->readI64(this->fileIds[_i721]);
+              xfer += iprot->readI64(this->fileIds[_i722]);
             }
             xfer += iprot->readListEnd();
           }
@@ -17495,14 +17451,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->metadata.clear();
-            uint32_t _size722;
-            ::apache::thrift::protocol::TType _etype725;
-            xfer += iprot->readListBegin(_etype725, _size722);
-            this->metadata.resize(_size722);
-            uint32_t _i726;
-            for (_i726 = 0; _i726 < _size722; ++_i726)
+            uint32_t _size723;
+            ::apache::thrift::protocol::TType _etype726;
+            xfer += iprot->readListBegin(_etype726, _size723);
+            this->metadata.resize(_size723);
+            uint32_t _i727;
+            for (_i727 = 0; _i727 < _size723; ++_i727)
             {
-              xfer += iprot->readBinary(this->metadata[_i726]);
+              xfer += iprot->readBinary(this->metadata[_i727]);
             }
             xfer += iprot->readListEnd();
           }
@@ -17513,9 +17469,9 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr
         break;
       case 3:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast727;
-          xfer += iprot->readI32(ecast727);
-          this->type = (FileMetadataExprType::type)ecast727;
+          int32_t ecast728;
+          xfer += iprot->readI32(ecast728);
+          this->type = (FileMetadataExprType::type)ecast728;
           this->__isset.type = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -17545,10 +17501,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op
   xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->fileIds.size()));
-    std::vector<int64_t> ::const_iterator _iter728;
-    for (_iter728 = this->fileIds.begin(); _iter728 != this->fileIds.end(); ++_iter728)
+    std::vector<int64_t> ::const_iterator _iter729;
+    for (_iter729 = this->fileIds.begin(); _iter729 != this->fileIds.end(); ++_iter729)
     {
-      xfer += oprot->writeI64((*_iter728));
+      xfer += oprot->writeI64((*_iter729));
     }
     xfer += oprot->writeListEnd();
   }
@@ -17557,10 +17513,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op
   xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->metadata.size()));
-    std::vector<std::string> ::const_iterator _iter729;
-    for (_iter729 = this->metadata.begin(); _iter729 != this->metadata.end(); ++_iter729)
+    std::vector<std::string> ::const_iterator _iter730;
+    for (_iter730 = this->metadata.begin(); _iter730 != this->metadata.end(); ++_iter730)
     {
-      xfer += oprot->writeBinary((*_iter729));
+      xfer += oprot->writeBinary((*_iter730));
     }
     xfer += oprot->writeListEnd();
   }
@@ -17584,17 +17540,17 @@ void swap(PutFileMetadataRequest &a, PutFileMetadataRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other730) {
-  fileIds = other730.fileIds;
-  metadata = other730.metadata;
-  type = other730.type;
-  __isset = other730.__isset;
-}
-PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other731) {
+PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other731) {
   fileIds = other731.fileIds;
   metadata = other731.metadata;
   type = other731.type;
   __isset = other731.__isset;
+}
+PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other732) {
+  fileIds = other732.fileIds;
+  metadata = other732.metadata;
+  type = other732.type;
+  __isset = other732.__isset;
   return *this;
 }
 void PutFileMetadataRequest::printTo(std::ostream& out) const {
@@ -17655,11 +17611,11 @@ void swap(ClearFileMetadataResult &a, ClearFileMetadataResult &b) {
   (void) b;
 }
 
-ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other732) {
-  (void) other732;
-}
-ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& other733) {
+ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other733) {
   (void) other733;
+}
+ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& other734) {
+  (void) other734;
   return *this;
 }
 void ClearFileMetadataResult::printTo(std::ostream& out) const {
@@ -17703,14 +17659,14 @@ uint32_t ClearFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* i
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->fileIds.clear();
-            uint32_t _size734;
-            ::apache::thrift::protocol::TType _etype737;
-            xfer += iprot->readListBegin(_etype737, _size734);
-            this->fileIds.resize(_size734);
-            uint32_t _i738;
-            for (_i738 = 0; _i738 < _size734; ++_i738)
+            uint32_t _size735;
+            ::apache::thrift::protocol::TType _etype738;
+            xfer += iprot->readListBegin(_etype738, _size735);
+            this->fileIds.resize(_size735);
+            uint32_t _i739;
+            for (_i739 = 0; _i739 < _size735; ++_i739)
             {
-              xfer += iprot->readI64(this->fileIds[_i738]);
+              xfer += iprot->readI64(this->fileIds[_i739]);
             }
             xfer += iprot->readListEnd();
           }
@@ -17741,10 +17697,10 @@ uint32_t ClearFileMetadataRequest::write(::apache::thrift::protocol::TProtocol*
   xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->fileIds.size()));
-    std::vector<int64_t> ::const_iterator _iter739;
-    for (_iter739 = this->fileIds.begin(); _iter739 != this->fileIds.end(); ++_iter739)
+    std::vector<int64_t> ::const_iterator _iter740;
+    for (_iter740 = this->fileIds.begin(); _iter740 != this->fileIds.end(); ++_iter740)
     {
-      xfer += oprot->writeI64((*_iter739));
+      xfer += oprot->writeI64((*_iter740));
     }
     xfer += oprot->writeListEnd();
   }
@@ -17760,11 +17716,11 @@ void swap(ClearFileMetadataRequest &a, ClearFileMetadataRequest &b) {
   swap(a.fileIds, b.fileIds);
 }
 
-ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other740) {
-  fileIds = other740.fileIds;
-}
-ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other741) {
+ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other741) {
   fileIds = other741.fileIds;
+}
+ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other742) {
+  fileIds = other742.fileIds;
   return *this;
 }
 void ClearFileMetadataRequest::printTo(std::ostream& out) const {
@@ -17846,11 +17802,11 @@ void swap(CacheFileMetadataResult &a, CacheFileMetadataResult &b) {
   swap(a.isSupported, b.isSupported);
 }
 
-CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other742) {
-  isSupported = other742.isSupported;
-}
-CacheFileMetadataResult& CacheFileMetadataResult::operator=(const CacheFileMetadataResult& other743) {
+CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other743) {
   isSupported = other743.isSupported;
+}
+CacheFileMetadataResult& CacheFileMetadataResult::operator=(const CacheFileMetadataResult& other744) {
+  isSupported = other744.isSupported;
   return *this;
 }
 void CacheFileMetadataResult::printTo(std::ostream& out) const {
@@ -17991,19 +17947,19 @@ void swap(CacheFileMetadataRequest &a, CacheFileMetadataRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other744) {
-  dbName = other744.dbName;
-  tblName = other744.tblName;
-  partName = other744.partName;
-  isAllParts = other744.isAllParts;
-  __isset = other744.__isset;
-}
-CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& other745) {
+CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other745) {
   dbName = other745.dbName;
   tblName = other745.tblName;
   partName = other745.partName;
   isAllParts = other745.isAllParts;
   __isset = other745.__isset;
+}
+CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& other746) {
+  dbName = other746.dbName;
+  tblName = other746.tblName;
+  partName = other746.partName;
+  isAllParts = other746.isAllParts;
+  __isset = other746.__isset;
   return *this;
 }
 void CacheFileMetadataRequest::printTo(std::ostream& out) const {
@@ -18108,13 +18064,13 @@ void swap(GetNextWriteIdRequest &a, GetNextWriteIdRequest &b) {
   swap(a.tblName, b.tblName);
 }
 
-GetNextWriteIdRequest::GetNextWriteIdRequest(const GetNextWriteIdRequest& other746) {
-  dbName = other746.dbName;
-  tblName = other746.tblName;
-}
-GetNextWriteIdRequest& GetNextWriteIdRequest::operator=(const GetNextWriteIdRequest& other747) {
+GetNextWriteIdRequest::GetNextWriteIdRequest(const GetNextWriteIdRequest& other747) {
   dbName = other747.dbName;
   tblName = other747.tblName;
+}
+GetNextWriteIdRequest& GetNextWriteIdRequest::operator=(const GetNextWriteIdRequest& other748) {
+  dbName = other748.dbName;
+  tblName = other748.tblName;
   return *this;
 }
 void GetNextWriteIdRequest::printTo(std::ostream& out) const {
@@ -18197,11 +18153,11 @@ void swap(GetNextWriteIdResult &a, GetNextWriteIdResult &b) {
   swap(a.writeId, b.writeId);
 }
 
-GetNextWriteIdResult::GetNextWriteIdResult(const GetNextWriteIdResult& other748) {
-  writeId = other748.writeId;
-}
-GetNextWriteIdResult& GetNextWriteIdResult::operator=(const GetNextWriteIdResult& other749) {
+GetNextWriteIdResult::GetNextWriteIdResult(const GetNextWriteIdResult& other749) {
   writeId = other749.writeId;
+}
+GetNextWriteIdResult& GetNextWriteIdResult::operator=(const GetNextWriteIdResult& other750) {
+  writeId = other750.writeId;
   return *this;
 }
 void GetN

<TRUNCATED>
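
[Editor's note] The hunks above are Thrift-generated C++ for the metastore types. With the revert, the `replace` member disappears from InsertEventRequestData, so `filesAdded` and `filesAddedChecksum` move back to Thrift field ids 1 and 2, and the generated temporaries (_size/_etype/_i/_iter/otherNNN counters) are renumbered throughout. For orientation only, here is a minimal sketch of populating the reverted struct using members and methods visible in the diff (__set_filesAdded, __isset.filesAddedChecksum, the copy constructor, printTo). The header path, namespace, and sample values are assumptions, not taken from the patch.

// A minimal sketch, assuming the regenerated header is available as
// "hive_metastore_types.h" and the types live in Apache::Hadoop::Hive
// (both assumptions; adjust to your generated sources and build setup).
#include <iostream>
#include <string>
#include <vector>

#include "hive_metastore_types.h"   // assumed name of the Thrift-generated header

int main() {
  using Apache::Hadoop::Hive::InsertEventRequestData;

  InsertEventRequestData data;

  // Field id 1 after the revert: files added by the insert event.
  std::vector<std::string> files;
  files.push_back("hdfs://nn/warehouse/t/000000_0");   // illustrative path only
  data.__set_filesAdded(files);                        // setter shown in the diff

  // Optional field id 2 after the revert: per-file checksums. write() only
  // serializes this field when the corresponding __isset flag is true.
  data.filesAddedChecksum.push_back("d41d8cd9");       // illustrative checksum only
  data.__isset.filesAddedChecksum = true;

  // Copy construction mirrors the regenerated members shown above: only
  // filesAdded, filesAddedChecksum and __isset are copied (no 'replace').
  InsertEventRequestData copy(data);

  copy.printTo(std::cout);                             // printTo(std::ostream&) from the generated code
  std::cout << std::endl;
  return 0;
}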

[37/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/rcfile_format_part.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/rcfile_format_part.q.out b/itests/hive-blobstore/src/test/results/clientpositive/rcfile_format_part.q.out
deleted file mode 100644
index bed10ab..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/rcfile_format_part.q.out
+++ /dev/null
@@ -1,274 +0,0 @@
-PREHOOK: query: DROP TABLE src_events
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE src_events
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE src_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT,
-  run_date    STRING,
-  game_id     INT,
-  event_name  STRING
-)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/rcfile_format_part/src_events
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_events
-POSTHOOK: query: CREATE TABLE src_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT,
-  run_date    STRING,
-  game_id     INT,
-  event_name  STRING
-)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/rcfile_format_part/src_events
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_events
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/events.txt' INTO TABLE src_events
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@src_events
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/events.txt' INTO TABLE src_events
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@src_events
-PREHOOK: query: DROP TABLE rcfile_events
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE rcfile_events
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE rcfile_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT
-)
-PARTITIONED BY (run_date STRING, game_id INT, event_name STRING)
-STORED AS RCFILE
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/rcfile_format_part/rcfile_events
-PREHOOK: Output: database:default
-PREHOOK: Output: default@rcfile_events
-POSTHOOK: query: CREATE TABLE rcfile_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT
-)
-PARTITIONED BY (run_date STRING, game_id INT, event_name STRING)
-STORED AS RCFILE
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/rcfile_format_part/rcfile_events
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@rcfile_events
-PREHOOK: query: INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@rcfile_events
-POSTHOOK: query: INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@rcfile_events@run_date=20120921/game_id=39/event_name=hq_change
-POSTHOOK: Output: default@rcfile_events@run_date=20121021/game_id=39/event_name=hq_change
-POSTHOOK: Output: default@rcfile_events@run_date=20121121/game_id=39/event_name=hq_change
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-PREHOOK: query: SHOW PARTITIONS rcfile_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@rcfile_events
-POSTHOOK: query: SHOW PARTITIONS rcfile_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@rcfile_events
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM rcfile_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM rcfile_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-200
-PREHOOK: query: SELECT COUNT(*) FROM rcfile_events WHERE run_date=20120921
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM rcfile_events WHERE run_date=20120921
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-50
-PREHOOK: query: SELECT COUNT(*) FROM rcfile_events WHERE run_date=20121121
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM rcfile_events WHERE run_date=20121121
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-100
-PREHOOK: query: INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date=201211, game_id, event_name)
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid,game_id,event_name FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@rcfile_events@run_date=201211
-POSTHOOK: query: INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date=201211, game_id, event_name)
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid,game_id,event_name FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@rcfile_events@run_date=201211/game_id=39/event_name=hq_change
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201211,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201211,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201211,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201211,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201211,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201211,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201211,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201211,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-PREHOOK: query: SHOW PARTITIONS rcfile_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@rcfile_events
-POSTHOOK: query: SHOW PARTITIONS rcfile_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@rcfile_events
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=201211/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM rcfile_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM rcfile_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-300
-PREHOOK: query: INSERT INTO TABLE rcfile_events PARTITION (run_date=201209, game_id=39, event_name)
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid,event_name FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@rcfile_events@run_date=201209/game_id=39
-POSTHOOK: query: INSERT INTO TABLE rcfile_events PARTITION (run_date=201209, game_id=39, event_name)
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid,event_name FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-PREHOOK: query: SELECT COUNT(*) FROM rcfile_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM rcfile_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-350
-PREHOOK: query: INSERT INTO TABLE rcfile_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39 AND event_name='hq_change'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: query: INSERT INTO TABLE rcfile_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39 AND event_name='hq_change'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM rcfile_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM rcfile_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-400
-PREHOOK: query: INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39 AND event_name='hq_change'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: query: INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39 AND event_name='hq_change'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM rcfile_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM rcfile_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-350

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/rcfile_nonstd_partitions_loc.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/rcfile_nonstd_partitions_loc.q.out b/itests/hive-blobstore/src/test/results/clientpositive/rcfile_nonstd_partitions_loc.q.out
deleted file mode 100644
index c6442f9..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/rcfile_nonstd_partitions_loc.q.out
+++ /dev/null
@@ -1,533 +0,0 @@
-PREHOOK: query: DROP TABLE src_events
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE src_events
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE src_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT,
-  run_date    STRING,
-  game_id     INT,
-  event_name  STRING
-)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/rcfile_nonstd_partitions_loc/src_events
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_events
-POSTHOOK: query: CREATE TABLE src_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT,
-  run_date    STRING,
-  game_id     INT,
-  event_name  STRING
-)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/rcfile_nonstd_partitions_loc/src_events
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_events
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/events.txt' INTO TABLE src_events
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@src_events
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/events.txt' INTO TABLE src_events
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@src_events
-PREHOOK: query: DROP TABLE rcfile_events
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE rcfile_events
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE rcfile_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT
-)
-PARTITIONED BY (run_date STRING, game_id INT, event_name STRING)
-STORED AS RCFILE
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/rcfile_nonstd_partitions_loc/rcfile_events
-PREHOOK: Output: database:default
-PREHOOK: Output: default@rcfile_events
-POSTHOOK: query: CREATE TABLE rcfile_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT
-)
-PARTITIONED BY (run_date STRING, game_id INT, event_name STRING)
-STORED AS RCFILE
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/rcfile_nonstd_partitions_loc/rcfile_events
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@rcfile_events
-PREHOOK: query: INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@rcfile_events
-POSTHOOK: query: INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@rcfile_events@run_date=20120921/game_id=39/event_name=hq_change
-POSTHOOK: Output: default@rcfile_events@run_date=20121021/game_id=39/event_name=hq_change
-POSTHOOK: Output: default@rcfile_events@run_date=20121121/game_id=39/event_name=hq_change
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-PREHOOK: query: SHOW PARTITIONS rcfile_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@rcfile_events
-POSTHOOK: query: SHOW PARTITIONS rcfile_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@rcfile_events
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM rcfile_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM rcfile_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-200
-PREHOOK: query: ALTER TABLE rcfile_events ADD PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-#### A masked pattern was here ####
-PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Input: ### test.blobstore.path ###/rcfile_nonstd_partitions_loc/rcfile_nonstd_loc/ns-part-1
-PREHOOK: Output: default@rcfile_events
-POSTHOOK: query: ALTER TABLE rcfile_events ADD PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-#### A masked pattern was here ####
-POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Input: ### test.blobstore.path ###/rcfile_nonstd_partitions_loc/rcfile_nonstd_loc/ns-part-1
-POSTHOOK: Output: default@rcfile_events
-POSTHOOK: Output: default@rcfile_events@run_date=201211/game_id=39/event_name=hq_change
-PREHOOK: query: INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@rcfile_events@run_date=201211/game_id=39/event_name=hq_change
-POSTHOOK: query: INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@rcfile_events@run_date=201211/game_id=39/event_name=hq_change
-PREHOOK: query: SHOW PARTITIONS rcfile_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@rcfile_events
-POSTHOOK: query: SHOW PARTITIONS rcfile_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@rcfile_events
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=201211/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM rcfile_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM rcfile_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-300
-PREHOOK: query: INSERT INTO TABLE rcfile_events PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@rcfile_events@run_date=201211/game_id=39/event_name=hq_change
-POSTHOOK: query: INSERT INTO TABLE rcfile_events PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@rcfile_events@run_date=201211/game_id=39/event_name=hq_change
-PREHOOK: query: SHOW PARTITIONS rcfile_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@rcfile_events
-POSTHOOK: query: SHOW PARTITIONS rcfile_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@rcfile_events
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=201211/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM rcfile_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM rcfile_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-400
-PREHOOK: query: ALTER TABLE rcfile_events ADD PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-#### A masked pattern was here ####
-PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Input: ### test.blobstore.path ###/rcfile_nonstd_partitions_loc/rcfile_nonstd_loc/ns-part-2
-PREHOOK: Output: default@rcfile_events
-POSTHOOK: query: ALTER TABLE rcfile_events ADD PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-#### A masked pattern was here ####
-POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Input: ### test.blobstore.path ###/rcfile_nonstd_partitions_loc/rcfile_nonstd_loc/ns-part-2
-POSTHOOK: Output: default@rcfile_events
-POSTHOOK: Output: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-PREHOOK: query: INSERT INTO TABLE rcfile_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: query: INSERT INTO TABLE rcfile_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-PREHOOK: query: INSERT INTO TABLE rcfile_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: query: INSERT INTO TABLE rcfile_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-PREHOOK: query: SHOW PARTITIONS rcfile_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@rcfile_events
-POSTHOOK: query: SHOW PARTITIONS rcfile_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@rcfile_events
-run_date=201209/game_id=39/event_name=hq_change
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=201211/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM rcfile_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_events
-PREHOOK: Input: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=20120921/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=20121021/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=201211/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=20121121/game_id=39/event_name=hq_change
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM rcfile_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_events
-POSTHOOK: Input: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=20120921/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=20121021/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=201211/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=20121121/game_id=39/event_name=hq_change
-#### A masked pattern was here ####
-500
-PREHOOK: query: INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@rcfile_events
-POSTHOOK: query: INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@rcfile_events@run_date=20120921/game_id=39/event_name=hq_change
-POSTHOOK: Output: default@rcfile_events@run_date=20121021/game_id=39/event_name=hq_change
-POSTHOOK: Output: default@rcfile_events@run_date=20121121/game_id=39/event_name=hq_change
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-PREHOOK: query: SHOW PARTITIONS rcfile_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@rcfile_events
-POSTHOOK: query: SHOW PARTITIONS rcfile_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@rcfile_events
-run_date=201209/game_id=39/event_name=hq_change
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=201211/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM rcfile_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_events
-PREHOOK: Input: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=20120921/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=20121021/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=201211/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=20121121/game_id=39/event_name=hq_change
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM rcfile_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_events
-POSTHOOK: Input: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=20120921/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=20121021/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=201211/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=20121121/game_id=39/event_name=hq_change
-#### A masked pattern was here ####
-500
-PREHOOK: query: ALTER TABLE rcfile_events ADD PARTITION (run_date=201207, game_id=39, event_name='hq_change')
-#### A masked pattern was here ####
-PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Input: ### test.blobstore.path ###/rcfile_nonstd_partitions_loc/rcfile_nonstd_loc/ns-part-3
-PREHOOK: Output: default@rcfile_events
-POSTHOOK: query: ALTER TABLE rcfile_events ADD PARTITION (run_date=201207, game_id=39, event_name='hq_change')
-#### A masked pattern was here ####
-POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Input: ### test.blobstore.path ###/rcfile_nonstd_partitions_loc/rcfile_nonstd_loc/ns-part-3
-POSTHOOK: Output: default@rcfile_events
-POSTHOOK: Output: default@rcfile_events@run_date=201207/game_id=39/event_name=hq_change
-PREHOOK: query: INSERT INTO TABLE rcfile_events PARTITION(run_date=201207,game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@rcfile_events@run_date=201207/game_id=39/event_name=hq_change
-POSTHOOK: query: INSERT INTO TABLE rcfile_events PARTITION(run_date=201207,game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@rcfile_events@run_date=201207/game_id=39/event_name=hq_change
-PREHOOK: query: SHOW PARTITIONS rcfile_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@rcfile_events
-POSTHOOK: query: SHOW PARTITIONS rcfile_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@rcfile_events
-run_date=201207/game_id=39/event_name=hq_change
-run_date=201209/game_id=39/event_name=hq_change
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=201211/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM rcfile_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_events
-PREHOOK: Input: default@rcfile_events@run_date=201207/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=20120921/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=20121021/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=201211/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=20121121/game_id=39/event_name=hq_change
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM rcfile_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_events
-POSTHOOK: Input: default@rcfile_events@run_date=201207/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=20120921/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=20121021/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=201211/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=20121121/game_id=39/event_name=hq_change
-#### A masked pattern was here ####
-550
-PREHOOK: query: INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@rcfile_events
-POSTHOOK: query: INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@rcfile_events@run_date=20120921/game_id=39/event_name=hq_change
-POSTHOOK: Output: default@rcfile_events@run_date=20121021/game_id=39/event_name=hq_change
-POSTHOOK: Output: default@rcfile_events@run_date=20121121/game_id=39/event_name=hq_change
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: rcfile_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-PREHOOK: query: SHOW PARTITIONS rcfile_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@rcfile_events
-POSTHOOK: query: SHOW PARTITIONS rcfile_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@rcfile_events
-run_date=201207/game_id=39/event_name=hq_change
-run_date=201209/game_id=39/event_name=hq_change
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=201211/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM rcfile_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_events
-PREHOOK: Input: default@rcfile_events@run_date=201207/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=20120921/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=20121021/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=201211/game_id=39/event_name=hq_change
-PREHOOK: Input: default@rcfile_events@run_date=20121121/game_id=39/event_name=hq_change
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM rcfile_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_events
-POSTHOOK: Input: default@rcfile_events@run_date=201207/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=20120921/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=20121021/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=201211/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@rcfile_events@run_date=20121121/game_id=39/event_name=hq_change
-#### A masked pattern was here ####
-550
-PREHOOK: query: ALTER TABLE rcfile_events DROP PARTITION (run_date=201211,game_id=39, event_name='hq_change')
-PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@rcfile_events
-PREHOOK: Output: default@rcfile_events@run_date=201211/game_id=39/event_name=hq_change
-POSTHOOK: query: ALTER TABLE rcfile_events DROP PARTITION (run_date=201211,game_id=39, event_name='hq_change')
-POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@rcfile_events
-POSTHOOK: Output: default@rcfile_events@run_date=201211/game_id=39/event_name=hq_change
-PREHOOK: query: ALTER TABLE rcfile_events DROP PARTITION (run_date=201209,game_id=39, event_name='hq_change')
-PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@rcfile_events
-PREHOOK: Output: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: query: ALTER TABLE rcfile_events DROP PARTITION (run_date=201209,game_id=39, event_name='hq_change')
-POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@rcfile_events
-POSTHOOK: Output: default@rcfile_events@run_date=201209/game_id=39/event_name=hq_change
-PREHOOK: query: ALTER TABLE rcfile_events DROP PARTITION (run_date=201207,game_id=39, event_name='hq_change')
-PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@rcfile_events
-PREHOOK: Output: default@rcfile_events@run_date=201207/game_id=39/event_name=hq_change
-POSTHOOK: query: ALTER TABLE rcfile_events DROP PARTITION (run_date=201207,game_id=39, event_name='hq_change')
-POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@rcfile_events
-POSTHOOK: Output: default@rcfile_events@run_date=201207/game_id=39/event_name=hq_change
-PREHOOK: query: SHOW PARTITIONS rcfile_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@rcfile_events
-POSTHOOK: query: SHOW PARTITIONS rcfile_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@rcfile_events
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM rcfile_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM rcfile_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_events
-#### A masked pattern was here ####
-200
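
The golden file removed above is easier to read from the statements it records. A minimal sketch of the test flow, reconstructed from the PREHOOK: query lines (the LOCATION values are masked in the output, so ${test.blobstore.path} below stands in for the masked prefix and the exact LOCATION clause wording is an assumption, not taken from the patch):

  -- Table whose data lives on the blobstore; the LOCATION prefix is masked in the .q.out.
  CREATE TABLE rcfile_events (
    log_id BIGINT, time BIGINT, uid BIGINT, user_id BIGINT,
    type INT, event_data STRING, session_id STRING, full_uid BIGINT)
  PARTITIONED BY (run_date STRING, game_id INT, event_name STRING)
  STORED AS RCFILE
  LOCATION '${test.blobstore.path}/rcfile_nonstd_partitions_loc/rcfile_events';

  -- Dynamic-partition load, then partitions attached at non-standard locations.
  INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date, game_id, event_name)
  SELECT * FROM src_events;

  ALTER TABLE rcfile_events ADD PARTITION (run_date=201211, game_id=39, event_name='hq_change')
  LOCATION '${test.blobstore.path}/rcfile_nonstd_partitions_loc/rcfile_nonstd_loc/ns-part-1';

  INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date=201211, game_id=39, event_name='hq_change')
  SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
  WHERE SUBSTR(run_date,1,6)='201211';

  -- Each step is checked with SHOW PARTITIONS and SELECT COUNT(*), which is what the
  -- PREHOOK/POSTHOOK blocks above capture (200, 300, 400, 500, 550 rows, then back to
  -- 200 once the non-standard partitions are dropped).
  SHOW PARTITIONS rcfile_events;
  SELECT COUNT(*) FROM rcfile_events;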

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out b/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
index 46bfef5..8a90a9e 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
@@ -192,7 +192,6 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 properties:
-                  COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                   bucket_count -1
                   column.name.delimiter ,
                   columns key
@@ -201,13 +200,9 @@ STAGE PLANS:
 #### A masked pattern was here ####
                   location ### test.blobstore.path ###/write_final_output_blobstore
                   name default.blobstore_table
-                  numFiles 0
-                  numRows 0
-                  rawDataSize 0
                   serialization.ddl struct blobstore_table { i32 key}
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  totalSize 0
 #### A masked pattern was here ####
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 name: default.blobstore_table
@@ -224,7 +219,6 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                 bucket_count -1
                 column.name.delimiter ,
                 columns key
@@ -233,13 +227,9 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 location ### test.blobstore.path ###/write_final_output_blobstore
                 name default.blobstore_table
-                numFiles 0
-                numRows 0
-                rawDataSize 0
                 serialization.ddl struct blobstore_table { i32 key}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 0
 #### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.blobstore_table
@@ -416,7 +406,6 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 properties:
-                  COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                   bucket_count -1
                   column.name.delimiter ,
                   columns key
@@ -425,13 +414,9 @@ STAGE PLANS:
 #### A masked pattern was here ####
                   location ### test.blobstore.path ###/write_final_output_blobstore
                   name default.blobstore_table
-                  numFiles 0
-                  numRows 0
-                  rawDataSize 0
                   serialization.ddl struct blobstore_table { i32 key}
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  totalSize 0
 #### A masked pattern was here ####
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 name: default.blobstore_table
@@ -448,7 +433,6 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                 bucket_count -1
                 column.name.delimiter ,
                 columns key
@@ -457,13 +441,9 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 location ### test.blobstore.path ###/write_final_output_blobstore
                 name default.blobstore_table
-                numFiles 0
-                numRows 0
-                rawDataSize 0
                 serialization.ddl struct blobstore_table { i32 key}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 0
 #### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.blobstore_table

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/zero_rows_blobstore.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/zero_rows_blobstore.q.out b/itests/hive-blobstore/src/test/results/clientpositive/zero_rows_blobstore.q.out
deleted file mode 100644
index 590f947..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/zero_rows_blobstore.q.out
+++ /dev/null
@@ -1,91 +0,0 @@
-PREHOOK: query: DROP TABLE blobstore_source
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE blobstore_source
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE blobstore_source (
-    key int
-) 
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/zero_rows_blobstore/blobstore_source
-PREHOOK: Output: database:default
-PREHOOK: Output: default@blobstore_source
-POSTHOOK: query: CREATE TABLE blobstore_source (
-    key int
-) 
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/zero_rows_blobstore/blobstore_source
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@blobstore_source
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv6.txt' INTO TABLE blobstore_source
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@blobstore_source
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv6.txt' INTO TABLE blobstore_source
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@blobstore_source
-PREHOOK: query: DROP TABLE blobstore_target
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE blobstore_target
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE blobstore_target (
-    key int
-) 
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/zero_rows_blobstore/blobstore_target
-PREHOOK: Output: database:default
-PREHOOK: Output: default@blobstore_target
-POSTHOOK: query: CREATE TABLE blobstore_target (
-    key int
-) 
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/zero_rows_blobstore/blobstore_target
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@blobstore_target
-PREHOOK: query: SELECT COUNT(*) FROM blobstore_target
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_target
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM blobstore_target
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_target
-#### A masked pattern was here ####
-0
-PREHOOK: query: INSERT OVERWRITE TABLE blobstore_target SELECT key FROM blobstore_source
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_source
-PREHOOK: Output: default@blobstore_target
-POSTHOOK: query: INSERT OVERWRITE TABLE blobstore_target SELECT key FROM blobstore_source
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_source
-POSTHOOK: Output: default@blobstore_target
-PREHOOK: query: SELECT COUNT(*) FROM blobstore_target
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_target
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM blobstore_target
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_target
-#### A masked pattern was here ####
-100
-PREHOOK: query: INSERT OVERWRITE TABLE blobstore_target SELECT key FROM blobstore_source WHERE FALSE
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_source
-PREHOOK: Output: default@blobstore_target
-POSTHOOK: query: INSERT OVERWRITE TABLE blobstore_target SELECT key FROM blobstore_source WHERE FALSE
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_source
-POSTHOOK: Output: default@blobstore_target
-PREHOOK: query: SELECT COUNT(*) FROM blobstore_target
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_target
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM blobstore_target
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_target
-#### A masked pattern was here ####
-0

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/zero_rows_hdfs.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/zero_rows_hdfs.q.out b/itests/hive-blobstore/src/test/results/clientpositive/zero_rows_hdfs.q.out
deleted file mode 100644
index 92cf51f..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/zero_rows_hdfs.q.out
+++ /dev/null
@@ -1,89 +0,0 @@
-PREHOOK: query: DROP TABLE blobstore_source
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE blobstore_source
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE blobstore_source (
-    key int
-)
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/zero_rows_hdfs/blobstore_source
-PREHOOK: Output: database:default
-PREHOOK: Output: default@blobstore_source
-POSTHOOK: query: CREATE TABLE blobstore_source (
-    key int
-)
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/zero_rows_hdfs/blobstore_source
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@blobstore_source
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv6.txt' INTO TABLE blobstore_source
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@blobstore_source
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv6.txt' INTO TABLE blobstore_source
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@blobstore_source
-PREHOOK: query: DROP TABLE hdfs_target
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE hdfs_target
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hdfs_target (
-    key int
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@hdfs_target
-POSTHOOK: query: CREATE TABLE hdfs_target (
-    key int
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@hdfs_target
-PREHOOK: query: SELECT COUNT(*) FROM hdfs_target
-PREHOOK: type: QUERY
-PREHOOK: Input: default@hdfs_target
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM hdfs_target
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@hdfs_target
-#### A masked pattern was here ####
-0
-PREHOOK: query: INSERT OVERWRITE TABLE hdfs_target SELECT key FROM blobstore_source
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_source
-PREHOOK: Output: default@hdfs_target
-POSTHOOK: query: INSERT OVERWRITE TABLE hdfs_target SELECT key FROM blobstore_source
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_source
-POSTHOOK: Output: default@hdfs_target
-POSTHOOK: Lineage: hdfs_target.key SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: SELECT COUNT(*) FROM hdfs_target
-PREHOOK: type: QUERY
-PREHOOK: Input: default@hdfs_target
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM hdfs_target
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@hdfs_target
-#### A masked pattern was here ####
-100
-PREHOOK: query: INSERT OVERWRITE TABLE hdfs_target SELECT key FROM blobstore_source WHERE FALSE
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_source
-PREHOOK: Output: default@hdfs_target
-POSTHOOK: query: INSERT OVERWRITE TABLE hdfs_target SELECT key FROM blobstore_source WHERE FALSE
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_source
-POSTHOOK: Output: default@hdfs_target
-POSTHOOK: Lineage: hdfs_target.key SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: SELECT COUNT(*) FROM hdfs_target
-PREHOOK: type: QUERY
-PREHOOK: Input: default@hdfs_target
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM hdfs_target
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@hdfs_target
-#### A masked pattern was here ####
-0
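
Both zero_rows golden files removed above exercise the same behavior: an INSERT OVERWRITE whose SELECT produces no rows must still truncate the target table. A minimal sketch of that pattern, taken from the PREHOOK: query lines (table LOCATIONs are masked in the output and omitted here):

  CREATE TABLE blobstore_source (key INT);
  LOAD DATA LOCAL INPATH '../../data/files/kv6.txt' INTO TABLE blobstore_source;

  CREATE TABLE hdfs_target (key INT);

  -- First overwrite copies 100 rows ...
  INSERT OVERWRITE TABLE hdfs_target SELECT key FROM blobstore_source;
  SELECT COUNT(*) FROM hdfs_target;   -- 100

  -- ... a second overwrite with an always-false predicate must leave it empty.
  INSERT OVERWRITE TABLE hdfs_target SELECT key FROM blobstore_source WHERE FALSE;
  SELECT COUNT(*) FROM hdfs_target;   -- 0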

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-jmh/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hive-jmh/pom.xml b/itests/hive-jmh/pom.xml
index af8eb19..f1417fd 100644
--- a/itests/hive-jmh/pom.xml
+++ b/itests/hive-jmh/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-minikdc/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hive-minikdc/pom.xml b/itests/hive-minikdc/pom.xml
index 95d2614..dcc5c2d 100644
--- a/itests/hive-minikdc/pom.xml
+++ b/itests/hive-minikdc/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestSSLWithMiniKdc.java
----------------------------------------------------------------------
diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestSSLWithMiniKdc.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestSSLWithMiniKdc.java
deleted file mode 100644
index 3153b9f..0000000
--- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestSSLWithMiniKdc.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hive.minikdc;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.Statement;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hive.jdbc.miniHS2.MiniHS2;
-import org.hadoop.hive.jdbc.SSLTestUtils;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-
-public class TestSSLWithMiniKdc {
-
-  private static MiniHS2 miniHS2 = null;
-  private static MiniHiveKdc miniHiveKdc = null;
-
-  @BeforeClass
-  public static void beforeTest() throws Exception {
-    Class.forName(MiniHS2.getJdbcDriverName());
-
-    HiveConf hiveConf = new HiveConf();
-
-    SSLTestUtils.setMetastoreSslConf(hiveConf);
-    hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
-
-    miniHiveKdc = MiniHiveKdc.getMiniHiveKdc(hiveConf);
-
-    setHMSSaslConf(miniHiveKdc, hiveConf);
-
-    miniHS2 = MiniHiveKdc.getMiniHS2WithKerbWithRemoteHMS(miniHiveKdc, hiveConf);
-
-    Map<String, String> confOverlay = new HashMap<>();
-    SSLTestUtils.setHttpConfOverlay(confOverlay);
-    SSLTestUtils.setSslConfOverlay(confOverlay);
-
-    miniHS2.start(confOverlay);
-  }
-
-  @AfterClass
-  public static void afterTest() throws Exception {
-    miniHS2.stop();
-  }
-
-  @Test
-  public void testConnection() throws Exception {
-    String tableName = "testTable";
-    Path dataFilePath = new Path(SSLTestUtils.getDataFileDir(), "kv1.txt");
-    Connection hs2Conn = getConnection(MiniHiveKdc.HIVE_TEST_USER_1);
-
-    Statement stmt = hs2Conn.createStatement();
-
-    SSLTestUtils.setupTestTableWithData(tableName, dataFilePath, hs2Conn);
-
-    stmt.execute("select * from " + tableName);
-    stmt.execute("drop table " + tableName);
-    stmt.close();
-  }
-
-  private Connection getConnection(String userName) throws Exception {
-    miniHiveKdc.loginUser(userName);
-    return DriverManager.getConnection(miniHS2.getJdbcURL("default", SSLTestUtils.SSL_CONN_PARAMS),
-        System.getProperty("user.name"), "bar");
-  }
-
-  private static void setHMSSaslConf(MiniHiveKdc miniHiveKdc, HiveConf conf) {
-   String hivePrincipal =
-        miniHiveKdc.getFullyQualifiedServicePrincipal(MiniHiveKdc.HIVE_SERVICE_PRINCIPAL);
-    String hiveKeytab = miniHiveKdc.getKeyTabFile(
-        miniHiveKdc.getServicePrincipalForUser(MiniHiveKdc.HIVE_SERVICE_PRINCIPAL));
-
-    conf.setBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL, true);
-    conf.setVar(ConfVars.METASTORE_KERBEROS_PRINCIPAL, hivePrincipal);
-    conf.setVar(ConfVars.METASTORE_KERBEROS_KEYTAB_FILE, hiveKeytab);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit-hadoop2/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hive-unit-hadoop2/pom.xml b/itests/hive-unit-hadoop2/pom.xml
index 339a194..d15bd54 100644
--- a/itests/hive-unit-hadoop2/pom.xml
+++ b/itests/hive-unit-hadoop2/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java
new file mode 100644
index 0000000..b798379
--- /dev/null
+++ b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java
@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.security;
+
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
+import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
+import static org.apache.hadoop.fs.permission.AclEntryType.USER;
+
+import java.util.List;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
+public class TestExtendedAcls extends FolderPermissionBase {
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    conf = new HiveConf(TestExtendedAcls.class);
+    //setup the mini DFS with acl's enabled.
+    conf.set("dfs.namenode.acls.enabled", "true");
+    conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict");
+    baseSetup();
+  }
+
+  private final ImmutableList<AclEntry> aclSpec1 = ImmutableList.of(
+      aclEntry(ACCESS, USER, FsAction.ALL),
+      aclEntry(ACCESS, GROUP, FsAction.ALL),
+      aclEntry(ACCESS, OTHER, FsAction.ALL),
+      aclEntry(ACCESS, USER, "bar", FsAction.READ_WRITE),
+      aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, "bar", FsAction.READ_WRITE),
+      aclEntry(ACCESS, GROUP, "foo", FsAction.READ_EXECUTE));
+
+  private final ImmutableList<AclEntry> aclSpec2 = ImmutableList.of(
+      aclEntry(ACCESS, USER, FsAction.ALL),
+      aclEntry(ACCESS, GROUP, FsAction.ALL),
+      aclEntry(ACCESS, OTHER, FsAction.READ_EXECUTE),
+      aclEntry(ACCESS, USER, "bar2", FsAction.READ_WRITE),
+      aclEntry(ACCESS, USER, "foo2", FsAction.READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, "bar2", FsAction.READ),
+      aclEntry(ACCESS, GROUP, "foo2", FsAction.READ_EXECUTE));
+
+  @Override
+  public void setPermission(String locn, int permIndex) throws Exception {
+    switch (permIndex) {
+      case 0:
+        setAcl(locn, aclSpec1);
+        break;
+      case 1:
+        setAcl(locn, aclSpec2);
+        break;
+      default:
+        throw new RuntimeException("Only 2 permissions by this test");
+    }
+  }
+
+  @Override
+  public void verifyPermission(String locn, int permIndex) throws Exception {
+    switch (permIndex) {
+      case 0:
+        FsPermission perm = fs.getFileStatus(new Path(locn)).getPermission();
+        Assert.assertEquals("Location: " + locn, "rwxrwxrwx", String.valueOf(perm));
+
+        List<AclEntry> actual = getAcl(locn);
+        verifyAcls(aclSpec1, actual);
+        break;
+      case 1:
+        perm = fs.getFileStatus(new Path(locn)).getPermission();
+        Assert.assertEquals("Location: " + locn, "rwxrwxr-x", String.valueOf(perm));
+
+        List<AclEntry> acls = getAcl(locn);
+        verifyAcls(aclSpec2, acls);
+        break;
+      default:
+        throw new RuntimeException("Only 2 permissions by this test: " + permIndex);
+    }
+  }
+
+  /**
+   * Create a new AclEntry with scope, type and permission (no name).
+   *
+   * @param scope
+   *          AclEntryScope scope of the ACL entry
+   * @param type
+   *          AclEntryType ACL entry type
+   * @param permission
+   *          FsAction set of permissions in the ACL entry
+   * @return AclEntry new AclEntry
+   */
+  private AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
+      FsAction permission) {
+    return new AclEntry.Builder().setScope(scope).setType(type)
+        .setPermission(permission).build();
+  }
+
+  /**
+   * Create a new AclEntry with scope, type, name and permission.
+   *
+   * @param scope
+   *          AclEntryScope scope of the ACL entry
+   * @param type
+   *          AclEntryType ACL entry type
+   * @param name
+   *          String optional ACL entry name
+   * @param permission
+   *          FsAction set of permissions in the ACL entry
+   * @return AclEntry new AclEntry
+   */
+  private AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
+      String name, FsAction permission) {
+    return new AclEntry.Builder().setScope(scope).setType(type).setName(name)
+        .setPermission(permission).build();
+  }
+
+  private void verifyAcls(List<AclEntry> expectedList, List<AclEntry> actualList) {
+    for (AclEntry expected : expectedList) {
+      if (expected.getName() != null) {
+          // The non-named ACL entries come back as regular permission bits, not as AclEntries.
+        boolean found = false;
+        for (AclEntry actual : actualList) {
+          if (actual.equals(expected)) {
+            found = true;
+          }
+        }
+        if (!found) {
+          Assert.fail("Following Acl does not have a match: " + expected);
+        }
+      }
+    }
+  }
+
+  private void setAcl(String locn, List<AclEntry> aclSpec) throws Exception {
+    fs.setAcl(new Path(locn), aclSpec);
+  }
+
+  private List<AclEntry> getAcl(String locn) throws Exception {
+    return fs.getAclStatus(new Path(locn)).getEntries();
+  }
+}
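
The aclEntry(...) helpers and the setAcl/getAcl wrappers above are thin layers over Hadoop's
AclEntry.Builder and the FileSystem ACL calls. The following standalone sketch shows that round
trip outside the test harness; it is not part of the patch, the FileSystem handle and the
"/warehouse/t1" path are illustrative placeholders, and it assumes a cluster with
dfs.namenode.acls.enabled=true.

import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
import static org.apache.hadoop.fs.permission.AclEntryType.USER;

import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsAction;

import com.google.common.collect.ImmutableList;

public class AclRoundTripSketch {
  public static void applyAndRead(FileSystem fs) throws Exception {
    Path dir = new Path("/warehouse/t1"); // placeholder path
    // A full replacement spec needs the three base entries plus any named entries.
    List<AclEntry> spec = ImmutableList.of(
        new AclEntry.Builder().setScope(ACCESS).setType(USER).setPermission(FsAction.ALL).build(),
        new AclEntry.Builder().setScope(ACCESS).setType(GROUP).setPermission(FsAction.ALL).build(),
        new AclEntry.Builder().setScope(ACCESS).setType(OTHER).setPermission(FsAction.ALL).build(),
        new AclEntry.Builder().setScope(ACCESS).setType(USER).setName("bar")
            .setPermission(FsAction.READ_WRITE).build());
    fs.setAcl(dir, spec);                                       // replace the directory ACL
    List<AclEntry> entries = fs.getAclStatus(dir).getEntries(); // named entries come back here;
    System.out.println(entries);                                // base entries surface as permission bits
  }
}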

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProviderWithACL.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProviderWithACL.java b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProviderWithACL.java
index 62c109c..028c117 100644
--- a/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProviderWithACL.java
+++ b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProviderWithACL.java
@@ -84,6 +84,7 @@ public class TestStorageBasedMetastoreAuthorizationProviderWithACL
     warehouseDir = new Path(new Path(fs.getUri()), "/warehouse");
     fs.mkdirs(warehouseDir);
     conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehouseDir.toString());
+    conf.setBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS, true);
 
     // Set up scratch directory
     Path scratchDir = new Path(new Path(fs.getUri()), "/scratchdir");

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hive-unit/pom.xml b/itests/hive-unit/pom.xml
index 8be25b2..789192b 100644
--- a/itests/hive-unit/pom.xml
+++ b/itests/hive-unit/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 


http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFile.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFile.java b/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFile.java
new file mode 100644
index 0000000..49d6d24
--- /dev/null
+++ b/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFile.java
@@ -0,0 +1,273 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.beeline.qfile;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.util.Shell;
+import org.apache.hive.common.util.StreamPrinter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Pattern;
+
+/**
+ * Class representing a query and its associated files. It provides accessors for the specific
+ * input and output files, and methods for filtering the output of the runs.
+ */
+public final class QFile {
+  private static final Logger LOG = LoggerFactory.getLogger(QFile.class.getName());
+
+  private String name;
+  private File inputFile;
+  private File rawOutputFile;
+  private File outputFile;
+  private File expectedOutputFile;
+  private File logFile;
+  private File infraLogFile;
+  private static RegexFilterSet staticFilterSet = getStaticFilterSet();
+  private RegexFilterSet specificFilterSet;
+
+  private QFile() {}
+
+  public String getName() {
+    return name;
+  }
+
+  public File getInputFile() {
+    return inputFile;
+  }
+
+  public File getRawOutputFile() {
+    return rawOutputFile;
+  }
+
+  public File getOutputFile() {
+    return outputFile;
+  }
+
+  public File getExpectedOutputFile() {
+    return expectedOutputFile;
+  }
+
+  public File getLogFile() {
+    return logFile;
+  }
+
+  public File getInfraLogFile() {
+    return infraLogFile;
+  }
+
+  public void filterOutput() throws IOException {
+    String rawOutput = FileUtils.readFileToString(rawOutputFile);
+    String filteredOutput = staticFilterSet.filter(specificFilterSet.filter(rawOutput));
+    FileUtils.writeStringToFile(outputFile, filteredOutput);
+  }
+
+  public boolean compareResults() throws IOException, InterruptedException {
+    if (!expectedOutputFile.exists()) {
+      LOG.error("Expected results file does not exist: " + expectedOutputFile);
+      return false;
+    }
+    return executeDiff();
+  }
+
+  public void overwriteResults() throws IOException {
+    if (expectedOutputFile.exists()) {
+      FileUtils.forceDelete(expectedOutputFile);
+    }
+    FileUtils.copyFile(outputFile, expectedOutputFile);
+  }
+
+  private boolean executeDiff() throws IOException, InterruptedException {
+    List<String> diffCommandArgs = new ArrayList<String>();
+    diffCommandArgs.add("diff");
+
+    // Text file comparison
+    diffCommandArgs.add("-a");
+
+    if (Shell.WINDOWS) {
+      // Ignore changes in the amount of white space
+      diffCommandArgs.add("-b");
+
+      // Files created on Windows machines have different line endings
+      // than files created on Unix/Linux. Windows uses carriage return and line feed
+      // ("\r\n") as a line ending, whereas Unix uses just line feed ("\n").
+      // Also, StringBuilder.toString() and Stream-to-String conversions add extra
+      // spaces at the end of the line.
+      diffCommandArgs.add("--strip-trailing-cr"); // Strip trailing carriage return on input
+      diffCommandArgs.add("-B"); // Ignore changes whose lines are all blank
+    }
+
+    // Add files to compare to the arguments list
+    diffCommandArgs.add(getQuotedString(expectedOutputFile));
+    diffCommandArgs.add(getQuotedString(outputFile));
+
+    System.out.println("Running: " + org.apache.commons.lang.StringUtils.join(diffCommandArgs,
+        ' '));
+    Process executor = Runtime.getRuntime().exec(diffCommandArgs.toArray(
+        new String[diffCommandArgs.size()]));
+
+    StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, System.err);
+    StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out);
+
+    outPrinter.start();
+    errPrinter.start();
+
+    int result = executor.waitFor();
+
+    outPrinter.join();
+    errPrinter.join();
+
+    executor.waitFor();
+
+    return (result == 0);
+  }
+
+  private static String getQuotedString(File file) {
+    return Shell.WINDOWS ? String.format("\"%s\"", file.getAbsolutePath()) : file.getAbsolutePath();
+  }
+
+  private static class RegexFilterSet {
+    private final Map<Pattern, String> regexFilters = new LinkedHashMap<Pattern, String>();
+
+    public RegexFilterSet addFilter(String regex, String replacement) {
+      regexFilters.put(Pattern.compile(regex), replacement);
+      return this;
+    }
+
+    public String filter(String input) {
+      for (Pattern pattern : regexFilters.keySet()) {
+        input = pattern.matcher(input).replaceAll(regexFilters.get(pattern));
+      }
+      return input;
+    }
+  }
+
+  // These are the filters that are common to every QTest.
+  // Check specificFilterSet for QTest-specific ones.
+  private static RegexFilterSet getStaticFilterSet() {
+    // Extract the leading four digits from the unix time value.
+    // Use this as a prefix in order to increase the selectivity
+    // of the unix time stamp replacement regex.
+    String currentTimePrefix = Long.toString(System.currentTimeMillis()).substring(0, 4);
+
+    String userName = System.getProperty("user.name");
+
+    String timePattern = "(Mon|Tue|Wed|Thu|Fri|Sat|Sun) "
+        + "(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) "
+        + "\\d{2} \\d{2}:\\d{2}:\\d{2} \\w+ 20\\d{2}";
+    // Pattern to remove the timestamp and other infrastructural info from the out file
+    String logPattern = "\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d*\\s+\\S+\\s+\\[" +
+        ".*\\]\\s+\\S+:\\s+";
+    String operatorPattern = "\"(CONDITION|COPY|DEPENDENCY_COLLECTION|DDL"
+        + "|EXPLAIN|FETCH|FIL|FS|FUNCTION|GBY|HASHTABLEDUMMY|HASTTABLESINK|JOIN"
+        + "|LATERALVIEWFORWARD|LIM|LVJ|MAP|MAPJOIN|MAPRED|MAPREDLOCAL|MOVE|OP|RS"
+        + "|SCR|SEL|STATS|TS|UDTF|UNION)_\\d+\"";
+
+    return new RegexFilterSet()
+        .addFilter(logPattern, "")
+        .addFilter("Getting log thread is interrupted, since query is done!\n", "")
+        .addFilter("going to print operations logs\n", "")
+        .addFilter("printed operations logs\n", "")
+        .addFilter("\\(queryId=[^\\)]*\\)", "queryId=(!!{queryId}!!)")
+        .addFilter("file:/\\w\\S+", "file:/!!ELIDED!!")
+        .addFilter("pfile:/\\w\\S+", "pfile:/!!ELIDED!!")
+        .addFilter("hdfs:/\\w\\S+", "hdfs:/!!ELIDED!!")
+        .addFilter("last_modified_by=\\w+", "last_modified_by=!!ELIDED!!")
+        .addFilter(timePattern, "!!TIMESTAMP!!")
+        .addFilter("(\\D)" + currentTimePrefix + "\\d{6}(\\D)", "$1!!UNIXTIME!!$2")
+        .addFilter("(\\D)" + currentTimePrefix + "\\d{9}(\\D)", "$1!!UNIXTIMEMILLIS!!$2")
+        .addFilter(userName, "!!{user.name}!!")
+        .addFilter(operatorPattern, "\"$1_!!ELIDED!!\"")
+        .addFilter("Time taken: [0-9\\.]* seconds", "Time taken: !!ELIDED!! seconds");
+  }
+
+  /**
+   * Builder to generate QFile objects. After initializing the builder, it is possible to
+   * generate the next QFile object using its name only.
+   */
+  public static class QFileBuilder {
+    private File queryDirectory;
+    private File logDirectory;
+    private File resultsDirectory;
+    private String scratchDirectoryString;
+    private String warehouseDirectoryString;
+    private File hiveRootDirectory;
+
+    public QFileBuilder() {
+    }
+
+    public QFileBuilder setQueryDirectory(File queryDirectory) {
+      this.queryDirectory = queryDirectory;
+      return this;
+    }
+
+    public QFileBuilder setLogDirectory(File logDirectory) {
+      this.logDirectory = logDirectory;
+      return this;
+    }
+
+    public QFileBuilder setResultsDirectory(File resultsDirectory) {
+      this.resultsDirectory = resultsDirectory;
+      return this;
+    }
+
+    public QFileBuilder setScratchDirectoryString(String scratchDirectoryString) {
+      this.scratchDirectoryString = scratchDirectoryString;
+      return this;
+    }
+
+    public QFileBuilder setWarehouseDirectoryString(String warehouseDirectoryString) {
+      this.warehouseDirectoryString = warehouseDirectoryString;
+      return this;
+    }
+
+    public QFileBuilder setHiveRootDirectory(File hiveRootDirectory) {
+      this.hiveRootDirectory = hiveRootDirectory;
+      return this;
+    }
+
+    public QFile getQFile(String name) throws IOException {
+      QFile result = new QFile();
+      result.name = name;
+      result.inputFile = new File(queryDirectory, name + ".q");
+      result.rawOutputFile = new File(logDirectory, name + ".q.out.raw");
+      result.outputFile = new File(logDirectory, name + ".q.out");
+      result.expectedOutputFile = new File(resultsDirectory, name + ".q.out");
+      result.logFile = new File(logDirectory, name + ".q.beeline");
+      result.infraLogFile = new File(logDirectory, name + ".q.out.infra");
+      // These are the filters that are specific to the given QTest.
+      // Check staticFilterSet for common filters.
+      result.specificFilterSet = new RegexFilterSet()
+          .addFilter(scratchDirectoryString + "[\\w\\-/]+", "!!{hive.exec.scratchdir}!!")
+          .addFilter(warehouseDirectoryString, "!!{hive.metastore.warehouse.dir}!!")
+          .addFilter(resultsDirectory.getAbsolutePath(), "!!{expectedDirectory}!!")
+          .addFilter(logDirectory.getAbsolutePath(), "!!{outputDirectory}!!")
+          .addFilter(queryDirectory.getAbsolutePath(), "!!{qFileDirectory}!!")
+          .addFilter(hiveRootDirectory.getAbsolutePath(), "!!{hive.root}!!");
+      return result;
+    }
+  }
+}
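
As a usage sketch (not part of the patch), this is how the QFileBuilder above is expected to be
wired up by a test driver; all directory locations below are illustrative placeholders.

import java.io.File;

import org.apache.hive.beeline.qfile.QFile;
import org.apache.hive.beeline.qfile.QFile.QFileBuilder;

public class QFileBuilderSketch {
  public static void main(String[] args) throws Exception {
    QFileBuilder builder = new QFileBuilder()
        .setQueryDirectory(new File("ql/src/test/queries/clientpositive"))    // placeholder
        .setLogDirectory(new File("/tmp/qfile-logs"))                         // placeholder
        .setResultsDirectory(new File("ql/src/test/results/clientpositive"))  // placeholder
        .setScratchDirectoryString("/tmp/hive-scratch")                       // placeholder
        .setWarehouseDirectoryString("/tmp/warehouse")                        // placeholder
        .setHiveRootDirectory(new File("."));                                 // placeholder
    QFile qFile = builder.getQFile("join1");   // resolves join1.q, join1.q.out, join1.q.out.raw, ...
    System.out.println(qFile.getInputFile());  // <queryDirectory>/join1.q
    // After a BeeLine run has produced the raw output, qFile.filterOutput() applies the static
    // and query-specific regex filters, and qFile.compareResults() diffs the filtered output
    // against the expected file.
  }
}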

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFileBeeLineClient.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFileBeeLineClient.java b/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFileBeeLineClient.java
new file mode 100644
index 0000000..b6eac89
--- /dev/null
+++ b/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFileBeeLineClient.java
@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.beeline.qfile;
+
+import org.apache.hive.beeline.BeeLine;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+
+/**
+ * QFile test client using BeeLine. It can be used to submit a list of command strings, or a QFile.
+ */
+public class QFileBeeLineClient implements AutoCloseable {
+  private BeeLine beeLine;
+  private PrintStream beelineOutputStream;
+  private File logFile;
+
+  protected QFileBeeLineClient(String jdbcUrl, String jdbcDriver, String username, String password,
+      File log) throws IOException {
+    logFile = log;
+    beeLine = new BeeLine();
+    beelineOutputStream = new PrintStream(logFile, "UTF-8");
+    beeLine.setOutputStream(beelineOutputStream);
+    beeLine.setErrorStream(beelineOutputStream);
+    beeLine.runCommands(
+        new String[] {
+          "!set verbose true",
+          "!set shownestederrs true",
+          "!set showwarnings true",
+          "!set showelapsedtime false",
+          "!set maxwidth -1",
+          "!connect " + jdbcUrl + " " + username + " " + password + " " + jdbcDriver
+        });
+  }
+
+  public boolean execute(String[] commands, File resultFile) {
+    boolean hasErrors = false;
+    beeLine.runCommands(
+        new String[] {
+          "!set outputformat csv",
+          "!record " + resultFile.getAbsolutePath()
+        });
+
+    if (commands.length != beeLine.runCommands(commands)) {
+      hasErrors = true;
+    }
+
+    beeLine.runCommands(new String[] {"!record"});
+    return !hasErrors;
+  }
+
+  private void beforeExecute(QFile qFile) {
+    assert(execute(
+        new String[] {
+          "USE default;",
+          "SHOW TABLES;",
+          "DROP DATABASE IF EXISTS `" + qFile.getName() + "` CASCADE;",
+          "CREATE DATABASE `" + qFile.getName() + "`;",
+          "USE `" + qFile.getName() + "`;"
+        },
+        qFile.getInfraLogFile()));
+  }
+
+  private void afterExecute(QFile qFile) {
+    assert(execute(
+        new String[] {
+          "USE default;",
+          "DROP DATABASE IF EXISTS `" + qFile.getName() + "` CASCADE;",
+        },
+        qFile.getInfraLogFile()));
+  }
+
+  public boolean execute(QFile qFile) {
+    beforeExecute(qFile);
+    boolean result = execute(
+        new String[] {
+          "!run " + qFile.getInputFile().getAbsolutePath()
+        },
+        qFile.getRawOutputFile());
+    afterExecute(qFile);
+    return result;
+  }
+
+  public void close() {
+    if (beeLine != null) {
+      beeLine.runCommands(new String[] {
+        "!quit"
+      });
+    }
+    if (beelineOutputStream != null) {
+      beelineOutputStream.close();
+    }
+  }
+
+  /**
+   * Builder to generate QFileBeeLineClient objects. After initializing the builder, it can be
+   * used to create new clients without any parameters.
+   */
+  public static class QFileClientBuilder {
+    private String username;
+    private String password;
+    private String jdbcUrl;
+    private String jdbcDriver;
+
+    public QFileClientBuilder() {
+    }
+
+    public QFileClientBuilder setUsername(String username) {
+      this.username = username;
+      return this;
+    }
+
+    public QFileClientBuilder setPassword(String password) {
+      this.password = password;
+      return this;
+    }
+
+    public QFileClientBuilder setJdbcUrl(String jdbcUrl) {
+      this.jdbcUrl = jdbcUrl;
+      return this;
+    }
+
+    public QFileClientBuilder setJdbcDriver(String jdbcDriver) {
+      this.jdbcDriver = jdbcDriver;
+      return this;
+    }
+
+    public QFileBeeLineClient getClient(File logFile) throws IOException {
+      return new QFileBeeLineClient(jdbcUrl, jdbcDriver, username, password, logFile);
+    }
+  }
+}
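
As a usage sketch (not part of the patch), this is how QFileClientBuilder and QFileBeeLineClient
combine with a QFile; the JDBC URL and the credentials are illustrative placeholders for a running
HiveServer2 instance.

import org.apache.hive.beeline.qfile.QFile;
import org.apache.hive.beeline.qfile.QFileBeeLineClient;
import org.apache.hive.beeline.qfile.QFileBeeLineClient.QFileClientBuilder;

public class QFileClientSketch {
  public static void runOne(QFile qFile) throws Exception {
    QFileClientBuilder clientBuilder = new QFileClientBuilder()
        .setJdbcUrl("jdbc:hive2://localhost:10000/default")  // placeholder HiveServer2 endpoint
        .setJdbcDriver("org.apache.hive.jdbc.HiveDriver")
        .setUsername("user")                                 // placeholder credentials
        .setPassword("password");
    // try-with-resources works because QFileBeeLineClient implements AutoCloseable.
    try (QFileBeeLineClient client = clientBuilder.getClient(qFile.getLogFile())) {
      if (client.execute(qFile)) {  // runs "!run <name>.q" between the before/after housekeeping
        qFile.filterOutput();       // mask timestamps, paths and other nondeterministic output
        System.out.println("results match: " + qFile.compareResults());
      }
    }
  }
}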

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hive/beeline/qfile/package-info.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hive/beeline/qfile/package-info.java b/itests/util/src/main/java/org/apache/hive/beeline/qfile/package-info.java
new file mode 100644
index 0000000..fcd50ec
--- /dev/null
+++ b/itests/util/src/main/java/org/apache/hive/beeline/qfile/package-info.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package for the BeeLine specific QTest file classes.
+ */
+package org.apache.hive.beeline.qfile;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/jdbc-handler/pom.xml
----------------------------------------------------------------------
diff --git a/jdbc-handler/pom.xml b/jdbc-handler/pom.xml
index 6c6e1fa..364886a 100644
--- a/jdbc-handler/pom.xml
+++ b/jdbc-handler/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/jdbc/pom.xml
----------------------------------------------------------------------
diff --git a/jdbc/pom.xml b/jdbc/pom.xml
index 1294a61..8adf67b 100644
--- a/jdbc/pom.xml
+++ b/jdbc/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 
@@ -168,18 +168,6 @@
                   </includes>
                 </filter>
                 <filter>
-                  <artifact>org.apache.parquet:parquet-hadoop-bundle</artifact>
-                  <excludes>
-                    <exclude>shaded/parquet/org/codehaus/jackson/**</exclude>
-                  </excludes>
-                </filter>
-                <filter>
-                  <artifact>org.apache.logging.log4j:log4j-core</artifact>
-                  <excludes>
-                    <exclude>org/apache/logging/log4j/core/jackson/**</exclude>
-                  </excludes>
-                </filter>
-                <filter>
                   <artifact>*:*</artifact>
                   <excludes>
                     <exclude>META-INF/*.SF</exclude>
@@ -192,7 +180,11 @@
               <artifactSet>
                 <excludes>
                   <exclude>org.apache.commons:commons-compress</exclude>
-                  <exclude>org.apache.hadoop:*</exclude>
+                  <exclude>org.apache.hadoop:hadoop-yarn*</exclude>
+                  <exclude>org.apache.hadoop:hadoop-mapreduce*</exclude>
+                  <exclude>org.apache.hadoop:hadoop-hdfs</exclude>
+                  <exclude>org.apache.hadoop:hadoop-client</exclude>
+                  <exclude>org.apache.hadoop:hadoop-annotations</exclude>
                   <exclude>org.apache.hive:hive-vector-code-gen</exclude>
                   <exclude>org.apache.ant:*</exclude>
                   <exclude>junit:*</exclude>
@@ -237,7 +229,6 @@
                   <exclude>com.thoughtworks.paranamer:*</exclude>
                   <exclude>com.twitter:*</exclude>
                   <exclude>com.zaxxer:*</exclude>
-                  <exclude>com.fasterxml.jackson.core:*</exclude>
                   <exclude>io.netty:*</exclude>
                   <exclude>javax.activation:*</exclude>
                   <exclude>javax.inject:*</exclude>
@@ -301,6 +292,13 @@
                   <shadedPattern>org.apache.hive.com.facebook</shadedPattern>
                 </relocation>
                 <relocation>
+                  <pattern>org.apache.hadoop</pattern>
+                  <shadedPattern>org.apache.hive.org.apache.hadoop</shadedPattern>
+                  <excludes>
+                    <exclude>org.apache.hadoop.security.*</exclude>
+                  </excludes>
+                </relocation>
+                <relocation>
                   <pattern>org.apache.zookeeper</pattern>
                   <shadedPattern>org.apache.hive.org.apache.zookeeper</shadedPattern>
                 </relocation>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
index fb18adb..1695c5d 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
@@ -41,7 +41,6 @@ import org.apache.hive.service.rpc.thrift.TSessionHandle;
 import org.apache.http.HttpRequestInterceptor;
 import org.apache.http.HttpResponse;
 import org.apache.http.client.CookieStore;
-import org.apache.http.client.HttpRequestRetryHandler;
 import org.apache.http.client.ServiceUnavailableRetryStrategy;
 import org.apache.http.config.Registry;
 import org.apache.http.config.RegistryBuilder;
@@ -387,9 +386,9 @@ public class HiveConnection implements java.sql.Connection {
        * Add an interceptor to pass username/password in the header.
        * In https mode, the entire information is encrypted
        */
-        requestInterceptor =
-            new HttpBasicAuthInterceptor(getUserName(), getPassword(), cookieStore, cookieName,
-                useSsl, additionalHttpHeaders);
+      requestInterceptor = new HttpBasicAuthInterceptor(getUserName(), getPassword(),
+                                                        cookieStore, cookieName, useSsl,
+                                                        additionalHttpHeaders);
       }
     }
     // Configure http client for cookie based authentication
@@ -422,23 +421,6 @@ public class HiveConnection implements java.sql.Connection {
     } else {
       httpClientBuilder = HttpClientBuilder.create();
     }
-    // In case the server's idletimeout is set to a lower value, it might close it's side of
-    // connection. However we retry one more time on NoHttpResponseException
-    httpClientBuilder.setRetryHandler(new HttpRequestRetryHandler() {
-      @Override
-      public boolean retryRequest(IOException exception, int executionCount, HttpContext context) {
-        if (executionCount > 1) {
-          LOG.info("Retry attempts to connect to server exceeded.");
-          return false;
-        }
-        if (exception instanceof org.apache.http.NoHttpResponseException) {
-          LOG.info("Could not connect to the server. Retrying one more time.");
-          return true;
-        }
-        return false;
-      }
-    });
-
     // Add the request interceptor to the client builder
     httpClientBuilder.addInterceptorFirst(requestInterceptor);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java b/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
index c385e2c..a0aea72 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
@@ -315,11 +315,9 @@ public class HiveStatement implements java.sql.Statement {
       isExecuteStatementFailed = false;
     } catch (SQLException eS) {
       isExecuteStatementFailed = true;
-      isLogBeingGenerated = false;
       throw eS;
     } catch (Exception ex) {
       isExecuteStatementFailed = true;
-      isLogBeingGenerated = false;
       throw new SQLException(ex.toString(), "08S01", ex);
     }
   }
@@ -916,6 +914,10 @@ public class HiveStatement implements java.sql.Statement {
         if (isQueryClosed) {
           throw new ClosedOrCancelledStatementException("Method getQueryLog() failed. The " +
               "statement has been closed or cancelled.");
+        }
+        if (isExecuteStatementFailed) {
+          throw new SQLException("Method getQueryLog() failed. Because the stmtHandle in " +
+              "HiveStatement is null and the statement execution might fail.");
         } else {
           return logs;
         }
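
For context on the hunk above (not part of the patch): getQueryLog() is normally polled from a
separate thread while the statement executes, and the restored check makes it raise a SQLException
once the statement execution has failed and no handle is available. A minimal polling sketch, with
a placeholder HiveServer2 URL, credentials, and query:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

import org.apache.hive.jdbc.HiveStatement;

public class QueryLogPollingSketch {
  public static void main(String[] args) throws Exception {
    Class.forName("org.apache.hive.jdbc.HiveDriver");
    // Placeholder endpoint and credentials.
    try (Connection conn =
             DriverManager.getConnection("jdbc:hive2://localhost:10000/default", "user", "");
         Statement stmt = conn.createStatement()) {
      HiveStatement hiveStmt = (HiveStatement) stmt;
      Thread logThread = new Thread(() -> {
        try {
          while (hiveStmt.hasMoreLogs()) {
            for (String line : hiveStmt.getQueryLog()) {  // throws once the statement is closed,
              System.out.println(line);                   // cancelled, or has failed to execute
            }
            Thread.sleep(500L);
          }
        } catch (Exception e) {
          // Stop polling; getQueryLog() signals closed/cancelled/failed statements by throwing.
        }
      });
      logThread.start();
      stmt.execute("SELECT 1");  // placeholder query
      logThread.join();
    }
  }
}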

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/jdbc/src/java/org/apache/hive/jdbc/logs/InPlaceUpdateStream.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/logs/InPlaceUpdateStream.java b/jdbc/src/java/org/apache/hive/jdbc/logs/InPlaceUpdateStream.java
index 90b7368..d4cd79c 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/logs/InPlaceUpdateStream.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/logs/InPlaceUpdateStream.java
@@ -1,20 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 package org.apache.hive.jdbc.logs;
 
 import org.apache.hive.service.rpc.thrift.TProgressUpdateResp;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-client/pom.xml
----------------------------------------------------------------------
diff --git a/llap-client/pom.xml b/llap-client/pom.xml
index aa2cf32..3bacd2b 100644
--- a/llap-client/pom.xml
+++ b/llap-client/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java
----------------------------------------------------------------------
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java b/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java
index 42129b7..e5ab601 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java
@@ -25,5 +25,4 @@ import org.apache.hadoop.mapred.InputFormat;
 public interface LlapIo<T> {
   InputFormat<NullWritable, T> getInputFormat(InputFormat sourceInputFormat, Deserializer serde);
   void close();
-  String getMemoryInfo();
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapRegistryService.java
----------------------------------------------------------------------
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapRegistryService.java b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapRegistryService.java
index 76fc9c7..610c0a5 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapRegistryService.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapRegistryService.java
@@ -61,7 +61,7 @@ public class LlapRegistryService extends AbstractService {
     if (hosts.startsWith("@")) {
       // Caching instances only in case of the YARN registry. Each host based list will get it's own copy.
       String appName = hosts.substring(1);
-      String userName = HiveConf.getVar(conf, ConfVars.LLAP_ZK_REGISTRY_USER, currentUser());
+      String userName = HiveConf.getVar(conf, ConfVars.LLAP_ZK_REGISTRY_USER, RegistryUtils.currentUser());
       String key = appName + "-" + userName;
       registry = yarnRegistries.get(key);
       if (registry == null || !registry.isInState(STATE.STARTED)) {
@@ -79,9 +79,6 @@ public class LlapRegistryService extends AbstractService {
     return registry;
   }
 
-  public static String currentUser() {
-    return RegistryUtils.currentUser();
-  }
 
   @Override
   public void serviceInit(Configuration conf) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-common/pom.xml
----------------------------------------------------------------------
diff --git a/llap-common/pom.xml b/llap-common/pom.xml
index bb40996..334fd75 100644
--- a/llap-common/pom.xml
+++ b/llap-common/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-common/src/java/org/apache/hadoop/hive/llap/LlapDaemonInfo.java
----------------------------------------------------------------------
diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/LlapDaemonInfo.java b/llap-common/src/java/org/apache/hadoop/hive/llap/LlapDaemonInfo.java
deleted file mode 100644
index fa29b59..0000000
--- a/llap-common/src/java/org/apache/hadoop/hive/llap/LlapDaemonInfo.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hive.llap;
-
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.yarn.api.records.Resource;
-
-public enum LlapDaemonInfo {
-  INSTANCE;
-
-  private static final class LlapDaemonInfoHolder {
-    public LlapDaemonInfoHolder(int numExecutors, long executorMemory, long cacheSize,
-        boolean isDirectCache, boolean isLlapIo) {
-      this.numExecutors = numExecutors;
-      this.executorMemory = executorMemory;
-      this.cacheSize = cacheSize;
-      this.isDirectCache = isDirectCache;
-      this.isLlapIo = isLlapIo;
-    }
-
-    final int numExecutors;
-    final long executorMemory;
-    final long cacheSize;
-    final boolean isDirectCache;
-    final boolean isLlapIo;
-  }
-
-  // add more variables as required
-  private AtomicReference<LlapDaemonInfoHolder> dataRef =
-      new AtomicReference<LlapDaemonInfoHolder>();
-
-  public static void initialize(String appName, Configuration daemonConf) {
-    int numExecutors = HiveConf.getIntVar(daemonConf, ConfVars.LLAP_DAEMON_NUM_EXECUTORS);
-    long executorMemoryBytes =
-        HiveConf.getIntVar(daemonConf, ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB) * 1024l * 1024l;
-    long ioMemoryBytes = HiveConf.getSizeVar(daemonConf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE);
-    boolean isDirectCache = HiveConf.getBoolVar(daemonConf, ConfVars.LLAP_ALLOCATOR_DIRECT);
-    boolean isLlapIo = HiveConf.getBoolVar(daemonConf, HiveConf.ConfVars.LLAP_IO_ENABLED, true);
-    initialize(appName, numExecutors, executorMemoryBytes, ioMemoryBytes, isDirectCache, isLlapIo);
-  }
-
-  public static void initialize(String appName, int numExecutors, long executorMemoryBytes,
-      long ioMemoryBytes, boolean isDirectCache, boolean isLlapIo) {
-    INSTANCE.dataRef.set(new LlapDaemonInfoHolder(numExecutors, executorMemoryBytes, ioMemoryBytes,
-        isDirectCache, isLlapIo));
-  }
-
-  public boolean isLlap() {
-    return dataRef.get() != null;
-  }
-
-  public int getNumExecutors() {
-    return dataRef.get().numExecutors;
-  }
-
-  public long getExecutorMemory() {
-    return dataRef.get().executorMemory;
-  }
-
-  public long getMemoryPerExecutor() {
-    final LlapDaemonInfoHolder data = dataRef.get();
-    return (getExecutorMemory() - -(data.isDirectCache ? 0 : data.cacheSize)) / getNumExecutors();
-  }
-
-  public long getCacheSize() {
-    return dataRef.get().cacheSize;
-  }
-  
-  public boolean isDirectCache() {
-    return dataRef.get().isDirectCache;
-  }
-
-  public boolean isLlapIo() {
-    return dataRef.get().isLlapIo;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-ext-client/pom.xml
----------------------------------------------------------------------
diff --git a/llap-ext-client/pom.xml b/llap-ext-client/pom.xml
index d9ea026..5ba0ec5 100644
--- a/llap-ext-client/pom.xml
+++ b/llap-ext-client/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/bin/runLlapDaemon.sh
----------------------------------------------------------------------
diff --git a/llap-server/bin/runLlapDaemon.sh b/llap-server/bin/runLlapDaemon.sh
index 82c2cc5..001e304 100755
--- a/llap-server/bin/runLlapDaemon.sh
+++ b/llap-server/bin/runLlapDaemon.sh
@@ -51,7 +51,7 @@ shift
 JAVA=$JAVA_HOME/bin/java
 LOG_LEVEL_DEFAULT="INFO"
 LOGGER_DEFAULT="console"
-JAVA_OPTS_BASE="-server -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+PrintGCDetails -verbose:gc -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=4 -XX:GCLogFileSize=100M -XX:+PrintGCDateStamps"
+JAVA_OPTS_BASE="-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+PrintGCDetails -verbose:gc -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=4 -XX:GCLogFileSize=100M -XX:+PrintGCDateStamps"
 
 if [ ! -d "${LLAP_DAEMON_HOME}" ]; then
   echo No LLAP_DAEMON_HOME set, or is not a directory. 
@@ -107,7 +107,7 @@ elif [ "$COMMAND" = "run" ] ; then
   CLASS='org.apache.hadoop.hive.llap.daemon.impl.LlapDaemon'
 fi
 
-JAVA_OPTS_BASE="${JAVA_OPTS_BASE} -Xloggc:${LLAP_DAEMON_LOG_DIR}/gc_$(date +%Y-%m-%d-%H).log"
+JAVA_OPTS_BASE="${JAVA_OPTS_BASE} -Xloggc:${LLAP_DAEMON_LOG_DIR}/gc.log"
 LLAP_DAEMON_OPTS="${LLAP_DAEMON_OPTS} ${JAVA_OPTS_BASE}"
 
 # Set the default GC option if none set

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/pom.xml
----------------------------------------------------------------------
diff --git a/llap-server/pom.xml b/llap-server/pom.xml
index b10f05f..630e243 100644
--- a/llap-server/pom.xml
+++ b/llap-server/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 
@@ -178,10 +178,6 @@
       <version>${slider.version}</version>
       <exclusions>
         <exclusion>
-          <groupId>asm</groupId>
-          <artifactId>asm</artifactId>
-        </exclusion>
-        <exclusion>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
         </exclusion>
@@ -226,8 +222,8 @@
           <artifactId>jettison</artifactId>
         </exclusion>
         <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-log4j12</artifactId>
+          <groupId>asm</groupId>
+          <artifactId>asm</artifactId>
         </exclusion>
       </exclusions>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java b/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java
index 6cf8dbb..ff6e7ce 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java
@@ -116,7 +116,7 @@ public class IncrementalObjectSizeEstimator {
           addToProcessing(byType, stack, fieldObj, fieldClass);
         }
       }
-      estimator.directSize = (int) JavaDataModel.alignUp(
+      estimator.directSize = JavaDataModel.alignUp(
           estimator.directSize, memoryModel.memoryAlign());
     }
   }
@@ -454,7 +454,7 @@ public class IncrementalObjectSizeEstimator {
           if (len != 0) {
             int elementSize = getPrimitiveSize(e.field.getType().getComponentType());
             arraySize += elementSize * len;
-            arraySize = (int) JavaDataModel.alignUp(arraySize, memoryModel.memoryAlign());
+            arraySize = JavaDataModel.alignUp(arraySize, memoryModel.memoryAlign());
           }
           referencedSize += arraySize;
           break;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
index 302918a..8d7f0d3 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hive.llap.cache;
 
-import java.util.concurrent.atomic.AtomicLong;
-
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
@@ -45,14 +43,11 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl;
 import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics;
 
-public final class BuddyAllocator
-  implements EvictionAwareAllocator, BuddyAllocatorMXBean, LlapOomDebugDump {
+public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAllocatorMXBean {
   private final Arena[] arenas;
   private final AtomicInteger allocatedArenas = new AtomicInteger(0);
 
   private final MemoryManager memoryManager;
-  private static final long MAX_DUMP_INTERVAL_NS = 300 * 1000000000L; // 5 minutes.
-  private final AtomicLong lastLog = new AtomicLong(-1);
 
   // Config settings
   private final int minAllocLog2, maxAllocLog2, arenaSizeLog2, maxArenas;
@@ -124,14 +119,13 @@ public final class BuddyAllocator
     } else {
       cacheDir = null;
     }
-    long arenaSizeVal = (arenaCount == 0) ? MAX_ARENA_SIZE : maxSizeVal / arenaCount;
-    // The math.min, and the fact that maxAllocation is an int, ensures we don't overflow.
+    int arenaSizeVal = (arenaCount == 0) ? MAX_ARENA_SIZE : (int)(maxSizeVal / arenaCount);
     arenaSizeVal = Math.max(maxAllocation, Math.min(arenaSizeVal, MAX_ARENA_SIZE));
     if (LlapIoImpl.LOG.isInfoEnabled()) {
-      LlapIoImpl.LOG.info("Buddy allocator with " + (isDirect ? "direct" : "byte") + " buffers; "
-          + (isMapped ? ("memory mapped off " + cacheDir.toString() + "; ") : "")
+      LlapIoImpl.LOG.info("Buddy allocator with " + (isDirect ? "direct" : "byte") + " buffers;"
+          + (isMapped ? (" memory mapped off " + cacheDir.toString() + "; ") : "")
           + "allocation sizes " + minAllocation + " - " + maxAllocation
-          + ", arena size " + arenaSizeVal + ", total size " + maxSizeVal);
+          + ", arena size " + arenaSizeVal + ". total size " + maxSizeVal);
     }
 
     String minName = ConfVars.LLAP_ALLOCATOR_MIN_ALLOC.varname,
@@ -154,7 +148,7 @@ public final class BuddyAllocator
       LlapIoImpl.LOG.warn("Rounding arena size to " + arenaSizeVal + " from " + oldArenaSize
           + " to be divisible by allocation size " + maxAllocation);
     }
-    arenaSize = (int)arenaSizeVal;
+    arenaSize = arenaSizeVal;
     if ((maxSizeVal % arenaSize) > 0) {
       long oldMaxSize = maxSizeVal;
       maxSizeVal = (maxSizeVal / arenaSize) * arenaSize;
@@ -197,7 +191,8 @@ public final class BuddyAllocator
     int allocLog2 = freeListIx + minAllocLog2;
     int allocationSize = 1 << allocLog2;
     // TODO: reserving the entire thing is not ideal before we alloc anything. Interleave?
-    memoryManager.reserveMemory(dest.length << allocLog2);
+    memoryManager.reserveMemory(dest.length << allocLog2, true);
+
     int destAllocIx = 0;
     for (int i = 0; i < dest.length; ++i) {
       if (dest[i] != null) continue;
@@ -246,106 +241,38 @@ public final class BuddyAllocator
     // into some sort of queues that deallocate and split will examine), or having and "actor"
     // allocator thread (or threads per arena).
     // The 2nd one is probably much simpler and will allow us to get rid of a lot of sync code.
-    // But for now we will just retry. We will evict more each time.
-    long forceReserved = 0;
-    int attempt = 0;
-    try {
-      while (true) {
-        // Try to split bigger blocks. TODO: again, ideally we would tryLock at least once
-        {
-          int startArenaIx = (int)((threadId + attempt) % arenaCount), arenaIx = startArenaIx;
-          do {
-            int newDestIx = arenas[arenaIx].allocateWithSplit(
-                arenaIx, freeListIx, dest, destAllocIx, allocationSize);
-            if (newDestIx == dest.length) return;
-            assert newDestIx != -1;
-            destAllocIx = newDestIx;
-            if ((++arenaIx) == arenaCount) {
-              arenaIx = 0;
-            }
-          } while (arenaIx != startArenaIx);
-        }
-
-        if (attempt == 0) {
-          // Try to allocate memory if we haven't allocated all the way to maxSize yet; very rare.
-          for (int arenaIx = arenaCount; arenaIx < arenas.length; ++arenaIx) {
-            destAllocIx = arenas[arenaIx].allocateWithExpand(
-                arenaIx, freeListIx, dest, destAllocIx, allocationSize);
-            if (destAllocIx == dest.length) return;
+    // But for now we will just retry 5 times 0_o
+    for (int attempt = 0; attempt < 5; ++attempt) {
+      // Try to split bigger blocks. TODO: again, ideally we would tryLock at least once
+      {
+        int startArenaIx = (int)((threadId + attempt) % arenaCount), arenaIx = startArenaIx;
+        do {
+          int newDestIx = arenas[arenaIx].allocateWithSplit(
+              arenaIx, freeListIx, dest, destAllocIx, allocationSize);
+          if (newDestIx == dest.length) return;
+          assert newDestIx != -1;
+          destAllocIx = newDestIx;
+          if ((++arenaIx) == arenaCount) {
+            arenaIx = 0;
           }
-        }
-        int numberToForce = (dest.length - destAllocIx) * (attempt + 1);
-        long newReserved = memoryManager.forceReservedMemory(allocationSize, numberToForce);
-        forceReserved += newReserved;
-        if (newReserved == 0) {
-          // Cannot force-evict anything, give up.
-          String msg = "Failed to allocate " + size + "; at " + destAllocIx + " out of "
-              + dest.length + " (entire cache is fragmented and locked, or an internal issue)";
-          logOomErrorMessage(msg);
-          throw new AllocatorOutOfMemoryException(msg);
-        }
-        if (attempt == 0) {
-          LlapIoImpl.LOG.warn("Failed to allocate despite reserved memory; will retry");
-        }
-        ++attempt;
-      }
-    } finally {
-      if (attempt > 4) {
-        LlapIoImpl.LOG.warn("Allocation of " + dest.length + " buffers of size " + size
-            + " took " + attempt + " attempts to evict enough memory");
-      }
-      // After we succeed (or fail), release the force-evicted memory to memory manager. We have
-      // previously reserved enough to allocate all we need, so we don't take our allocation out
-      // of this - as per the comment above, we basically just wasted a bunch of cache (and CPU).
-      if (forceReserved > 0) {
-        memoryManager.releaseMemory(forceReserved);
+        } while (arenaIx != startArenaIx);
       }
-    }
-  }
 
-  private void logOomErrorMessage(String msg) {
-    while (true) {
-      long time = System.nanoTime();
-      long lastTime = lastLog.get();
-      // Magic value usage is invalid with nanoTime, so once in a 1000 years we may log extra.
-      boolean shouldLog = (lastTime == -1 || (time - lastTime) > MAX_DUMP_INTERVAL_NS);
-      if (shouldLog && !lastLog.compareAndSet(lastTime, time)) {
-        continue;
-      }
-      if (shouldLog) {
-        LlapIoImpl.LOG.error(msg + debugDumpForOom());
-      } else {
-        LlapIoImpl.LOG.error(msg);
-      }
-      return;
-    }
-  }
-
-  /**
-   * Arbitrarily, we start getting the state from Allocator. Allocator calls MM which calls
-   * the policies that call the eviction dispatcher that calls the caches. See init - these all
-   * are connected in a cycle, so we need to make sure the who-calls-whom order is definite.
-   */
-  @Override
-  public void debugDumpShort(StringBuilder sb) {
-    memoryManager.debugDumpShort(sb);
-    sb.append("\nAllocator state:");
-    int unallocCount = 0, fullCount = 0;
-    long totalFree = 0;
-    for (Arena arena : arenas) {
-      Integer result = arena.debugDumpShort(sb);
-      if (result == null) {
-        ++unallocCount;
-      } else if (result == 0) {
-        ++fullCount;
-      } else {
-        totalFree += result;
+      if (attempt == 0) {
+        // Try to allocate memory if we haven't allocated all the way to maxSize yet; very rare.
+        for (int arenaIx = arenaCount; arenaIx < arenas.length; ++arenaIx) {
+          destAllocIx = arenas[arenaIx].allocateWithExpand(
+              arenaIx, freeListIx, dest, destAllocIx, allocationSize);
+          if (destAllocIx == dest.length) return;
+        }
       }
+      memoryManager.forceReservedMemory(allocationSize, dest.length - destAllocIx);
+      LlapIoImpl.LOG.warn("Failed to allocate despite reserved memory; will retry " + attempt);
     }
-    sb.append("\nTotal available and allocated: ").append(totalFree).append(
-        "; unallocated arenas: ").append(unallocCount).append(
-        "; full arenas ").append(fullCount);
-    sb.append("\n");
+    String msg = "Failed to allocate " + size + "; at " + destAllocIx + " out of " + dest.length;
+    LlapIoImpl.LOG.error(msg + "\nALLOCATOR STATE:\n" + debugDump()
+        + "\nPARENT STATE:\n" + memoryManager.debugDumpForOom());
+    throw new AllocatorOutOfMemoryException(msg);
   }
 
   @Override
@@ -372,7 +299,7 @@ public final class BuddyAllocator
     return isDirect;
   }
 
-  public String debugDumpForOomInternal() {
+  public String debugDump() {
     StringBuilder result = new StringBuilder(
         "NOTE: with multiple threads the dump is not guaranteed to be consistent");
     for (Arena arena : arenas) {
@@ -469,36 +396,6 @@ public final class BuddyAllocator
       }
     }
 
-    public Integer debugDumpShort(StringBuilder result) {
-      if (data == null) {
-        return null;
-      }
-      int allocSize = minAllocation;
-      int total = 0;
-      for (int i = 0; i < freeLists.length; ++i, allocSize <<= 1) {
-        FreeList freeList = freeLists[i];
-        freeList.lock.lock();
-        try {
-          int nextHeaderIx = freeList.listHead;
-          int count = 0;
-          while (nextHeaderIx >= 0) {
-            ++count;
-            nextHeaderIx = getNextFreeListItem(offsetFromHeaderIndex(nextHeaderIx));
-          }
-          if (count > 0) {
-            if (total == 0) {
-              result.append("\nArena with free list lengths by size: ");
-            }
-            total += (allocSize * count);
-            result.append(allocSize).append(" => ").append(count).append(", ");
-          }
-        } finally {
-          freeList.lock.unlock();
-        }
-      }
-      return total;
-    }
-
     public void debugDump(StringBuilder result) {
       result.append("\nArena: ");
       if (data == null) {
@@ -780,10 +677,4 @@ public final class BuddyAllocator
   public MemoryBuffer createUnallocated() {
     return new LlapDataBuffer();
   }
-
-  @Override
-  public String debugDumpForOom() {
-    return "\nALLOCATOR STATE:\n" + debugDumpForOomInternal()
-        + "\nPARENT STATE:\n" + memoryManager.debugDumpForOom();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cache/EvictionDispatcher.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/EvictionDispatcher.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/EvictionDispatcher.java
index c73f1a1..a6b0abd 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/EvictionDispatcher.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/EvictionDispatcher.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hive.llap.io.metadata.OrcStripeMetadata;
 /**
  * Eviction dispatcher - uses double dispatch to route eviction notifications to correct caches.
  */
-public final class EvictionDispatcher implements EvictionListener, LlapOomDebugDump {
+public final class EvictionDispatcher implements EvictionListener {
   private final LowLevelCache dataCache;
   private final SerDeLowLevelCacheImpl serdeCache;
   private final OrcMetadataCache metadataCache;
@@ -65,27 +65,4 @@ public final class EvictionDispatcher implements EvictionListener, LlapOomDebugD
   public void notifyEvicted(OrcFileEstimateErrors buffer) {
     metadataCache.notifyEvicted(buffer);
   }
-
-  @Override
-  public String debugDumpForOom() {
-    StringBuilder sb = new StringBuilder(dataCache.debugDumpForOom());
-    if (serdeCache != null) {
-      sb.append(serdeCache.debugDumpForOom());
-    }
-    if (metadataCache != null) {
-      sb.append(metadataCache.debugDumpForOom());
-    }
-    return sb.toString();
-  }
-
-  @Override
-  public void debugDumpShort(StringBuilder sb) {
-    dataCache.debugDumpShort(sb);
-    if (serdeCache != null) {
-      serdeCache.debugDumpShort(sb);
-    }
-    if (metadataCache != null) {
-      metadataCache.debugDumpShort(sb);
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LlapOomDebugDump.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LlapOomDebugDump.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LlapOomDebugDump.java
index e861a7e..30bf5a9 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LlapOomDebugDump.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LlapOomDebugDump.java
@@ -20,5 +20,4 @@ package org.apache.hadoop.hive.llap.cache;
 
 public interface LlapOomDebugDump {
   String debugDumpForOom();
-  void debugDumpShort(StringBuilder sb);
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java
index c5d0c84..19c589a 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hive.common.io.DataCache.BooleanRef;
 import org.apache.hadoop.hive.common.io.DataCache.DiskRangeListFactory;
 import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;
 
-public interface LowLevelCache extends LlapOomDebugDump {
+public interface LowLevelCache {
   public enum Priority {
     NORMAL,
     HIGH

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
index 23796f6..4dc1c23 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
@@ -466,43 +466,4 @@ public class LowLevelCacheImpl implements LowLevelCache, BufferUsageManager, Lla
     }
     return sb.toString();
   }
-
-  @Override
-  public void debugDumpShort(StringBuilder sb) {
-    sb.append("\nORC cache state ");
-    int allLocked = 0, allUnlocked = 0, allEvicted = 0;
-    for (Map.Entry<Object, FileCache<ConcurrentSkipListMap<Long, LlapDataBuffer>>> e :
-      cache.entrySet()) {
-      if (!e.getValue().incRef()) continue;
-      try {
-        int fileLocked = 0, fileUnlocked = 0, fileEvicted = 0;
-        if (e.getValue().getCache().isEmpty()) continue;
-        for (Map.Entry<Long, LlapDataBuffer> e2 : e.getValue().getCache().entrySet()) {
-          int newRc = e2.getValue().incRef();
-          if (newRc < 0) {
-            ++fileEvicted;
-            continue;
-          }
-          try {
-            if (newRc > 1) { // We hold one refcount.
-              ++fileLocked;
-            } else {
-              ++fileUnlocked;
-            }
-          } finally {
-            e2.getValue().decRef();
-          }
-        }
-        allLocked += fileLocked;
-        allUnlocked += fileUnlocked;
-        allEvicted += fileEvicted;
-        sb.append("\n  file " + e.getKey() + ": " + fileLocked + " locked, "
-            + fileUnlocked + " unlocked, " + fileEvicted + " evicted");
-      } finally {
-        e.getValue().decRef();
-      }
-    }
-    sb.append("\nORC cache summary: " + allLocked + " locked, "
-        + allUnlocked + " unlocked, " + allEvicted + " evicted");
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
index 2132574..88bfa8b 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
@@ -38,28 +38,25 @@ public class LowLevelCacheMemoryManager implements MemoryManager {
   private long maxSize;
 
   public LowLevelCacheMemoryManager(
+      Configuration conf, LowLevelCachePolicy evictor, LlapDaemonCacheMetrics metrics) {
+    this(HiveConf.getSizeVar(conf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE), evictor, metrics);
+  }
+
+  @VisibleForTesting
+  public LowLevelCacheMemoryManager(
       long maxSize, LowLevelCachePolicy evictor, LlapDaemonCacheMetrics metrics) {
     this.maxSize = maxSize;
     this.evictor = evictor;
     this.usedMemory = new AtomicLong(0);
     this.metrics = metrics;
+    metrics.setCacheCapacityTotal(maxSize);
     if (LlapIoImpl.LOG.isInfoEnabled()) {
       LlapIoImpl.LOG.info("Memory manager initialized with max size {} and" +
           " {} ability to evict blocks", maxSize, ((evictor == null) ? "no " : ""));
     }
   }
 
-
   @Override
-  public void reserveMemory(final long memoryToReserve) {
-    boolean result = reserveMemory(memoryToReserve, true);
-    if (result) return;
-    // Can only happen if there's no evictor, or if thread is interrupted.
-    throw new RuntimeException("Cannot reserve memory"
-        + (Thread.currentThread().isInterrupted() ? "; thread interrupted" : ""));
-  }
-
-  @VisibleForTesting
   public boolean reserveMemory(final long memoryToReserve, boolean waitForEviction) {
     // TODO: if this cannot evict enough, it will spin infinitely. Terminate at some point?
     int badCallCount = 0;
@@ -111,19 +108,19 @@ public class LowLevelCacheMemoryManager implements MemoryManager {
         usedMem = usedMemory.get();
       }
     }
-    if (!result) {
-      releaseMemory(reservedTotalMetric);
-      reservedTotalMetric = 0;
-    }
     metrics.incrCacheCapacityUsed(reservedTotalMetric - evictedTotalMetric);
     return result;
   }
 
 
   @Override
-  public long forceReservedMemory(int allocationSize, int count) {
-    if (evictor == null) return 0;
-    return evictor.tryEvictContiguousData(allocationSize, count);
+  public void forceReservedMemory(int allocationSize, int count) {
+    if (evictor == null) return;
+    while (count > 0) {
+      int evictedCount = evictor.tryEvictContiguousData(allocationSize, count);
+      if (evictedCount == 0) return;
+      count -= evictedCount;
+    }
   }
 
   @Override
@@ -139,13 +136,7 @@ public class LowLevelCacheMemoryManager implements MemoryManager {
   @Override
   public String debugDumpForOom() {
     if (evictor == null) return null;
-    return "\ncache state\n" + evictor.debugDumpForOom();
-  }
-
-  @Override
-  public void debugDumpShort(StringBuilder sb) {
-    if (evictor == null) return;
-    evictor.debugDumpShort(sb);
+    return "cache state\n" + evictor.debugDumpForOom();
   }
 
   @Override
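
The revert restores the Configuration-based constructor, so the memory manager reads its budget from LLAP_IO_MEMORY_MAX_SIZE itself and the long-based constructor stays only for tests. A rough wiring sketch under those assumptions; the two constructors are the ones shown in this hunk and in the LRFU hunk below, while the metrics object and any import path not visible in this patch are assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.llap.cache.LowLevelCacheMemoryManager;
    import org.apache.hadoop.hive.llap.cache.LowLevelCachePolicy;
    import org.apache.hadoop.hive.llap.cache.LowLevelLrfuCachePolicy;
    import org.apache.hadoop.hive.llap.cache.MemoryManager;
    import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics;

    public class CacheWiringSketch {
      // Illustrative wiring only: both conf-based constructors are the ones
      // restored by this revert; how 'metrics' is obtained is out of scope here.
      static MemoryManager wire(LlapDaemonCacheMetrics metrics) {
        Configuration conf = new HiveConf();
        LowLevelCachePolicy policy = new LowLevelLrfuCachePolicy(conf);
        return new LowLevelCacheMemoryManager(conf, policy, metrics);
      }
    }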

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java
index fd9d942..bb1d1b0 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCachePolicy.java
@@ -28,5 +28,5 @@ public interface LowLevelCachePolicy extends LlapOomDebugDump {
   void setEvictionListener(EvictionListener listener);
   void setParentDebugDumper(LlapOomDebugDump dumper);
   /** TODO: temporary method until we have a better allocator */
-  long tryEvictContiguousData(int allocationSize, int count);
+  int tryEvictContiguousData(int allocationSize, int count);
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
index 761fd00..430a5f8 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
@@ -34,7 +34,7 @@ public class LowLevelFifoCachePolicy implements LowLevelCachePolicy {
   private EvictionListener evictionListener;
   private LlapOomDebugDump parentDebugDump;
 
-  public LowLevelFifoCachePolicy() {
+  public LowLevelFifoCachePolicy(Configuration conf) {
     LlapIoImpl.LOG.info("FIFO cache policy");
     buffers = new LinkedList<LlapCacheableBuffer>();
   }
@@ -116,26 +116,10 @@ public class LowLevelFifoCachePolicy implements LowLevelCachePolicy {
   }
 
   @Override
-  public void debugDumpShort(StringBuilder sb) {
-    sb.append("\nFIFO eviction list: ");
-    lock.lock();
-    try {
-      sb.append(buffers.size()).append(" elements)");
-    } finally {
-      lock.unlock();
-    }
-    if (parentDebugDump != null) {
-      parentDebugDump.debugDumpShort(sb);
-    }
-  }
-
-  @Override
-  public long tryEvictContiguousData(int allocationSize, int count) {
+  public int tryEvictContiguousData(int allocationSize, int count) {
     long evicted = evictInternal(allocationSize * count, allocationSize);
-    int remainingCount = count - (int)(evicted / allocationSize);
-    if (remainingCount > 0) {
-      evicted += evictInternal(allocationSize * remainingCount, -1);
-    }
-    return evicted;
+    // This makes granularity assumptions.
+    assert evicted % allocationSize == 0;
+    return (int)(evicted / allocationSize);
   }
 }
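
Note on the reverted return type: evictInternal reports bytes, while the restored interface reports how many contiguous chunks of allocationSize were freed, and the assert encodes the assumption that eviction happens in whole allocation-sized chunks. A one-method sketch of that conversion (a hypothetical helper mirroring the logic in this hunk, not FIFO-policy code):

    public class EvictedCountSketch {
      // Convert a byte total into a chunk count, assuming eviction granularity
      // is a multiple of allocationSize (the same assumption asserted above).
      static int toEvictedCount(long evictedBytes, int allocationSize) {
        assert evictedBytes % allocationSize == 0 : "eviction not aligned to allocation size";
        return (int) (evictedBytes / allocationSize);
      }
    }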

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
index 3973c8a..4cd2c18 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
@@ -69,6 +69,12 @@ public class LowLevelLrfuCachePolicy implements LowLevelCachePolicy {
   private EvictionListener evictionListener;
   private LlapOomDebugDump parentDebugDump;
 
+  public LowLevelLrfuCachePolicy(Configuration conf) {
+    this((int)HiveConf.getSizeVar(conf, ConfVars.LLAP_ALLOCATOR_MIN_ALLOC),
+        HiveConf.getSizeVar(conf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE), conf);
+  }
+
+  @VisibleForTesting
   public LowLevelLrfuCachePolicy(int minBufferSize, long maxSize, Configuration conf) {
     lambda = HiveConf.getFloatVar(conf, HiveConf.ConfVars.LLAP_LRFU_LAMBDA);
     int maxBuffers = (int)Math.ceil((maxSize * 1.0) / minBufferSize);
@@ -204,14 +210,13 @@ public class LowLevelLrfuCachePolicy implements LowLevelCachePolicy {
   }
 
   @Override
-  public long tryEvictContiguousData(int allocationSize, int count) {
+  public int tryEvictContiguousData(int allocationSize, int count) {
     int evicted = evictDataFromList(allocationSize, count);
-    if (count <= evicted) return evicted * allocationSize;
-    evicted += evictDataFromHeap(timer.get(), count - evicted, allocationSize);
-    long evictedBytes = evicted * allocationSize;
-    if (count <= evicted) return evictedBytes;
-    evictedBytes += evictSomeBlocks(allocationSize * (count - evicted));
-    return evictedBytes;
+    count -= evicted;
+    if (count > 0) {
+      evicted += evictDataFromHeap(timer.get(), count, allocationSize);
+    }
+    return evicted;
   }
 
   private long evictFromList(long memoryToReserve) {
@@ -568,26 +573,4 @@ public class LowLevelLrfuCachePolicy implements LowLevelCachePolicy {
     }
     return result;
   }
-
-  @Override
-  public void debugDumpShort(StringBuilder sb) {
-    sb.append("\nLRFU eviction list: ");
-    LlapCacheableBuffer listHeadLocal = listHead, listTailLocal = listTail;
-    if (listHeadLocal == null) {
-      sb.append("0 items");
-    } else {
-      LlapCacheableBuffer listItem = listHeadLocal;
-      int c = 0;
-      while (listItem != null) {
-        ++c;
-        if (listItem == listTailLocal) break;
-        listItem = listItem.next;
-      }
-      sb.append(c + " items");
-    }
-    sb.append("\nLRFU eviction heap: " + heapSize + " items");
-    if (parentDebugDump != null) {
-      parentDebugDump.debugDumpShort(sb);
-    }
-  }
 }
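
The restored Configuration constructor sizes the policy from LLAP_ALLOCATOR_MIN_ALLOC and LLAP_IO_MEMORY_MAX_SIZE, and the heap is dimensioned for the worst case of maxSize / minBufferSize buffers. A small worked example of that sizing math (the 1 GiB and 16 KiB values are illustrative, not defaults):

    public class LrfuSizingSketch {
      public static void main(String[] args) {
        long maxSize = 1L << 30;      // assume a 1 GiB cache (LLAP_IO_MEMORY_MAX_SIZE)
        int minBufferSize = 1 << 14;  // assume 16 KiB min allocation (LLAP_ALLOCATOR_MIN_ALLOC)
        int maxBuffers = (int) Math.ceil((maxSize * 1.0) / minBufferSize);
        System.out.println(maxBuffers); // 65536: worst-case number of buffers the LRFU heap tracks
      }
    }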

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java
index 0f4d3c0..ca41825 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java
@@ -19,9 +19,9 @@
 package org.apache.hadoop.hive.llap.cache;
 
 public interface MemoryManager extends LlapOomDebugDump {
+  boolean reserveMemory(long memoryToReserve, boolean waitForEviction);
   void releaseMemory(long memUsage);
   void updateMaxSize(long maxSize);
   /** TODO: temporary method until we get a better allocator. */
-  long forceReservedMemory(int allocationSize, int count);
-  void reserveMemory(long memoryToReserve);
+  void forceReservedMemory(int allocationSize, int count);
 }
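
With the revert, reservation goes back to the two-argument, boolean-returning form, so each caller decides whether to wait for eviction and must handle a false result itself (false means eviction could not free enough, or the thread was interrupted). A self-contained sketch of that contract; the nested interface mirrors only the two methods used here, and the caller code is illustrative:

    public class ReserveSketch {
      interface MemoryManager {
        boolean reserveMemory(long memoryToReserve, boolean waitForEviction);
        void releaseMemory(long memUsage);
      }

      static void allocateWithBudget(MemoryManager mm, long size) {
        if (!mm.reserveMemory(size, /* waitForEviction */ true)) {
          throw new RuntimeException("Cannot reserve " + size + " bytes for the cache");
        }
        boolean allocated = false;
        try {
          // ... allocate buffers against the reserved budget ...
          allocated = true;
        } finally {
          if (!allocated) {
            mm.releaseMemory(size); // give the budget back if allocation failed
          }
        }
      }
    }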

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cache/SerDeLowLevelCacheImpl.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/SerDeLowLevelCacheImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/SerDeLowLevelCacheImpl.java
index cd5bc9b..4809398 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/SerDeLowLevelCacheImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/SerDeLowLevelCacheImpl.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -43,7 +44,7 @@ import org.apache.orc.OrcProto.ColumnEncoding;
 
 import com.google.common.base.Function;
 
-public class SerDeLowLevelCacheImpl implements LlapOomDebugDump {
+public class SerDeLowLevelCacheImpl implements BufferUsageManager, LlapOomDebugDump {
   private static final int DEFAULT_CLEANUP_INTERVAL = 600;
   private final Allocator allocator;
   private final AtomicInteger newEvictions = new AtomicInteger(0);
@@ -616,6 +617,18 @@ public class SerDeLowLevelCacheImpl implements LlapOomDebugDump {
     } 
   }
 
+  @Override
+  public void decRefBuffer(MemoryBuffer buffer) {
+    unlockBuffer((LlapDataBuffer)buffer, true);
+  }
+
+  @Override
+  public void decRefBuffers(List<MemoryBuffer> cacheBuffers) {
+    for (MemoryBuffer b : cacheBuffers) {
+      unlockBuffer((LlapDataBuffer)b, true);
+    }
+  }
+
   private void unlockBuffer(LlapDataBuffer buffer, boolean handleLastDecRef) {
     boolean isLastDecref = (buffer.decRef() == 0);
     if (handleLastDecRef && isLastDecref) {
@@ -691,6 +704,18 @@ public class SerDeLowLevelCacheImpl implements LlapOomDebugDump {
   }
 
   @Override
+  public boolean incRefBuffer(MemoryBuffer buffer) {
+    // notifyReused implies that buffer is already locked; it's also called once for new
+    // buffers that are not cached yet. Don't notify cache policy.
+    return lockBuffer(((LlapDataBuffer)buffer), false);
+  }
+
+  @Override
+  public Allocator getAllocator() {
+    return allocator;
+  }
+
+  @Override
   public String debugDumpForOom() {
     StringBuilder sb = new StringBuilder("File cache state ");
     for (Map.Entry<Object, FileCache<FileData>> e : cache.entrySet()) {
@@ -706,55 +731,4 @@ public class SerDeLowLevelCacheImpl implements LlapOomDebugDump {
     }
     return sb.toString();
   }
-
-
-  @Override
-  public void debugDumpShort(StringBuilder sb) {
-    sb.append("\nSerDe cache state ");
-    int allLocked = 0, allUnlocked = 0, allEvicted = 0;
-    for (Map.Entry<Object, FileCache<FileData>> e : cache.entrySet()) {
-      if (!e.getValue().incRef()) continue;
-      try {
-        FileData fd = e.getValue().getCache();
-        int fileLocked = 0, fileUnlocked = 0, fileEvicted = 0;
-        sb.append(fd.colCount).append(" columns, ").append(fd.stripes.size()).append(" stripes; ");
-        for (StripeData stripe : fd.stripes) {
-          if (stripe.data == null) continue;
-          for (int i = 0; i < stripe.data.length; ++i) {
-            LlapDataBuffer[][] colData = stripe.data[i];
-            if (colData == null) continue;
-            for (int j = 0; j < colData.length; ++j) {
-              LlapDataBuffer[] streamData = colData[j];
-              if (streamData == null) continue;
-              for (int k = 0; k < streamData.length; ++k) {
-                int newRc = streamData[k].incRef();
-                if (newRc < 0) {
-                  ++fileEvicted;
-                  continue;
-                }
-                try {
-                  if (newRc > 1) { // We hold one refcount.
-                    ++fileLocked;
-                  } else {
-                    ++fileUnlocked;
-                  }
-                } finally {
-                  streamData[k].decRef();
-                }
-              }
-            }
-          }
-        }
-        allLocked += fileLocked;
-        allUnlocked += fileUnlocked;
-        allEvicted += fileEvicted;
-        sb.append("\n  file " + e.getKey() + ": " + fileLocked + " locked, "
-            + fileUnlocked + " unlocked, " + fileEvicted + " evicted");
-      } finally {
-        e.getValue().decRef();
-      }
-    }
-    sb.append("\nSerDe cache summary: " + allLocked + " locked, "
-        + allUnlocked + " unlocked, " + allEvicted + " evicted");
-  }
 }
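
The incRefBuffer/decRefBuffer methods added above are the usual pin/unpin pair: a reader pins a cached buffer so it cannot be evicted while being read, and unpins it in a finally block. A minimal sketch of that discipline (the nested interface mirrors the two calls; the buffer type and reader code are illustrative stand-ins, not SerDeLowLevelCacheImpl code):

    public class RefCountSketch {
      interface BufferUsageManager {
        boolean incRefBuffer(Object buffer); // pin: false if already evicted
        void decRefBuffer(Object buffer);    // unpin: eviction becomes possible again
      }

      static void readPinned(BufferUsageManager bum, Object buffer) {
        if (!bum.incRefBuffer(buffer)) {
          return; // lost the race with eviction; caller re-reads from disk
        }
        try {
          // ... safe to read the buffer contents while the refcount is held ...
        } finally {
          bum.decRefBuffer(buffer);
        }
      }
    }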

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cache/SimpleAllocator.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/SimpleAllocator.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/SimpleAllocator.java
index 51eb34e..d8f59d1 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/SimpleAllocator.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/SimpleAllocator.java
@@ -32,10 +32,9 @@ public final class SimpleAllocator implements Allocator, BuddyAllocatorMXBean {
   private final boolean isDirect;
   private static Field cleanerField;
   static {
+    ByteBuffer tmp = ByteBuffer.allocateDirect(1);
     try {
-      // TODO: To make it work for JDK9 use CleanerUtil from https://issues.apache.org/jira/browse/HADOOP-12760
-      final Class<?> dbClazz = Class.forName("java.nio.DirectByteBuffer");
-      cleanerField = dbClazz.getDeclaredField("cleaner");
+      cleanerField = tmp.getClass().getDeclaredField("cleaner");
       cleanerField.setAccessible(true);
     } catch (Throwable t) {
       LlapIoImpl.LOG.warn("Cannot initialize DirectByteBuffer cleaner", t);
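
The reflective cleaner field above exists so direct ByteBuffers can be freed eagerly instead of waiting for GC; as the TODO removed in this hunk notes, this JDK 8 approach needs CleanerUtil-style handling on JDK 9+. A hedged sketch of the technique in isolation (the general pattern only, not the SimpleAllocator code itself):

    import java.lang.reflect.Field;
    import java.lang.reflect.Method;
    import java.nio.ByteBuffer;

    public class DirectBufferFreeSketch {
      // Frees a direct buffer's native memory eagerly on JDK 8 by invoking its
      // hidden cleaner; silently falls back to GC-based cleanup on failure.
      public static void free(ByteBuffer buf) {
        if (buf == null || !buf.isDirect()) return;
        try {
          Field cleanerField = buf.getClass().getDeclaredField("cleaner");
          cleanerField.setAccessible(true);
          Object cleaner = cleanerField.get(buf);
          if (cleaner != null) {
            Method clean = cleaner.getClass().getMethod("clean");
            clean.setAccessible(true);
            clean.invoke(cleaner);
          }
        } catch (Throwable t) {
          // Reflection not permitted or field layout changed; let GC handle it.
        }
      }
    }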

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cache/SimpleBufferManager.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/SimpleBufferManager.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/SimpleBufferManager.java
index af7cf3d..d1eee04 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/SimpleBufferManager.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/SimpleBufferManager.java
@@ -102,14 +102,4 @@ public class SimpleBufferManager implements BufferUsageManager, LowLevelCache {
   public void notifyEvicted(MemoryBuffer buffer) {
     throw new UnsupportedOperationException("Buffer manager doesn't have cache");
   }
-
-  @Override
-  public String debugDumpForOom() {
-    return "";
-  }
-
-  @Override
-  public void debugDumpShort(StringBuilder sb) {
-    // TODO Auto-generated method stub
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
index a662c75..22e5ee8 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
@@ -76,8 +76,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.eclipse.jetty.rewrite.handler.Rule;
-import org.eclipse.jetty.util.ssl.SslContextFactory;
+import org.eclipse.jetty.server.ssl.SslSocketConnector;
 import org.joda.time.DateTime;
 import org.json.JSONException;
 import org.json.JSONObject;
@@ -379,8 +378,7 @@ public class LlapServiceDriver {
               LlapTezUtils.class, // llap-tez
               LlapInputFormat.class, // llap-server
               HiveInputFormat.class, // hive-exec
-              SslContextFactory.class, // hive-common (https deps)
-              Rule.class, // Jetty rewrite class
+              SslSocketConnector.class, // hive-common (https deps)
               RegistryUtils.ServiceRecordMarshal.class, // ZK registry
               // log4j2
               com.lmax.disruptor.RingBuffer.class, // disruptor

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapSliderUtils.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapSliderUtils.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapSliderUtils.java
index 2d0121c..8342067 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapSliderUtils.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapSliderUtils.java
@@ -24,11 +24,7 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.util.Clock;
-import org.apache.hadoop.yarn.util.SystemClock;
-import org.apache.slider.api.types.ApplicationDiagnostics;
 import org.apache.slider.client.SliderClient;
 import org.apache.slider.common.params.ActionCreateArgs;
 import org.apache.slider.common.params.ActionDestroyArgs;
@@ -62,60 +58,6 @@ public class LlapSliderUtils {
     return sliderClient;
   }
 
-  public static ApplicationReport getAppReport(String appName, SliderClient sliderClient,
-                                               long timeoutMs) throws
-      LlapStatusServiceDriver.LlapStatusCliException {
-    Clock clock = new SystemClock();
-    long startTime = clock.getTime();
-    long timeoutTime = timeoutMs < 0 ? Long.MAX_VALUE : (startTime + timeoutMs);
-    ApplicationReport appReport = null;
-
-    while (appReport == null) {
-      try {
-        appReport = sliderClient.getYarnAppListClient().findInstance(appName);
-        if (timeoutMs == 0) {
-          // break immediately if timeout is 0
-          break;
-        }
-        // Otherwise sleep, and try again.
-        if (appReport == null) {
-          long remainingTime = Math.min(timeoutTime - clock.getTime(), 500l);
-          if (remainingTime > 0) {
-            Thread.sleep(remainingTime);
-          } else {
-            break;
-          }
-        }
-      } catch (Exception e) { // No point separating IOException vs YarnException vs others
-        throw new LlapStatusServiceDriver.LlapStatusCliException(
-            LlapStatusServiceDriver.ExitCode.YARN_ERROR,
-            "Failed to get Yarn AppReport", e);
-      }
-    }
-    return appReport;
-  }
-
-  public static ApplicationDiagnostics getApplicationDiagnosticsFromYarnDiagnostics(
-      ApplicationReport appReport, Logger LOG) {
-    if (appReport == null) {
-      return null;
-    }
-    String diagnostics = appReport.getDiagnostics();
-    if (diagnostics == null || diagnostics.isEmpty()) {
-      return null;
-    }
-    try {
-      ApplicationDiagnostics appDiagnostics =
-          ApplicationDiagnostics.fromJson(diagnostics);
-      return appDiagnostics;
-    } catch (IOException e) {
-      LOG.warn(
-          "Failed to parse application diagnostics from Yarn Diagnostics - {}",
-          diagnostics);
-      return null;
-    }
-  }
-
   public static void startCluster(Configuration conf, String name,
       String packageName, Path packageDir, String queue) {
     LOG.info("Starting cluster with " + name + ", "

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusOptionsProcessor.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusOptionsProcessor.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusOptionsProcessor.java
index bd91495..b4aa430 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusOptionsProcessor.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusOptionsProcessor.java
@@ -38,7 +38,6 @@ public class LlapStatusOptionsProcessor {
   private static final long DEFAULT_STATUS_REFRESH_INTERVAL_MS = 1 * 1000l; // 1 seconds wait until subsequent status
   private static final long DEFAULT_WATCH_MODE_TIMEOUT_MS = 5 * 60 * 1000l; // 5 minutes timeout for watch mode
   private static final float DEFAULT_RUNNING_NODES_THRESHOLD = 1.0f;
-
   enum OptionConstants {
 
     NAME("name", 'n', "LLAP cluster name", true),


[39/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/import_addpartition_blobstore_to_local.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/import_addpartition_blobstore_to_local.q.out b/itests/hive-blobstore/src/test/results/clientpositive/import_addpartition_blobstore_to_local.q.out
deleted file mode 100644
index 4d296da..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/import_addpartition_blobstore_to_local.q.out
+++ /dev/null
@@ -1,283 +0,0 @@
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=cz
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=in
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=in
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	cz
-2	cz
-3	cz
-4	cz
-5	cz
-6	cz
-1	in
-2	in
-3	in
-4	in
-5	in
-6	in
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us
-PREHOOK: query: EXPORT TABLE exim_employee
-TO '### test.blobstore.path ###/import_addpartition_blobstore_to_local/export/exim_employee'
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=in
-PREHOOK: Input: default@exim_employee@emp_country=us
-PREHOOK: Output: ### test.blobstore.path ###/import_addpartition_blobstore_to_local/export/exim_employee
-POSTHOOK: query: EXPORT TABLE exim_employee
-TO '### test.blobstore.path ###/import_addpartition_blobstore_to_local/export/exim_employee'
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=in
-POSTHOOK: Input: default@exim_employee@emp_country=us
-POSTHOOK: Output: ### test.blobstore.path ###/import_addpartition_blobstore_to_local/export/exim_employee
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='us')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_local/export/exim_employee'
-#### A masked pattern was here ####
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_local/export/exim_employee
-PREHOOK: Output: database:default
-POSTHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='us')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_local/export/exim_employee'
-#### A masked pattern was here ####
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_local/export/exim_employee
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us
-PREHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='cz')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_local/export/exim_employee'
-#### A masked pattern was here ####
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_local/export/exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='cz')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_local/export/exim_employee'
-#### A masked pattern was here ####
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_local/export/exim_employee
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=cz
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	cz
-2	cz
-3	cz
-4	cz
-5	cz
-6	cz
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us
-PREHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='in')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_local/export/exim_employee'
-#### A masked pattern was here ####
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_local/export/exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='in')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_local/export/exim_employee'
-#### A masked pattern was here ####
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_local/export/exim_employee
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=in
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=in
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	cz
-2	cz
-3	cz
-4	cz
-5	cz
-6	cz
-1	in
-2	in
-3	in
-4	in
-5	in
-6	in
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/import_addpartition_blobstore_to_warehouse.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/import_addpartition_blobstore_to_warehouse.q.out b/itests/hive-blobstore/src/test/results/clientpositive/import_addpartition_blobstore_to_warehouse.q.out
deleted file mode 100644
index fce5ffe..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/import_addpartition_blobstore_to_warehouse.q.out
+++ /dev/null
@@ -1,271 +0,0 @@
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=cz
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=in
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=in
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	cz
-2	cz
-3	cz
-4	cz
-5	cz
-6	cz
-1	in
-2	in
-3	in
-4	in
-5	in
-6	in
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us
-PREHOOK: query: EXPORT TABLE exim_employee
-TO '### test.blobstore.path ###/import_addpartition_blobstore_to_warehouse/export/exim_employee'
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=in
-PREHOOK: Input: default@exim_employee@emp_country=us
-PREHOOK: Output: ### test.blobstore.path ###/import_addpartition_blobstore_to_warehouse/export/exim_employee
-POSTHOOK: query: EXPORT TABLE exim_employee
-TO '### test.blobstore.path ###/import_addpartition_blobstore_to_warehouse/export/exim_employee'
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=in
-POSTHOOK: Input: default@exim_employee@emp_country=us
-POSTHOOK: Output: ### test.blobstore.path ###/import_addpartition_blobstore_to_warehouse/export/exim_employee
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='us')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_warehouse/export/exim_employee'
-PREHOOK: type: IMPORT
-PREHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_warehouse/export/exim_employee
-PREHOOK: Output: database:default
-POSTHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='us')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_warehouse/export/exim_employee'
-POSTHOOK: type: IMPORT
-POSTHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_warehouse/export/exim_employee
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us
-PREHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='cz')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_warehouse/export/exim_employee'
-PREHOOK: type: IMPORT
-PREHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_warehouse/export/exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='cz')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_warehouse/export/exim_employee'
-POSTHOOK: type: IMPORT
-POSTHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_warehouse/export/exim_employee
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=cz
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	cz
-2	cz
-3	cz
-4	cz
-5	cz
-6	cz
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us
-PREHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='in')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_warehouse/export/exim_employee'
-PREHOOK: type: IMPORT
-PREHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_warehouse/export/exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='in')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_warehouse/export/exim_employee'
-POSTHOOK: type: IMPORT
-POSTHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_warehouse/export/exim_employee
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=in
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=in
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	cz
-2	cz
-3	cz
-4	cz
-5	cz
-6	cz
-1	in
-2	in
-3	in
-4	in
-5	in
-6	in
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/import_addpartition_local_to_blobstore.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/import_addpartition_local_to_blobstore.q.out b/itests/hive-blobstore/src/test/results/clientpositive/import_addpartition_local_to_blobstore.q.out
deleted file mode 100644
index 72068c5..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/import_addpartition_local_to_blobstore.q.out
+++ /dev/null
@@ -1,277 +0,0 @@
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=cz
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=in
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=in
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	cz
-2	cz
-3	cz
-4	cz
-5	cz
-6	cz
-1	in
-2	in
-3	in
-4	in
-5	in
-6	in
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us
-PREHOOK: query: EXPORT TABLE exim_employee
-#### A masked pattern was here ####
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=in
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: EXPORT TABLE exim_employee
-#### A masked pattern was here ####
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=in
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='us')
-#### A masked pattern was here ####
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Input: ### test.blobstore.path ###/import_addpartition_local_to_blobstore/import/exim_employee
-PREHOOK: Output: database:default
-POSTHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='us')
-#### A masked pattern was here ####
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Input: ### test.blobstore.path ###/import_addpartition_local_to_blobstore/import/exim_employee
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us
-PREHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='cz')
-#### A masked pattern was here ####
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Input: ### test.blobstore.path ###/import_addpartition_local_to_blobstore/import/exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='cz')
-#### A masked pattern was here ####
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Input: ### test.blobstore.path ###/import_addpartition_local_to_blobstore/import/exim_employee
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=cz
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	cz
-2	cz
-3	cz
-4	cz
-5	cz
-6	cz
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us
-PREHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='in')
-#### A masked pattern was here ####
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Input: ### test.blobstore.path ###/import_addpartition_local_to_blobstore/import/exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='in')
-#### A masked pattern was here ####
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Input: ### test.blobstore.path ###/import_addpartition_local_to_blobstore/import/exim_employee
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=in
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=in
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	cz
-2	cz
-3	cz
-4	cz
-5	cz
-6	cz
-1	in
-2	in
-3	in
-4	in
-5	in
-6	in
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_blobstore.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_blobstore.q.out b/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_blobstore.q.out
deleted file mode 100644
index b15a22f..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_blobstore.q.out
+++ /dev/null
@@ -1,161 +0,0 @@
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=cz
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=in
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=in
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	cz
-2	cz
-3	cz
-4	cz
-5	cz
-6	cz
-1	in
-2	in
-3	in
-4	in
-5	in
-6	in
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us
-PREHOOK: query: EXPORT TABLE exim_employee PARTITION (emp_country='us')
-TO '### test.blobstore.path ###/import_blobstore_to_blobstore/export/exim_employee'
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee@emp_country=us
-PREHOOK: Output: ### test.blobstore.path ###/import_blobstore_to_blobstore/export/exim_employee
-POSTHOOK: query: EXPORT TABLE exim_employee PARTITION (emp_country='us')
-TO '### test.blobstore.path ###/import_blobstore_to_blobstore/export/exim_employee'
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee@emp_country=us
-POSTHOOK: Output: ### test.blobstore.path ###/import_blobstore_to_blobstore/export/exim_employee
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: IMPORT FROM '### test.blobstore.path ###/import_blobstore_to_blobstore/export/exim_employee'
-#### A masked pattern was here ####
-PREHOOK: type: IMPORT
-PREHOOK: Input: ### test.blobstore.path ###/import_blobstore_to_blobstore/export/exim_employee
-PREHOOK: Input: ### test.blobstore.path ###/import_blobstore_to_blobstore/import/exim_employee
-PREHOOK: Output: database:default
-POSTHOOK: query: IMPORT FROM '### test.blobstore.path ###/import_blobstore_to_blobstore/export/exim_employee'
-#### A masked pattern was here ####
-POSTHOOK: type: IMPORT
-POSTHOOK: Input: ### test.blobstore.path ###/import_blobstore_to_blobstore/export/exim_employee
-POSTHOOK: Input: ### test.blobstore.path ###/import_blobstore_to_blobstore/import/exim_employee
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_blobstore_nonpart.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_blobstore_nonpart.q.out b/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_blobstore_nonpart.q.out
deleted file mode 100644
index d92b6c1..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_blobstore_nonpart.q.out
+++ /dev/null
@@ -1,103 +0,0 @@
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-#### A masked pattern was here ####
-1
-2
-3
-4
-5
-6
-PREHOOK: query: EXPORT TABLE exim_employee
-TO '### test.blobstore.path ###/import_blobstore_to_blobstore_nonpart/export/exim_employee'
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: ### test.blobstore.path ###/import_blobstore_to_blobstore_nonpart/export/exim_employee
-POSTHOOK: query: EXPORT TABLE exim_employee
-TO '### test.blobstore.path ###/import_blobstore_to_blobstore_nonpart/export/exim_employee'
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: ### test.blobstore.path ###/import_blobstore_to_blobstore_nonpart/export/exim_employee
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: IMPORT FROM '### test.blobstore.path ###/import_blobstore_to_blobstore_nonpart/export/exim_employee'
-#### A masked pattern was here ####
-PREHOOK: type: IMPORT
-PREHOOK: Input: ### test.blobstore.path ###/import_blobstore_to_blobstore_nonpart/export/exim_employee
-PREHOOK: Input: ### test.blobstore.path ###/import_blobstore_to_blobstore_nonpart/import/exim_employee
-PREHOOK: Output: database:default
-POSTHOOK: query: IMPORT FROM '### test.blobstore.path ###/import_blobstore_to_blobstore_nonpart/export/exim_employee'
-#### A masked pattern was here ####
-POSTHOOK: type: IMPORT
-POSTHOOK: Input: ### test.blobstore.path ###/import_blobstore_to_blobstore_nonpart/export/exim_employee
-POSTHOOK: Input: ### test.blobstore.path ###/import_blobstore_to_blobstore_nonpart/import/exim_employee
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-#### A masked pattern was here ####
-1
-2
-3
-4
-5
-6

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_local.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_local.q.out b/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_local.q.out
deleted file mode 100644
index 4d2eeb0..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_local.q.out
+++ /dev/null
@@ -1,161 +0,0 @@
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=cz
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=in
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=in
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	cz
-2	cz
-3	cz
-4	cz
-5	cz
-6	cz
-1	in
-2	in
-3	in
-4	in
-5	in
-6	in
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us
-PREHOOK: query: EXPORT TABLE exim_employee PARTITION (emp_country='us')
-TO '### test.blobstore.path ###/import_blobstore_to_local/export/exim_employee'
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee@emp_country=us
-PREHOOK: Output: ### test.blobstore.path ###/import_blobstore_to_local/export/exim_employee
-POSTHOOK: query: EXPORT TABLE exim_employee PARTITION (emp_country='us')
-TO '### test.blobstore.path ###/import_blobstore_to_local/export/exim_employee'
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee@emp_country=us
-POSTHOOK: Output: ### test.blobstore.path ###/import_blobstore_to_local/export/exim_employee
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: IMPORT FROM '### test.blobstore.path ###/import_blobstore_to_local/export/exim_employee'
-#### A masked pattern was here ####
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Input: ### test.blobstore.path ###/import_blobstore_to_local/export/exim_employee
-PREHOOK: Output: database:default
-POSTHOOK: query: IMPORT FROM '### test.blobstore.path ###/import_blobstore_to_local/export/exim_employee'
-#### A masked pattern was here ####
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Input: ### test.blobstore.path ###/import_blobstore_to_local/export/exim_employee
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_warehouse.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_warehouse.q.out b/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_warehouse.q.out
deleted file mode 100644
index 764c86d..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_warehouse.q.out
+++ /dev/null
@@ -1,157 +0,0 @@
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=cz
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=in
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=in
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	cz
-2	cz
-3	cz
-4	cz
-5	cz
-6	cz
-1	in
-2	in
-3	in
-4	in
-5	in
-6	in
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us
-PREHOOK: query: EXPORT TABLE exim_employee PARTITION (emp_country='us')
-TO '### test.blobstore.path ###/import_blobstore_to_warehouse/export/exim_employee'
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee@emp_country=us
-PREHOOK: Output: ### test.blobstore.path ###/import_blobstore_to_warehouse/export/exim_employee
-POSTHOOK: query: EXPORT TABLE exim_employee PARTITION (emp_country='us')
-TO '### test.blobstore.path ###/import_blobstore_to_warehouse/export/exim_employee'
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee@emp_country=us
-POSTHOOK: Output: ### test.blobstore.path ###/import_blobstore_to_warehouse/export/exim_employee
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: IMPORT FROM '### test.blobstore.path ###/import_blobstore_to_warehouse/export/exim_employee'
-PREHOOK: type: IMPORT
-PREHOOK: Input: ### test.blobstore.path ###/import_blobstore_to_warehouse/export/exim_employee
-PREHOOK: Output: database:default
-POSTHOOK: query: IMPORT FROM '### test.blobstore.path ###/import_blobstore_to_warehouse/export/exim_employee'
-POSTHOOK: type: IMPORT
-POSTHOOK: Input: ### test.blobstore.path ###/import_blobstore_to_warehouse/export/exim_employee
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_warehouse_nonpart.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_warehouse_nonpart.q.out b/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_warehouse_nonpart.q.out
deleted file mode 100644
index 9d0b059..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/import_blobstore_to_warehouse_nonpart.q.out
+++ /dev/null
@@ -1,99 +0,0 @@
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-#### A masked pattern was here ####
-1
-2
-3
-4
-5
-6
-PREHOOK: query: EXPORT TABLE exim_employee
-TO '### test.blobstore.path ###/import_blobstore_to_warehouse_nonpart/export/exim_employee'
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: ### test.blobstore.path ###/import_blobstore_to_warehouse_nonpart/export/exim_employee
-POSTHOOK: query: EXPORT TABLE exim_employee
-TO '### test.blobstore.path ###/import_blobstore_to_warehouse_nonpart/export/exim_employee'
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: ### test.blobstore.path ###/import_blobstore_to_warehouse_nonpart/export/exim_employee
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: IMPORT FROM '### test.blobstore.path ###/import_blobstore_to_warehouse_nonpart/export/exim_employee'
-PREHOOK: type: IMPORT
-PREHOOK: Input: ### test.blobstore.path ###/import_blobstore_to_warehouse_nonpart/export/exim_employee
-PREHOOK: Output: database:default
-POSTHOOK: query: IMPORT FROM '### test.blobstore.path ###/import_blobstore_to_warehouse_nonpart/export/exim_employee'
-POSTHOOK: type: IMPORT
-POSTHOOK: Input: ### test.blobstore.path ###/import_blobstore_to_warehouse_nonpart/export/exim_employee
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-#### A masked pattern was here ####
-1
-2
-3
-4
-5
-6

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/import_local_to_blobstore.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/import_local_to_blobstore.q.out b/itests/hive-blobstore/src/test/results/clientpositive/import_local_to_blobstore.q.out
deleted file mode 100644
index 56343bb..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/import_local_to_blobstore.q.out
+++ /dev/null
@@ -1,159 +0,0 @@
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=cz
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=in
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=in
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	cz
-2	cz
-3	cz
-4	cz
-5	cz
-6	cz
-1	in
-2	in
-3	in
-4	in
-5	in
-6	in
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us
-PREHOOK: query: EXPORT TABLE exim_employee PARTITION (emp_country='us')
-#### A masked pattern was here ####
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: EXPORT TABLE exim_employee PARTITION (emp_country='us')
-#### A masked pattern was here ####
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
-#### A masked pattern was here ####
-PREHOOK: type: IMPORT
-#### A masked pattern was here ####
-PREHOOK: Input: ### test.blobstore.path ###/import_local_to_blobstore/import/exim_employee
-PREHOOK: Output: database:default
-#### A masked pattern was here ####
-POSTHOOK: type: IMPORT
-#### A masked pattern was here ####
-POSTHOOK: Input: ### test.blobstore.path ###/import_local_to_blobstore/import/exim_employee
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/insert_blobstore_to_blobstore.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/insert_blobstore_to_blobstore.q.out b/itests/hive-blobstore/src/test/results/clientpositive/insert_blobstore_to_blobstore.q.out
deleted file mode 100644
index 4651899..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/insert_blobstore_to_blobstore.q.out
+++ /dev/null
@@ -1,110 +0,0 @@
-PREHOOK: query: DROP TABLE blobstore_source
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE blobstore_source
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE blobstore_source (
-    a string,
-    b string,
-    c double)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY ' '
-COLLECTION ITEMS TERMINATED BY '\t'
-LINES TERMINATED BY '\n'
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/insert_blobstore_to_blobstore/blobstore_source
-PREHOOK: Output: database:default
-PREHOOK: Output: default@blobstore_source
-POSTHOOK: query: CREATE TABLE blobstore_source (
-    a string,
-    b string,
-    c double)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY ' '
-COLLECTION ITEMS TERMINATED BY '\t'
-LINES TERMINATED BY '\n'
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/insert_blobstore_to_blobstore/blobstore_source
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@blobstore_source
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/3col_data.txt' INTO TABLE blobstore_source
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@blobstore_source
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/3col_data.txt' INTO TABLE blobstore_source
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@blobstore_source
-PREHOOK: query: DROP TABLE blobstore_table
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE blobstore_table
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE blobstore_table LIKE blobstore_source
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/insert_blobstore_to_blobstore/blobstore_table
-PREHOOK: Output: database:default
-PREHOOK: Output: default@blobstore_table
-POSTHOOK: query: CREATE TABLE blobstore_table LIKE blobstore_source
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/insert_blobstore_to_blobstore/blobstore_table
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@blobstore_table
-PREHOOK: query: INSERT OVERWRITE TABLE blobstore_table SELECT * FROM blobstore_source
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_source
-PREHOOK: Output: default@blobstore_table
-POSTHOOK: query: INSERT OVERWRITE TABLE blobstore_table SELECT * FROM blobstore_source
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_source
-POSTHOOK: Output: default@blobstore_table
-PREHOOK: query: SELECT COUNT(*) FROM blobstore_table
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM blobstore_table
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_table
-#### A masked pattern was here ####
-7
-PREHOOK: query: INSERT INTO TABLE blobstore_table SELECT * FROM blobstore_source
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_source
-PREHOOK: Output: default@blobstore_table
-POSTHOOK: query: INSERT INTO TABLE blobstore_table SELECT * FROM blobstore_source
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_source
-POSTHOOK: Output: default@blobstore_table
-PREHOOK: query: SELECT COUNT(*) FROM blobstore_table
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM blobstore_table
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_table
-#### A masked pattern was here ####
-14
-PREHOOK: query: SELECT * FROM blobstore_table
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM blobstore_table
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_table
-#### A masked pattern was here ####
-1	abc	10.5
-2	def	11.5
-3	ajss	90.23232
-4	djns	89.02002
-5	random	2.99
-6	data	3.002
-7	ne	71.9084
-1	abc	10.5
-2	def	11.5
-3	ajss	90.23232
-4	djns	89.02002
-5	random	2.99
-6	data	3.002
-7	ne	71.9084

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/insert_empty_into_blobstore.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/insert_empty_into_blobstore.q.out b/itests/hive-blobstore/src/test/results/clientpositive/insert_empty_into_blobstore.q.out
deleted file mode 100644
index 8e5e096..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/insert_empty_into_blobstore.q.out
+++ /dev/null
@@ -1,155 +0,0 @@
-PREHOOK: query: DROP TABLE empty
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE empty
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE blobstore_dynamic_partitioning
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE blobstore_dynamic_partitioning
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE blobstore_list_bucketing
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE blobstore_list_bucketing
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE empty (
-    id int,
-    name string,
-    dept string,
-    pt string,
-    dt string,
-    hr string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@empty
-POSTHOOK: query: CREATE TABLE empty (
-    id int,
-    name string,
-    dept string,
-    pt string,
-    dt string,
-    hr string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@empty
-PREHOOK: query: CREATE TABLE blobstore_dynamic_partitioning (
-    id int,
-    name string,
-    dept string)
-PARTITIONED BY (
-    pt string,
-    dt string,
-    hr string)
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/insert_empty_into_blobstore/blobstore_dynamic_partitioning
-PREHOOK: Output: database:default
-PREHOOK: Output: default@blobstore_dynamic_partitioning
-POSTHOOK: query: CREATE TABLE blobstore_dynamic_partitioning (
-    id int,
-    name string,
-    dept string)
-PARTITIONED BY (
-    pt string,
-    dt string,
-    hr string)
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/insert_empty_into_blobstore/blobstore_dynamic_partitioning
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@blobstore_dynamic_partitioning
-PREHOOK: query: INSERT INTO TABLE blobstore_dynamic_partitioning PARTITION (pt='a', dt, hr) SELECT id, name, dept, dt, hr FROM empty
-PREHOOK: type: QUERY
-PREHOOK: Input: default@empty
-PREHOOK: Output: default@blobstore_dynamic_partitioning@pt=a
-POSTHOOK: query: INSERT INTO TABLE blobstore_dynamic_partitioning PARTITION (pt='a', dt, hr) SELECT id, name, dept, dt, hr FROM empty
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@empty
-PREHOOK: query: SELECT COUNT(*) FROM blobstore_dynamic_partitioning
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_dynamic_partitioning
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM blobstore_dynamic_partitioning
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_dynamic_partitioning
-#### A masked pattern was here ####
-0
-PREHOOK: query: CREATE TABLE blobstore_list_bucketing (
-    id int,
-    name string,
-    dept string)
-PARTITIONED BY (
-    pt string,
-    dt string,
-    hr string)
-SKEWED BY (id) ON ('1', '2', '3') STORED AS DIRECTORIES
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/insert_empty_into_blobstore/blobstore_list_bucketing
-PREHOOK: Output: database:default
-PREHOOK: Output: default@blobstore_list_bucketing
-POSTHOOK: query: CREATE TABLE blobstore_list_bucketing (
-    id int,
-    name string,
-    dept string)
-PARTITIONED BY (
-    pt string,
-    dt string,
-    hr string)
-SKEWED BY (id) ON ('1', '2', '3') STORED AS DIRECTORIES
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/insert_empty_into_blobstore/blobstore_list_bucketing
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@blobstore_list_bucketing
-PREHOOK: query: INSERT INTO TABLE blobstore_list_bucketing PARTITION (pt='a', dt='a', hr='a') SELECT id, name, dept FROM empty
-PREHOOK: type: QUERY
-PREHOOK: Input: default@empty
-PREHOOK: Output: default@blobstore_list_bucketing@pt=a/dt=a/hr=a
-POSTHOOK: query: INSERT INTO TABLE blobstore_list_bucketing PARTITION (pt='a', dt='a', hr='a') SELECT id, name, dept FROM empty
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@empty
-POSTHOOK: Output: default@blobstore_list_bucketing@pt=a/dt=a/hr=a
-PREHOOK: query: SELECT COUNT(*) FROM blobstore_list_bucketing
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_list_bucketing
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM blobstore_list_bucketing
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_list_bucketing
-#### A masked pattern was here ####
-0
-PREHOOK: query: INSERT INTO TABLE blobstore_dynamic_partitioning PARTITION (pt='b', dt, hr) SELECT id, name, dept, dt, hr FROM empty
-PREHOOK: type: QUERY
-PREHOOK: Input: default@empty
-PREHOOK: Output: default@blobstore_dynamic_partitioning@pt=b
-POSTHOOK: query: INSERT INTO TABLE blobstore_dynamic_partitioning PARTITION (pt='b', dt, hr) SELECT id, name, dept, dt, hr FROM empty
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@empty
-PREHOOK: query: SELECT COUNT(*) FROM blobstore_dynamic_partitioning
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_dynamic_partitioning
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM blobstore_dynamic_partitioning
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_dynamic_partitioning
-#### A masked pattern was here ####
-0
-PREHOOK: query: INSERT INTO TABLE blobstore_list_bucketing PARTITION (pt='b', dt='b', hr='b') SELECT id, name, dept FROM empty
-PREHOOK: type: QUERY
-PREHOOK: Input: default@empty
-PREHOOK: Output: default@blobstore_list_bucketing@pt=b/dt=b/hr=b
-POSTHOOK: query: INSERT INTO TABLE blobstore_list_bucketing PARTITION (pt='b', dt='b', hr='b') SELECT id, name, dept FROM empty
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@empty
-POSTHOOK: Output: default@blobstore_list_bucketing@pt=b/dt=b/hr=b
-POSTHOOK: Lineage: blobstore_list_bucketing PARTITION(pt=b,dt=b,hr=b).dept SIMPLE [(empty)empty.FieldSchema(name:dept, type:string, comment:null), ]
-POSTHOOK: Lineage: blobstore_list_bucketing PARTITION(pt=b,dt=b,hr=b).id SIMPLE [(empty)empty.FieldSchema(name:id, type:int, comment:null), ]
-POSTHOOK: Lineage: blobstore_list_bucketing PARTITION(pt=b,dt=b,hr=b).name SIMPLE [(empty)empty.FieldSchema(name:name, type:string, comment:null), ]
-PREHOOK: query: SELECT COUNT(*) FROM blobstore_list_bucketing
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_list_bucketing
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM blobstore_list_bucketing
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_list_bucketing
-#### A masked pattern was here ####
-0

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out b/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out
index 663a572..4ed53e5 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out
@@ -71,7 +71,6 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                       bucket_count -1
                       column.name.delimiter ,
                       columns id
@@ -81,8 +80,6 @@ STAGE PLANS:
                       location ### test.blobstore.path ###/table1
                       name default.table1
                       numFiles 2
-                      numRows 2
-                      rawDataSize 2
                       serialization.ddl struct table1 { i32 id}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -145,7 +142,6 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                 bucket_count -1
                 column.name.delimiter ,
                 columns id
@@ -155,8 +151,6 @@ STAGE PLANS:
                 location ### test.blobstore.path ###/table1
                 name default.table1
                 numFiles 2
-                numRows 2
-                rawDataSize 2
                 serialization.ddl struct table1 { i32 id}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -183,7 +177,6 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                     bucket_count -1
                     column.name.delimiter ,
                     columns id
@@ -193,8 +186,6 @@ STAGE PLANS:
                     location ### test.blobstore.path ###/table1
                     name default.table1
                     numFiles 2
-                    numRows 2
-                    rawDataSize 2
                     serialization.ddl struct table1 { i32 id}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -214,7 +205,6 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
               bucket_count -1
               column.name.delimiter ,
               columns id
@@ -224,8 +214,6 @@ STAGE PLANS:
               location ### test.blobstore.path ###/table1
               name default.table1
               numFiles 2
-              numRows 2
-              rawDataSize 2
               serialization.ddl struct table1 { i32 id}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -236,7 +224,6 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                 bucket_count -1
                 column.name.delimiter ,
                 columns id
@@ -246,8 +233,6 @@ STAGE PLANS:
                 location ### test.blobstore.path ###/table1
                 name default.table1
                 numFiles 2
-                numRows 2
-                rawDataSize 2
                 serialization.ddl struct table1 { i32 id}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -268,7 +253,6 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                 bucket_count -1
                 column.name.delimiter ,
                 columns id
@@ -278,8 +262,6 @@ STAGE PLANS:
                 location ### test.blobstore.path ###/table1
                 name default.table1
                 numFiles 2
-                numRows 2
-                rawDataSize 2
                 serialization.ddl struct table1 { i32 id}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -302,7 +284,6 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                     bucket_count -1
                     column.name.delimiter ,
                     columns id
@@ -312,8 +293,6 @@ STAGE PLANS:
                     location ### test.blobstore.path ###/table1
                     name default.table1
                     numFiles 2
-                    numRows 2
-                    rawDataSize 2
                     serialization.ddl struct table1 { i32 id}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -333,7 +312,6 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
               bucket_count -1
               column.name.delimiter ,
               columns id
@@ -343,8 +321,6 @@ STAGE PLANS:
               location ### test.blobstore.path ###/table1
               name default.table1
               numFiles 2
-              numRows 2
-              rawDataSize 2
               serialization.ddl struct table1 { i32 id}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -355,7 +331,6 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                 bucket_count -1
                 column.name.delimiter ,
                 columns id
@@ -365,8 +340,6 @@ STAGE PLANS:
                 location ### test.blobstore.path ###/table1
                 name default.table1
                 numFiles 2
-                numRows 2
-                rawDataSize 2
                 serialization.ddl struct table1 { i32 id}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe


[47/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/JvmMetrics.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/JvmMetrics.java b/common/src/java/org/apache/hadoop/hive/common/JvmMetrics.java
deleted file mode 100644
index 64f2819..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/JvmMetrics.java
+++ /dev/null
@@ -1,187 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common;
-
-import static org.apache.hadoop.hive.common.JvmMetricsInfo.*;
-
-import org.apache.hadoop.log.metrics.EventCounter;
-import org.apache.hadoop.metrics2.MetricsCollector;
-import org.apache.hadoop.metrics2.MetricsInfo;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.Interns;
-
-import java.lang.management.GarbageCollectorMXBean;
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryMXBean;
-import java.lang.management.MemoryUsage;
-import java.lang.management.ThreadInfo;
-import java.lang.management.ThreadMXBean;
-import java.util.List;
-import java.util.concurrent.ConcurrentHashMap;
-
-import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName;
-import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
-
-/**
- * JVM and logging related metrics. Ported from Hadoop JvmMetrics.
- * Mostly used by various servers as a part of the metrics they export.
- */
-public class JvmMetrics implements MetricsSource {
-  enum Singleton {
-    INSTANCE;
-
-    JvmMetrics impl;
-
-    synchronized JvmMetrics init(String processName, String sessionId) {
-      if (impl == null) {
-        impl = create(processName, sessionId, DefaultMetricsSystem.instance());
-      }
-      return impl;
-    }
-  }
-
-  static final float M = 1024*1024;
-
-  final MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
-  final List<GarbageCollectorMXBean> gcBeans =
-      ManagementFactory.getGarbageCollectorMXBeans();
-  final ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean();
-  final String processName, sessionId;
-  private JvmPauseMonitor pauseMonitor = null;
-  final ConcurrentHashMap<String, MetricsInfo[]> gcInfoCache =
-      new ConcurrentHashMap<String, MetricsInfo[]>();
-
-  JvmMetrics(String processName, String sessionId) {
-    this.processName = processName;
-    this.sessionId = sessionId;
-  }
-
-  public void setPauseMonitor(final JvmPauseMonitor pauseMonitor) {
-    this.pauseMonitor = pauseMonitor;
-  }
-
-  public static JvmMetrics create(String processName, String sessionId, MetricsSystem ms) {
-    return ms.register(JvmMetrics.name(), JvmMetrics.description(),
-        new JvmMetrics(processName, sessionId));
-  }
-
-  public static JvmMetrics initSingleton(String processName, String sessionId) {
-    return Singleton.INSTANCE.init(processName, sessionId);
-  }
-
-  @Override
-  public void getMetrics(MetricsCollector collector, boolean all) {
-    MetricsRecordBuilder rb = collector.addRecord(JvmMetrics)
-        .setContext("jvm").tag(ProcessName, processName)
-        .tag(SessionId, sessionId);
-    getMemoryUsage(rb);
-    getGcUsage(rb);
-    getThreadUsage(rb);
-    getEventCounters(rb);
-  }
-
-  private void getMemoryUsage(MetricsRecordBuilder rb) {
-    MemoryUsage memNonHeap = memoryMXBean.getNonHeapMemoryUsage();
-    MemoryUsage memHeap = memoryMXBean.getHeapMemoryUsage();
-    Runtime runtime = Runtime.getRuntime();
-    rb.addGauge(MemNonHeapUsedM, memNonHeap.getUsed() / M)
-        .addGauge(MemNonHeapCommittedM, memNonHeap.getCommitted() / M)
-        .addGauge(MemNonHeapMaxM, memNonHeap.getMax() / M)
-        .addGauge(MemHeapUsedM, memHeap.getUsed() / M)
-        .addGauge(MemHeapCommittedM, memHeap.getCommitted() / M)
-        .addGauge(MemHeapMaxM, memHeap.getMax() / M)
-        .addGauge(MemMaxM, runtime.maxMemory() / M);
-  }
-
-  private void getGcUsage(MetricsRecordBuilder rb) {
-    long count = 0;
-    long timeMillis = 0;
-    for (GarbageCollectorMXBean gcBean : gcBeans) {
-      long c = gcBean.getCollectionCount();
-      long t = gcBean.getCollectionTime();
-      MetricsInfo[] gcInfo = getGcInfo(gcBean.getName());
-      rb.addCounter(gcInfo[0], c).addCounter(gcInfo[1], t);
-      count += c;
-      timeMillis += t;
-    }
-    rb.addCounter(GcCount, count)
-        .addCounter(GcTimeMillis, timeMillis);
-
-    if (pauseMonitor != null) {
-      rb.addCounter(GcNumWarnThresholdExceeded,
-          pauseMonitor.getNumGcWarnThreadholdExceeded());
-      rb.addCounter(GcNumInfoThresholdExceeded,
-          pauseMonitor.getNumGcInfoThresholdExceeded());
-      rb.addCounter(GcTotalExtraSleepTime,
-          pauseMonitor.getTotalGcExtraSleepTime());
-    }
-  }
-
-  private MetricsInfo[] getGcInfo(String gcName) {
-    MetricsInfo[] gcInfo = gcInfoCache.get(gcName);
-    if (gcInfo == null) {
-      gcInfo = new MetricsInfo[2];
-      gcInfo[0] = Interns.info("GcCount" + gcName, "GC Count for " + gcName);
-      gcInfo[1] = Interns
-          .info("GcTimeMillis" + gcName, "GC Time for " + gcName);
-      MetricsInfo[] previousGcInfo = gcInfoCache.putIfAbsent(gcName, gcInfo);
-      if (previousGcInfo != null) {
-        return previousGcInfo;
-      }
-    }
-    return gcInfo;
-  }
-
-  private void getThreadUsage(MetricsRecordBuilder rb) {
-    int threadsNew = 0;
-    int threadsRunnable = 0;
-    int threadsBlocked = 0;
-    int threadsWaiting = 0;
-    int threadsTimedWaiting = 0;
-    int threadsTerminated = 0;
-    long threadIds[] = threadMXBean.getAllThreadIds();
-    for (ThreadInfo threadInfo : threadMXBean.getThreadInfo(threadIds, 0)) {
-      if (threadInfo == null) continue; // race protection
-      switch (threadInfo.getThreadState()) {
-        case NEW:           threadsNew++;           break;
-        case RUNNABLE:      threadsRunnable++;      break;
-        case BLOCKED:       threadsBlocked++;       break;
-        case WAITING:       threadsWaiting++;       break;
-        case TIMED_WAITING: threadsTimedWaiting++;  break;
-        case TERMINATED:    threadsTerminated++;    break;
-      }
-    }
-    rb.addGauge(ThreadsNew, threadsNew)
-        .addGauge(ThreadsRunnable, threadsRunnable)
-        .addGauge(ThreadsBlocked, threadsBlocked)
-        .addGauge(ThreadsWaiting, threadsWaiting)
-        .addGauge(ThreadsTimedWaiting, threadsTimedWaiting)
-        .addGauge(ThreadsTerminated, threadsTerminated);
-  }
-
-  private void getEventCounters(MetricsRecordBuilder rb) {
-    rb.addCounter(LogFatal, EventCounter.getFatal())
-        .addCounter(LogError, EventCounter.getError())
-        .addCounter(LogWarn, EventCounter.getWarn())
-        .addCounter(LogInfo, EventCounter.getInfo());
-  }
-}

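For reference, a minimal sketch of how the JvmMetrics source deleted above was typically registered with the Hadoop metrics2 system. The class name, process name, session id, and metrics prefix below are made up, and the sketch only compiles against a tree that still contains this class:

import org.apache.hadoop.hive.common.JvmMetrics;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

public class JvmMetricsExample {
  public static void main(String[] args) {
    // Bring up the default metrics system under an illustrative prefix.
    MetricsSystem ms = DefaultMetricsSystem.initialize("example");
    // Register the JVM metrics source, as servers did before this revert.
    JvmMetrics metrics = JvmMetrics.create("ExampleServer", "session-1", ms);
    // The singleton helper does the same against DefaultMetricsSystem.instance():
    // JvmMetrics.initSingleton("ExampleServer", "session-1");
    System.out.println("Registered JVM metrics source: " + metrics);
  }
}
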
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/JvmMetricsInfo.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/JvmMetricsInfo.java b/common/src/java/org/apache/hadoop/hive/common/JvmMetricsInfo.java
deleted file mode 100644
index 3ab73c5..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/JvmMetricsInfo.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common;
-
-import com.google.common.base.Objects;
-
-import org.apache.hadoop.metrics2.MetricsInfo;
-
-/**
- * JVM and logging related metrics info instances. Ported from Hadoop JvmMetricsInfo.
- */
-public enum JvmMetricsInfo implements MetricsInfo {
-  JvmMetrics("JVM related metrics etc."), // record info
-  // metrics
-  MemNonHeapUsedM("Non-heap memory used in MB"),
-  MemNonHeapCommittedM("Non-heap memory committed in MB"),
-  MemNonHeapMaxM("Non-heap memory max in MB"),
-  MemHeapUsedM("Heap memory used in MB"),
-  MemHeapCommittedM("Heap memory committed in MB"),
-  MemHeapMaxM("Heap memory max in MB"),
-  MemMaxM("Max memory size in MB"),
-  GcCount("Total GC count"),
-  GcTimeMillis("Total GC time in milliseconds"),
-  ThreadsNew("Number of new threads"),
-  ThreadsRunnable("Number of runnable threads"),
-  ThreadsBlocked("Number of blocked threads"),
-  ThreadsWaiting("Number of waiting threads"),
-  ThreadsTimedWaiting("Number of timed waiting threads"),
-  ThreadsTerminated("Number of terminated threads"),
-  LogFatal("Total number of fatal log events"),
-  LogError("Total number of error log events"),
-  LogWarn("Total number of warning log events"),
-  LogInfo("Total number of info log events"),
-  GcNumWarnThresholdExceeded("Number of times that the GC warn threshold is exceeded"),
-  GcNumInfoThresholdExceeded("Number of times that the GC info threshold is exceeded"),
-  GcTotalExtraSleepTime("Total GC extra sleep time in milliseconds");
-
-  private final String desc;
-
-  JvmMetricsInfo(String desc) { this.desc = desc; }
-
-  @Override public String description() { return desc; }
-
-  @Override public String toString() {
-    return Objects.toStringHelper(this)
-        .add("name", name()).add("description", desc)
-        .toString();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/LogUtils.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/LogUtils.java b/common/src/java/org/apache/hadoop/hive/common/LogUtils.java
index 83f3af7..c2a0d9a 100644
--- a/common/src/java/org/apache/hadoop/hive/common/LogUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/LogUtils.java
@@ -25,12 +25,11 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.LoggerContext;
 import org.apache.logging.log4j.core.config.Configurator;
 import org.apache.logging.log4j.core.impl.Log4jContextFactory;
-import org.apache.logging.log4j.spi.DefaultThreadContextMap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -46,15 +45,8 @@ public class LogUtils {
   /**
    * Constants for log masking
    */
-  private static final String KEY_TO_MASK_WITH = "password";
-  private static final String MASKED_VALUE = "###_MASKED_###";
-
-  /**
-   * Constants of the key strings for the logging ThreadContext.
-   */
-  public static final String SESSIONID_LOG_KEY = "sessionId";
-  public static final String QUERYID_LOG_KEY = "queryId";
-  public static final String OPERATIONLOG_LEVEL_KEY = "operationLogLevel";
+  private static String KEY_TO_MASK_WITH = "password";
+  private static String MASKED_VALUE = "###_MASKED_###";
 
   @SuppressWarnings("serial")
   public static class LogInitializationException extends Exception {
@@ -118,8 +110,6 @@ public class LogUtils {
           System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(), queryId);
         }
         final boolean async = checkAndSetAsyncLogging(conf);
-        // required for MDC based routing appender so that child threads can inherit the MDC context
-        System.setProperty(DefaultThreadContextMap.INHERITABLE_MAP, "true");
         Configurator.initialize(null, log4jFileName);
         logConfigLocation(conf);
         return "Logging initialized using configuration in " + log4jConfigFile + " Async: " + async;
@@ -162,7 +152,6 @@ public class LogUtils {
     }
     if (hive_l4j != null) {
       final boolean async = checkAndSetAsyncLogging(conf);
-      System.setProperty(DefaultThreadContextMap.INHERITABLE_MAP, "true");
       Configurator.initialize(null, hive_l4j.toString());
       logConfigLocation(conf);
       return (logMessage + "\n" + "Logging initialized using configuration in " + hive_l4j +
@@ -204,22 +193,4 @@ public class LogUtils {
     }
     return value;
   }
-
-  /**
-   * Register logging context so that log system can print QueryId, SessionId, etc for each message
-   */
-  public static void registerLoggingContext(Configuration conf) {
-    MDC.put(SESSIONID_LOG_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVESESSIONID));
-    MDC.put(QUERYID_LOG_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID));
-    if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) {
-      MDC.put(OPERATIONLOG_LEVEL_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL));
-    }
-  }
-
-  /**
-   * Unregister logging context
-   */
-  public static void unregisterLoggingContext() {
-    MDC.clear();
-  }
 }

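A minimal sketch of the pre-revert usage pattern for the registerLoggingContext/unregisterLoggingContext pair removed above; the try/finally shape and the example class name are assumptions about a typical caller, not code from this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.LogUtils;

public class LoggingContextExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Push sessionId/queryId (and the operation log level, if enabled)
    // into the logging MDC for the current thread.
    LogUtils.registerLoggingContext(conf);
    try {
      // ... run the work whose log lines should carry the context ...
    } finally {
      // Clear the MDC so the keys do not leak into unrelated work.
      LogUtils.unregisterLoggingContext();
    }
  }
}
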
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/MemoryEstimate.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/MemoryEstimate.java b/common/src/java/org/apache/hadoop/hive/common/MemoryEstimate.java
deleted file mode 100644
index 36ae56f..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/MemoryEstimate.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common;
-
-/**
- * Interface that can be used to provide size estimates based on data structures held in memory for an object instance.
- */
-public interface MemoryEstimate {
-  /**
-   * Returns estimated memory size based {@link org.apache.hadoop.hive.ql.util.JavaDataModel}
-   *
-   * @return estimated memory size in bytes
-   */
-  long getEstimatedMemorySize();
-}

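A small, hypothetical implementation of the MemoryEstimate interface deleted above, shown only to illustrate its single-method contract; the class name and the byte constants are invented (real implementations derived sizes from JavaDataModel):

import org.apache.hadoop.hive.common.MemoryEstimate;

public class FixedWidthRow implements MemoryEstimate {
  private final long[] columns;

  public FixedWidthRow(long[] columns) {
    this.columns = columns;
  }

  @Override
  public long getEstimatedMemorySize() {
    // Crude estimate: object header + array header + 8 bytes per long element.
    return 16 + 16 + 8L * columns.length;
  }
}
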
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java b/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
index a9e17c2..926b4a6 100644
--- a/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
+++ b/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
@@ -49,7 +49,7 @@ import com.fasterxml.jackson.databind.annotation.JsonSerialize;
 
 public class StatsSetupConst {
 
-  protected static final Logger LOG = LoggerFactory.getLogger(StatsSetupConst.class.getName());
+  protected final static Logger LOG = LoggerFactory.getLogger(StatsSetupConst.class.getName());
 
   public enum StatDB {
     fs {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/StringInternUtils.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/StringInternUtils.java b/common/src/java/org/apache/hadoop/hive/common/StringInternUtils.java
index 92d37e8..c729991 100644
--- a/common/src/java/org/apache/hadoop/hive/common/StringInternUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/StringInternUtils.java
@@ -104,21 +104,13 @@ public class StringInternUtils {
    * This method interns all the strings in the given list in place. That is,
    * it iterates over the list, replaces each element with the interned copy
    * and eventually returns the same list.
-   *
-   * Note that the provided List implementation should return an iterator
-   * (via list.listIterator()) method, and that iterator should implement
-   * the set(Object) method. That's what all List implementations in the JDK
-   * provide. However, if some custom List implementation doesn't have this
-   * functionality, this method will return without interning its elements.
    */
   public static List<String> internStringsInList(List<String> list) {
     if (list != null) {
-      try {
-        ListIterator<String> it = list.listIterator();
-        while (it.hasNext()) {
-          it.set(it.next().intern());
-        }
-      } catch (UnsupportedOperationException e) { } // set() not implemented - ignore
+      ListIterator<String> it = list.listIterator();
+      while (it.hasNext()) {
+        it.set(it.next().intern());
+      }
     }
     return list;
   }

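A short sketch of internStringsInList after this change: with the try/catch removed, a list whose iterator does not support set() now propagates UnsupportedOperationException instead of being silently skipped. The example lists and class name are illustrative only:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hive.common.StringInternUtils;

public class InternListExample {
  public static void main(String[] args) {
    // ArrayList's ListIterator supports set(), so each element is replaced
    // with its interned copy in place.
    List<String> cols = new ArrayList<>(Arrays.asList("id", "name", "id"));
    StringInternUtils.internStringsInList(cols);

    // An unmodifiable list rejects set(); post-revert this throws rather
    // than being ignored.
    List<String> readOnly = Collections.unmodifiableList(Arrays.asList("id"));
    try {
      StringInternUtils.internStringsInList(readOnly);
    } catch (UnsupportedOperationException expected) {
      System.out.println("list cannot be interned in place");
    }
  }
}
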
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java b/common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java
index 8f55354..334b93e 100644
--- a/common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java
+++ b/common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.common;
 
 import java.util.Arrays;
-import java.util.BitSet;
 
 /**
  * An implementation of {@link org.apache.hadoop.hive.common.ValidTxnList} for use by the compactor.
@@ -41,12 +40,11 @@ public class ValidCompactorTxnList extends ValidReadTxnList {
   }
   /**
    * @param abortedTxnList list of all aborted transactions
-   * @param abortedBits bitset marking whether the corresponding transaction is aborted
    * @param highWatermark highest committed transaction to be considered for compaction,
    *                      equivalently (lowest_open_txn - 1).
    */
-  public ValidCompactorTxnList(long[] abortedTxnList, BitSet abortedBits, long highWatermark) {
-    super(abortedTxnList, abortedBits, highWatermark); // abortedBits should be all true as everything in exceptions are aborted txns
+  public ValidCompactorTxnList(long[] abortedTxnList, long highWatermark) {
+    super(abortedTxnList, highWatermark);
     if(this.exceptions.length <= 0) {
       return;
     }
@@ -77,9 +75,4 @@ public class ValidCompactorTxnList extends ValidReadTxnList {
   public RangeResponse isTxnRangeValid(long minTxnId, long maxTxnId) {
     return highWatermark >= maxTxnId ? RangeResponse.ALL : RangeResponse.NONE;
   }
-
-  @Override
-  public boolean isTxnAborted(long txnid) {
-    return Arrays.binarySearch(exceptions, txnid) >= 0;
-  }
 }

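A minimal sketch of the post-revert ValidCompactorTxnList API shown in this hunk (aborted txn ids plus high watermark, no BitSet argument); the txn ids and watermark are made-up values:

import org.apache.hadoop.hive.common.ValidCompactorTxnList;
import org.apache.hadoop.hive.common.ValidTxnList.RangeResponse;

public class CompactorTxnListExample {
  public static void main(String[] args) {
    // Aborted txns 7 and 9, with 12 as the highest txn to consider for compaction.
    ValidCompactorTxnList txns = new ValidCompactorTxnList(new long[] {7, 9}, 12);

    // For the compactor, a range is valid only when it lies entirely at or
    // below the high watermark; otherwise none of it is.
    RangeResponse below = txns.isTxnRangeValid(1, 12);   // ALL
    RangeResponse above = txns.isTxnRangeValid(10, 15);  // NONE
    System.out.println(below + " " + above);
  }
}
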
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java b/common/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java
index 4e57772..2f35917 100644
--- a/common/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java
+++ b/common/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hive.common;
 import com.google.common.annotations.VisibleForTesting;
 
 import java.util.Arrays;
-import java.util.BitSet;
 
 /**
  * An implementation of {@link org.apache.hadoop.hive.common.ValidTxnList} for use by readers.
@@ -31,27 +30,32 @@ import java.util.BitSet;
 public class ValidReadTxnList implements ValidTxnList {
 
   protected long[] exceptions;
-  protected BitSet abortedBits; // BitSet for flagging aborted transactions. Bit is true if aborted, false if open
   //default value means there are no open txn in the snapshot
   private long minOpenTxn = Long.MAX_VALUE;
   protected long highWatermark;
 
   public ValidReadTxnList() {
-    this(new long[0], new BitSet(), Long.MAX_VALUE, Long.MAX_VALUE);
+    this(new long[0], Long.MAX_VALUE, Long.MAX_VALUE);
   }
 
   /**
    * Used if there are no open transactions in the snapshot
    */
-  public ValidReadTxnList(long[] exceptions, BitSet abortedBits, long highWatermark) {
-    this(exceptions, abortedBits, highWatermark, Long.MAX_VALUE);
+  public ValidReadTxnList(long[] exceptions, long highWatermark) {
+    this(exceptions, highWatermark, Long.MAX_VALUE);
   }
-  public ValidReadTxnList(long[] exceptions, BitSet abortedBits, long highWatermark, long minOpenTxn) {
-    if (exceptions.length > 0) {
+  public ValidReadTxnList(long[] exceptions, long highWatermark, long minOpenTxn) {
+    if (exceptions.length == 0) {
+      this.exceptions = exceptions;
+    } else {
+      this.exceptions = exceptions.clone();
+      Arrays.sort(this.exceptions);
       this.minOpenTxn = minOpenTxn;
+      if(this.exceptions[0] <= 0) {
+        //should never happen of course
+        throw new IllegalArgumentException("Invalid txnid: " + this.exceptions[0] + " found");
+      }
     }
-    this.exceptions = exceptions;
-    this.abortedBits = abortedBits;
     this.highWatermark = highWatermark;
   }
 
@@ -114,28 +118,12 @@ public class ValidReadTxnList implements ValidTxnList {
     buf.append(':');
     buf.append(minOpenTxn);
     if (exceptions.length == 0) {
-      buf.append(':');  // separator for open txns
-      buf.append(':');  // separator for aborted txns
+      buf.append(':');
     } else {
-      StringBuilder open = new StringBuilder();
-      StringBuilder abort = new StringBuilder();
-      for (int i = 0; i < exceptions.length; i++) {
-        if (abortedBits.get(i)) {
-          if (abort.length() > 0) {
-            abort.append(',');
-          }
-          abort.append(exceptions[i]);
-        } else {
-          if (open.length() > 0) {
-            open.append(',');
-          }
-          open.append(exceptions[i]);
-        }
+      for(long except: exceptions) {
+        buf.append(':');
+        buf.append(except);
       }
-      buf.append(':');
-      buf.append(open);
-      buf.append(':');
-      buf.append(abort);
     }
     return buf.toString();
   }
@@ -145,41 +133,13 @@ public class ValidReadTxnList implements ValidTxnList {
     if (src == null || src.length() == 0) {
       highWatermark = Long.MAX_VALUE;
       exceptions = new long[0];
-      abortedBits = new BitSet();
     } else {
       String[] values = src.split(":");
       highWatermark = Long.parseLong(values[0]);
       minOpenTxn = Long.parseLong(values[1]);
-      String[] openTxns = new String[0];
-      String[] abortedTxns = new String[0];
-      if (values.length < 3) {
-        openTxns = new String[0];
-        abortedTxns = new String[0];
-      } else if (values.length == 3) {
-        if (!values[2].isEmpty()) {
-          openTxns = values[2].split(",");
-        }
-      } else {
-        if (!values[2].isEmpty()) {
-          openTxns = values[2].split(",");
-        }
-        if (!values[3].isEmpty()) {
-          abortedTxns = values[3].split(",");
-        }
-      }
-      exceptions = new long[openTxns.length + abortedTxns.length];
-      int i = 0;
-      for (String open : openTxns) {
-        exceptions[i++] = Long.parseLong(open);
-      }
-      for (String abort : abortedTxns) {
-        exceptions[i++] = Long.parseLong(abort);
-      }
-      Arrays.sort(exceptions);
-      abortedBits = new BitSet(exceptions.length);
-      for (String abort : abortedTxns) {
-        int index = Arrays.binarySearch(exceptions, Long.parseLong(abort));
-        abortedBits.set(index);
+      exceptions = new long[values.length - 2];
+      for(int i = 2; i < values.length; ++i) {
+        exceptions[i-2] = Long.parseLong(values[i]);
       }
     }
   }
@@ -197,40 +157,5 @@ public class ValidReadTxnList implements ValidTxnList {
   public long getMinOpenTxn() {
     return minOpenTxn;
   }
-
-  @Override
-  public boolean isTxnAborted(long txnid) {
-    int index = Arrays.binarySearch(exceptions, txnid);
-    return index >= 0 && abortedBits.get(index);
-  }
-
-  @Override
-  public RangeResponse isTxnRangeAborted(long minTxnId, long maxTxnId) {
-    // check the easy cases first
-    if (highWatermark < minTxnId) {
-      return RangeResponse.NONE;
-    }
-
-    int count = 0;  // number of aborted txns found in exceptions
-
-    // traverse the aborted txns list, starting at first aborted txn index
-    for (int i = abortedBits.nextSetBit(0); i >= 0; i = abortedBits.nextSetBit(i + 1)) {
-      long abortedTxnId = exceptions[i];
-      if (abortedTxnId > maxTxnId) {  // we've already gone beyond the specified range
-        break;
-      }
-      if (abortedTxnId >= minTxnId && abortedTxnId <= maxTxnId) {
-        count++;
-      }
-    }
-
-    if (count == 0) {
-      return RangeResponse.NONE;
-    } else if (count == (maxTxnId - minTxnId + 1)) {
-      return RangeResponse.ALL;
-    } else {
-      return RangeResponse.SOME;
-    }
-  }
 }
 

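A small round-trip sketch of the post-revert string layout handled by writeToString()/readFromString() above: high watermark, then minimum open txn, then one colon-separated field per exception. The txn ids and class name below are illustrative:

import org.apache.hadoop.hive.common.ValidReadTxnList;

public class TxnListStringExample {
  public static void main(String[] args) {
    // Post-revert constructor: exceptions, high watermark, lowest open txn.
    ValidReadTxnList txns = new ValidReadTxnList(new long[] {5, 7}, 10, 5);

    // Serializes to "10:5:5:7" -- no separate open/aborted sections any more.
    String serialized = txns.writeToString();
    System.out.println(serialized);

    // readFromString() parses the same layout back into an equivalent list.
    ValidReadTxnList copy = new ValidReadTxnList();
    copy.readFromString(serialized);
    System.out.println(copy.writeToString());
  }
}
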
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/ValidTxnList.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/ValidTxnList.java b/common/src/java/org/apache/hadoop/hive/common/ValidTxnList.java
index d4ac02c..5e1e4ee 100644
--- a/common/src/java/org/apache/hadoop/hive/common/ValidTxnList.java
+++ b/common/src/java/org/apache/hadoop/hive/common/ValidTxnList.java
@@ -71,7 +71,7 @@ public interface ValidTxnList {
 
   /**
    * Populate this validTxnList from the string.  It is assumed that the string
-   * was created via {@link #writeToString()} and the exceptions list is sorted.
+   * was created via {@link #writeToString()}.
    * @param src source string.
    */
   public void readFromString(String src);
@@ -89,20 +89,4 @@ public interface ValidTxnList {
    * @return a list of invalid transaction ids
    */
   public long[] getInvalidTransactions();
-
-  /**
-   * Indicates whether a given transaction is aborted.
-   * @param txnid id for the transaction
-   * @return true if aborted, false otherwise
-   */
-  public boolean isTxnAborted(long txnid);
-
-  /**
-   * Find out if a range of transaction ids are aborted.
-   * @param minTxnId minimum txnid to look for, inclusive
-   * @param maxTxnId maximum txnid to look for, inclusive
-   * @return Indicate whether none, some, or all of these transactions are aborted.
-   */
-  public RangeResponse isTxnRangeAborted(long minTxnId, long maxTxnId);
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Connection.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Connection.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Connection.java
deleted file mode 100644
index 0df6f4c..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Connection.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common.jsonexplain;
-
-public final class Connection implements Comparable<Connection>{
-  public final String type;
-  public final Vertex from;
-
-  public Connection(String type, Vertex from) {
-    super();
-    this.type = type;
-    this.from = from;
-  }
-
-  @Override
-  public int compareTo(Connection o) {
-    return from.compareTo(o.from);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/jsonexplain/DagJsonParser.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/DagJsonParser.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/DagJsonParser.java
deleted file mode 100644
index 1f01685..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/DagJsonParser.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common.jsonexplain;
-
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.hadoop.hive.common.jsonexplain.JsonParser;
-import org.json.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public abstract class DagJsonParser implements JsonParser {
-  public final Map<String, Stage> stages = new LinkedHashMap<>();
-  protected final Logger LOG;
-  // the objects that have been printed.
-  public final Set<Object> printSet = new LinkedHashSet<>();
-  // the vertex that should be inlined. <Operator, list of Vertex that is
-  // inlined>
-  public final Map<Op, List<Connection>> inlineMap = new LinkedHashMap<>();
-
-  public DagJsonParser() {
-    super();
-    LOG = LoggerFactory.getLogger(this.getClass().getName());
-  }
-
-  public void extractStagesAndPlans(JSONObject inputObject) throws Exception {
-    // extract stages
-    JSONObject dependency = inputObject.getJSONObject("STAGE DEPENDENCIES");
-    if (dependency != null && dependency.length() > 0) {
-      // iterate for the first time to get all the names of stages.
-      for (String stageName : JSONObject.getNames(dependency)) {
-        this.stages.put(stageName, new Stage(stageName, this));
-      }
-      // iterate for the second time to get all the dependency.
-      for (String stageName : JSONObject.getNames(dependency)) {
-        JSONObject dependentStageNames = dependency.getJSONObject(stageName);
-        this.stages.get(stageName).addDependency(dependentStageNames, this.stages);
-      }
-    }
-    // extract stage plans
-    JSONObject stagePlans = inputObject.getJSONObject("STAGE PLANS");
-    if (stagePlans != null && stagePlans.length() > 0) {
-      for (String stageName : JSONObject.getNames(stagePlans)) {
-        JSONObject stagePlan = stagePlans.getJSONObject(stageName);
-        this.stages.get(stageName).extractVertex(stagePlan);
-      }
-    }
-  }
-
-  /**
-   * @param indentFlag
-   *          help to generate correct indent
-   * @return
-   */
-  public static String prefixString(int indentFlag) {
-    StringBuilder sb = new StringBuilder();
-    for (int index = 0; index < indentFlag; index++) {
-      sb.append("  ");
-    }
-    return sb.toString();
-  }
-
-  /**
-   * @param indentFlag
-   * @param tail
-   *          help to generate correct indent with a specific tail
-   * @return
-   */
-  public static String prefixString(int indentFlag, String tail) {
-    StringBuilder sb = new StringBuilder();
-    for (int index = 0; index < indentFlag; index++) {
-      sb.append("  ");
-    }
-    int len = sb.length();
-    return sb.replace(len - tail.length(), len, tail).toString();
-  }
-
-  @Override
-  public void print(JSONObject inputObject, PrintStream outputStream) throws Exception {
-    LOG.info("JsonParser is parsing:" + inputObject.toString());
-    this.extractStagesAndPlans(inputObject);
-    Printer printer = new Printer();
-    // print out the cbo info
-    if (inputObject.has("cboInfo")) {
-      printer.println(inputObject.getString("cboInfo"));
-      printer.println();
-    }
-    // print out the vertex dependency in root stage
-    for (Stage candidate : this.stages.values()) {
-      if (candidate.tezStageDependency != null && candidate.tezStageDependency.size() > 0) {
-        printer.println("Vertex dependency in root stage");
-        for (Entry<Vertex, List<Connection>> entry : candidate.tezStageDependency.entrySet()) {
-          StringBuilder sb = new StringBuilder();
-          sb.append(entry.getKey().name);
-          sb.append(" <- ");
-          boolean printcomma = false;
-          for (Connection connection : entry.getValue()) {
-            if (printcomma) {
-              sb.append(", ");
-            } else {
-              printcomma = true;
-            }
-            sb.append(connection.from.name + " (" + connection.type + ")");
-          }
-          printer.println(sb.toString());
-        }
-        printer.println();
-      }
-    }
-    // print out all the stages that have no childStages.
-    for (Stage candidate : this.stages.values()) {
-      if (candidate.childStages.isEmpty()) {
-        candidate.print(printer, 0);
-      }
-    }
-    outputStream.println(printer.toString());
-  }
-
-  public void addInline(Op op, Connection connection) {
-    List<Connection> list = inlineMap.get(op);
-    if (list == null) {
-      list = new ArrayList<>();
-      list.add(connection);
-      inlineMap.put(op, list);
-    } else {
-      list.add(connection);
-    }
-  }
-
-  public boolean isInline(Vertex v) {
-    for (List<Connection> list : inlineMap.values()) {
-      for (Connection connection : list) {
-        if (connection.from.equals(v)) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
-  public abstract String mapEdgeType(String edgeName);
-
-  public abstract String getFrameworkName();
-}

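A hypothetical subclass illustrating the two hooks the deleted abstract DagJsonParser required from engine-specific parsers; the class name and edge-name mapping below are assumptions for illustration, and the sketch only compiles against the pre-revert tree that still has these jsonexplain classes:

import org.apache.hadoop.hive.common.jsonexplain.DagJsonParser;

public class ExampleJsonParser extends DagJsonParser {
  @Override
  public String mapEdgeType(String edgeName) {
    // Translate the engine's edge name into the label printed in the DAG view.
    return "SIMPLE_EDGE".equals(edgeName) ? "SHUFFLE" : edgeName;
  }

  @Override
  public String getFrameworkName() {
    return "Example";
  }
}
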
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/jsonexplain/DagJsonParserUtils.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/DagJsonParserUtils.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/DagJsonParserUtils.java
deleted file mode 100644
index a518ac1..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/DagJsonParserUtils.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common.jsonexplain;
-
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-
-public class DagJsonParserUtils {
-
-  public static List<String> OperatorNoStats = Arrays.asList(new String[] { "File Output Operator",
-      "Reduce Output Operator" });
-
-  public static String renameReduceOutputOperator(String operatorName, Vertex vertex) {
-    if (operatorName.equals("Reduce Output Operator") && vertex.edgeType != null) {
-      return vertex.edgeType;
-    } else {
-      return operatorName;
-    }
-  }
-
-  public static String attrsToString(Map<String, String> attrs) {
-    StringBuffer sb = new StringBuffer();
-    boolean first = true;
-    for (Entry<String, String> entry : attrs.entrySet()) {
-      if (first) {
-        first = false;
-      } else {
-        sb.append(",");
-      }
-      sb.append(entry.getKey() + entry.getValue());
-    }
-    return sb.toString();
-  }
-}

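For completeness, a tiny sketch of the attrsToString helper removed above, which joined each key directly to its value and separated the pairs with commas; the attribute names and values are made up:

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.hive.common.jsonexplain.DagJsonParserUtils;

public class AttrsToStringExample {
  public static void main(String[] args) {
    Map<String, String> attrs = new LinkedHashMap<>();
    attrs.put("keys:", "id (type: int)");
    attrs.put("outputColumnNames:", "_col0");
    // Prints: keys:id (type: int),outputColumnNames:_col0
    System.out.println(DagJsonParserUtils.attrsToString(attrs));
  }
}
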
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/jsonexplain/JsonParserFactory.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/JsonParserFactory.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/JsonParserFactory.java
index 2a5d47a..db118bf 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/JsonParserFactory.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/JsonParserFactory.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hive.common.jsonexplain;
 
-import org.apache.hadoop.hive.common.jsonexplain.spark.SparkJsonParser;
 import org.apache.hadoop.hive.common.jsonexplain.tez.TezJsonParser;
 import org.apache.hadoop.hive.conf.HiveConf;
 
@@ -36,9 +35,6 @@ public class JsonParserFactory {
     if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
       return new TezJsonParser();
     }
-    if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
-      return new SparkJsonParser();
-    }
     return null;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Op.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Op.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Op.java
deleted file mode 100644
index 03c5981..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Op.java
+++ /dev/null
@@ -1,358 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common.jsonexplain;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.hadoop.hive.common.jsonexplain.Vertex.VertexType;
-import org.json.JSONArray;
-import org.json.JSONException;
-import org.json.JSONObject;
-
-public final class Op {
-  public final String name;
-  // tezJsonParser
-  public final DagJsonParser parser;
-  public final String operatorId;
-  public Op parent;
-  public final List<Op> children;
-  public final Map<String, String> attrs;
-  // the jsonObject for this operator
-  public final JSONObject opObject;
-  // the vertex that this operator belongs to
-  public final Vertex vertex;
-  // the vertex that this operator output to
-  public final String outputVertexName;
-  // the Operator type
-  public final OpType type;
-
-  public enum OpType {
-    MAPJOIN, MERGEJOIN, RS, OTHERS
-  };
-
-  public Op(String name, String id, String outputVertexName, List<Op> children,
-      Map<String, String> attrs, JSONObject opObject, Vertex vertex, DagJsonParser tezJsonParser)
-      throws JSONException {
-    super();
-    this.name = name;
-    this.operatorId = id;
-    this.type = deriveOpType(operatorId);
-    this.outputVertexName = outputVertexName;
-    this.children = children;
-    this.attrs = attrs;
-    this.opObject = opObject;
-    this.vertex = vertex;
-    this.parser = tezJsonParser;
-  }
-
-  private OpType deriveOpType(String operatorId) {
-    if (operatorId != null) {
-      if (operatorId.startsWith(OpType.MAPJOIN.toString())) {
-        return OpType.MAPJOIN;
-      } else if (operatorId.startsWith(OpType.MERGEJOIN.toString())) {
-        return OpType.MERGEJOIN;
-      } else if (operatorId.startsWith(OpType.RS.toString())) {
-        return OpType.RS;
-      } else {
-        return OpType.OTHERS;
-      }
-    } else {
-      return OpType.OTHERS;
-    }
-  }
-
-  private void inlineJoinOp() throws Exception {
-    // inline map join operator
-    if (this.type == OpType.MAPJOIN) {
-      JSONObject joinObj = opObject.getJSONObject(this.name);
-      // get the map for posToVertex
-      Map<String, Vertex> posToVertex = new LinkedHashMap<>();
-      if (joinObj.has("input vertices:")) {
-        JSONObject verticeObj = joinObj.getJSONObject("input vertices:");
-        for (String pos : JSONObject.getNames(verticeObj)) {
-          String vertexName = verticeObj.getString(pos);
-          // update the connection
-          Connection c = null;
-          for (Connection connection : vertex.parentConnections) {
-            if (connection.from.name.equals(vertexName)) {
-              posToVertex.put(pos, connection.from);
-              c = connection;
-              break;
-            }
-          }
-          if (c != null) {
-            parser.addInline(this, c);
-          }
-        }
-        // update the attrs
-        this.attrs.remove("input vertices:");
-      }
-      // update the keys to use operator name
-      JSONObject keys = joinObj.getJSONObject("keys:");
-      // find out the vertex for the big table
-      Set<Vertex> parentVertexes = new HashSet<>();
-      for (Connection connection : vertex.parentConnections) {
-        parentVertexes.add(connection.from);
-      }
-      parentVertexes.removeAll(posToVertex.values());
-      Map<String, String> posToOpId = new LinkedHashMap<>();
-      if (keys.length() != 0) {
-        for (String key : JSONObject.getNames(keys)) {
-          // first search from the posToVertex
-          if (posToVertex.containsKey(key)) {
-            Vertex v = posToVertex.get(key);
-            if (v.rootOps.size() == 1) {
-              posToOpId.put(key, v.rootOps.get(0).operatorId);
-            } else if ((v.rootOps.size() == 0 && v.vertexType == VertexType.UNION)) {
-              posToOpId.put(key, v.name);
-            } else {
-              Op joinRSOp = v.getJoinRSOp(vertex);
-              if (joinRSOp != null) {
-                posToOpId.put(key, joinRSOp.operatorId);
-              } else {
-                throw new Exception(
-                    "Can not find join reduceSinkOp for " + v.name + " to join " + vertex.name
-                        + " when hive explain user is trying to identify the operator id.");
-              }
-            }
-          }
-          // then search from parent
-          else if (parent != null) {
-            posToOpId.put(key, parent.operatorId);
-          }
-          // then assume it is from its own vertex
-          else if (parentVertexes.size() == 1) {
-            Vertex v = parentVertexes.iterator().next();
-            parentVertexes.clear();
-            if (v.rootOps.size() == 1) {
-              posToOpId.put(key, v.rootOps.get(0).operatorId);
-            } else if ((v.rootOps.size() == 0 && v.vertexType == VertexType.UNION)) {
-              posToOpId.put(key, v.name);
-            } else {
-              Op joinRSOp = v.getJoinRSOp(vertex);
-              if (joinRSOp != null) {
-                posToOpId.put(key, joinRSOp.operatorId);
-              } else {
-                throw new Exception(
-                    "Can not find join reduceSinkOp for " + v.name + " to join " + vertex.name
-                        + " when hive explain user is trying to identify the operator id.");
-              }
-            }
-          }
-          // finally throw an exception
-          else {
-            throw new Exception(
-                "Can not find the source operator on one of the branches of map join.");
-          }
-        }
-      }
-      this.attrs.remove("keys:");
-      StringBuffer sb = new StringBuffer();
-      JSONArray conditionMap = joinObj.getJSONArray("condition map:");
-      for (int index = 0; index < conditionMap.length(); index++) {
-        JSONObject cond = conditionMap.getJSONObject(index);
-        String k = (String) cond.keys().next();
-        JSONObject condObject = new JSONObject((String)cond.get(k));
-        String type = condObject.getString("type");
-        String left = condObject.getString("left");
-        String right = condObject.getString("right");
-        if (keys.length() != 0) {
-          sb.append(posToOpId.get(left) + "." + keys.get(left) + "=" + posToOpId.get(right) + "."
-              + keys.get(right) + "(" + type + "),");
-        } else {
-          // probably a cross product
-          sb.append("(" + type + "),");
-        }
-      }
-      this.attrs.remove("condition map:");
-      this.attrs.put("Conds:", sb.substring(0, sb.length() - 1));
-    }
-    // should be merge join
-    else {
-      Map<String, String> posToOpId = new LinkedHashMap<>();
-      if (vertex.mergeJoinDummyVertexs.size() == 0) {
-        if (vertex.tagToInput.size() != vertex.parentConnections.size()) {
-          throw new Exception("tagToInput size " + vertex.tagToInput.size()
-              + " is different from parentConnections size " + vertex.parentConnections.size());
-        }
-        for (Entry<String, String> entry : vertex.tagToInput.entrySet()) {
-          Connection c = null;
-          for (Connection connection : vertex.parentConnections) {
-            if (connection.from.name.equals(entry.getValue())) {
-              Vertex v = connection.from;
-              if (v.rootOps.size() == 1) {
-                posToOpId.put(entry.getKey(), v.rootOps.get(0).operatorId);
-              } else if ((v.rootOps.size() == 0 && v.vertexType == VertexType.UNION)) {
-                posToOpId.put(entry.getKey(), v.name);
-              } else {
-                Op joinRSOp = v.getJoinRSOp(vertex);
-                if (joinRSOp != null) {
-                  posToOpId.put(entry.getKey(), joinRSOp.operatorId);
-                } else {
-                  throw new Exception(
-                      "Can not find join reduceSinkOp for " + v.name + " to join " + vertex.name
-                          + " when hive explain user is trying to identify the operator id.");
-                }
-              }
-              c = connection;
-              break;
-            }
-          }
-          if (c == null) {
-            throw new Exception("Can not find " + entry.getValue()
-                + " while parsing keys of merge join operator");
-          }
-        }
-      } else {
-        posToOpId.put(vertex.tag, this.parent.operatorId);
-        for (Vertex v : vertex.mergeJoinDummyVertexs) {
-          if (v.rootOps.size() != 1) {
-            throw new Exception("Can not find a single root operators in a single vertex " + v.name
-                + " when hive explain user is trying to identify the operator id.");
-          }
-          posToOpId.put(v.tag, v.rootOps.get(0).operatorId);
-        }
-      }
-      JSONObject joinObj = opObject.getJSONObject(this.name);
-      // update the keys to use operator name
-      JSONObject keys = joinObj.getJSONObject("keys:");
-      if (keys.length() != 0) {
-        for (String key : JSONObject.getNames(keys)) {
-          if (!posToOpId.containsKey(key)) {
-            throw new Exception(
-                "Can not find the source operator on one of the branches of merge join.");
-          }
-        }
-        // inline merge join operator in a self-join
-        if (this.vertex != null) {
-          for (Vertex v : this.vertex.mergeJoinDummyVertexs) {
-            parser.addInline(this, new Connection(null, v));
-          }
-        }
-      }
-      // update the attrs
-      this.attrs.remove("keys:");
-      StringBuffer sb = new StringBuffer();
-      JSONArray conditionMap = joinObj.getJSONArray("condition map:");
-      for (int index = 0; index < conditionMap.length(); index++) {
-        JSONObject cond = conditionMap.getJSONObject(index);
-        String k = (String) cond.keys().next();
-        JSONObject condObject = new JSONObject((String)cond.get(k));
-        String type = condObject.getString("type");
-        String left = condObject.getString("left");
-        String right = condObject.getString("right");
-        if (keys.length() != 0) {
-          sb.append(posToOpId.get(left) + "." + keys.get(left) + "=" + posToOpId.get(right) + "."
-              + keys.get(right) + "(" + type + "),");
-        } else {
-          // probably a cross product
-          sb.append("(" + type + "),");
-        }
-      }
-      this.attrs.remove("condition map:");
-      this.attrs.put("Conds:", sb.substring(0, sb.length() - 1));
-    }
-  }
-
-  private String getNameWithOpIdStats() {
-    StringBuffer sb = new StringBuffer();
-    sb.append(DagJsonParserUtils.renameReduceOutputOperator(name, vertex));
-    if (operatorId != null) {
-      sb.append(" [" + operatorId + "]");
-    }
-    if (!DagJsonParserUtils.OperatorNoStats.contains(name) && attrs.containsKey("Statistics:")) {
-      sb.append(" (" + attrs.get("Statistics:") + ")");
-    }
-    attrs.remove("Statistics:");
-    return sb.toString();
-  }
-
-  /**
-   * @param printer
-   * @param indentFlag
-   * @param branchOfJoinOp
-   *          This parameter is used to show if it is a branch of a Join
-   *          operator so that we can decide the corresponding indent.
-   * @throws Exception
-   */
-  public void print(Printer printer, int indentFlag, boolean branchOfJoinOp) throws Exception {
-    // print name
-    if (parser.printSet.contains(this)) {
-      printer.println(DagJsonParser.prefixString(indentFlag) + " Please refer to the previous "
-          + this.getNameWithOpIdStats());
-      return;
-    }
-    parser.printSet.add(this);
-    if (!branchOfJoinOp) {
-      printer.println(DagJsonParser.prefixString(indentFlag) + this.getNameWithOpIdStats());
-    } else {
-      printer.println(DagJsonParser.prefixString(indentFlag, "<-") + this.getNameWithOpIdStats());
-    }
-    branchOfJoinOp = false;
-    // if this operator is a Map Join Operator or a Merge Join Operator
-    if (this.type == OpType.MAPJOIN || this.type == OpType.MERGEJOIN) {
-      inlineJoinOp();
-      branchOfJoinOp = true;
-    }
-    // if this operator is the last operator, we summarize the non-inlined
-    // vertex
-    List<Connection> noninlined = new ArrayList<>();
-    if (this.parent == null) {
-      if (this.vertex != null) {
-        for (Connection connection : this.vertex.parentConnections) {
-          if (!parser.isInline(connection.from)) {
-            noninlined.add(connection);
-          }
-        }
-      }
-    }
-    // print attr
-    indentFlag++;
-    if (!attrs.isEmpty()) {
-      printer.println(DagJsonParser.prefixString(indentFlag)
-          + DagJsonParserUtils.attrsToString(attrs));
-    }
-    // print inline vertex
-    if (parser.inlineMap.containsKey(this)) {
-      List<Connection> connections = parser.inlineMap.get(this);
-      Collections.sort(connections);
-      for (Connection connection : connections) {
-        connection.from.print(printer, indentFlag, connection.type, this.vertex);
-      }
-    }
-    // print parent op, i.e., where data comes from
-    if (this.parent != null) {
-      this.parent.print(printer, indentFlag, branchOfJoinOp);
-    }
-    // print next vertex
-    else {
-      Collections.sort(noninlined);
-      for (Connection connection : noninlined) {
-        connection.from.print(printer, indentFlag, connection.type, this.vertex);
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Printer.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Printer.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Printer.java
deleted file mode 100644
index 6f040f6..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Printer.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common.jsonexplain;
-
-public final class Printer {
-  public static final String lineSeparator = System.getProperty("line.separator");
-  private final StringBuilder builder = new StringBuilder();
-
-  public void print(String string) {
-    builder.append(string);
-  }
-
-  public void println(String string) {
-    builder.append(string);
-    builder.append(lineSeparator);
-  }
-
-  public void println() {
-    builder.append(lineSeparator);
-  }
-  
-  public String toString() {
-    return builder.toString();
-  }
-}
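
A short usage sketch for the Printer shown above: output is accumulated with print/println and
retrieved once at the end via toString(). The sketch is illustrative, not code from this commit,
and the import assumes the pre-revert package; after this revert the equivalent class lives
under org.apache.hadoop.hive.common.jsonexplain.tez.

    import org.apache.hadoop.hive.common.jsonexplain.Printer;

    public class PrinterUsageSketch {
      public static void main(String[] args) {
        Printer printer = new Printer();
        printer.println("Stage-1");
        printer.print("  Map 1");
        printer.println(" llap");
        printer.println();                 // blank line
        System.out.print(printer.toString());
      }
    }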

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Stage.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Stage.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Stage.java
deleted file mode 100644
index d21a565..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Stage.java
+++ /dev/null
@@ -1,262 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common.jsonexplain;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.jsonexplain.Vertex.VertexType;
-import org.json.JSONArray;
-import org.json.JSONException;
-import org.json.JSONObject;
-
-public final class Stage {
-  // the external name is shown at the console
-  String externalName;
-  // the internal name is used to track the stages
-  public final String internalName;
-  //tezJsonParser
-  public final DagJsonParser parser;
-  // upstream stages, e.g., root stage
-  public final List<Stage> parentStages = new ArrayList<>();
-  // downstream stages.
-  public final List<Stage> childStages = new ArrayList<>();
-  public final Map<String, Vertex> vertexs = new LinkedHashMap<>();
-  public final Map<String, String> attrs = new TreeMap<>();
-  Map<Vertex, List<Connection>> tezStageDependency;
-  // some stage may contain only a single operator, e.g., create table operator,
-  // fetch operator.
-  Op op;
-
-  public Stage(String name, DagJsonParser tezJsonParser) {
-    super();
-    internalName = name;
-    externalName = name;
-    parser = tezJsonParser;
-  }
-
-  public void addDependency(JSONObject object, Map<String, Stage> stages) throws JSONException {
-    if (object.has("DEPENDENT STAGES")) {
-      String names = object.getString("DEPENDENT STAGES");
-      for (String name : names.split(",")) {
-        Stage parent = stages.get(name.trim());
-        this.parentStages.add(parent);
-        parent.childStages.add(this);
-      }
-    }
-    if (object.has("CONDITIONAL CHILD TASKS")) {
-      String names = object.getString("CONDITIONAL CHILD TASKS");
-      this.externalName = this.internalName + "(CONDITIONAL CHILD TASKS: " + names + ")";
-      for (String name : names.split(",")) {
-        Stage child = stages.get(name.trim());
-        child.externalName = child.internalName + "(CONDITIONAL)";
-        child.parentStages.add(this);
-        this.childStages.add(child);
-      }
-    }
-  }
-
-  /**
-   * If the stage object contains the framework name (e.g., "Tez"), we extract the
-   * vertices and edges; otherwise we directly extract the operators and/or attributes.
-   * @param object
-   * @throws Exception
-   */
-  public void extractVertex(JSONObject object) throws Exception {
-    if (object.has(this.parser.getFrameworkName())) {
-      this.tezStageDependency = new TreeMap<>();
-      JSONObject tez = (JSONObject) object.get(this.parser.getFrameworkName());
-      JSONObject vertices = tez.getJSONObject("Vertices:");
-      if (tez.has("Edges:")) {
-        JSONObject edges = tez.getJSONObject("Edges:");
-        // iterate for the first time to get all the vertices
-        for (String to : JSONObject.getNames(edges)) {
-          vertexs.put(to, new Vertex(to, vertices.getJSONObject(to), parser));
-        }
-        // iterate a second time to get all the vertex dependencies
-        for (String to : JSONObject.getNames(edges)) {
-          Object o = edges.get(to);
-          Vertex v = vertexs.get(to);
-          // 1 to 1 mapping
-          if (o instanceof JSONObject) {
-            JSONObject obj = (JSONObject) o;
-            String parent = obj.getString("parent");
-            Vertex parentVertex = vertexs.get(parent);
-            if (parentVertex == null) {
-              parentVertex = new Vertex(parent, vertices.getJSONObject(parent), parser);
-              vertexs.put(parent, parentVertex);
-            }
-            String type = obj.getString("type");
-            // for union vertex, we reverse the dependency relationship
-            if (!"CONTAINS".equals(type)) {
-              v.addDependency(new Connection(type, parentVertex));
-              parentVertex.setType(type);
-              parentVertex.children.add(v);
-            } else {
-              parentVertex.addDependency(new Connection(type, v));
-              v.children.add(parentVertex);
-            }
-            this.tezStageDependency.put(v, Arrays.asList(new Connection(type, parentVertex)));
-          } else {
-            // 1 to many mapping
-            JSONArray from = (JSONArray) o;
-            List<Connection> list = new ArrayList<>();
-            for (int index = 0; index < from.length(); index++) {
-              JSONObject obj = from.getJSONObject(index);
-              String parent = obj.getString("parent");
-              Vertex parentVertex = vertexs.get(parent);
-              if (parentVertex == null) {
-                parentVertex = new Vertex(parent, vertices.getJSONObject(parent), parser);
-                vertexs.put(parent, parentVertex);
-              }
-              String type = obj.getString("type");
-              if (!"CONTAINS".equals(type)) {
-                v.addDependency(new Connection(type, parentVertex));
-                parentVertex.setType(type);
-                parentVertex.children.add(v);
-              } else {
-                parentVertex.addDependency(new Connection(type, v));
-                v.children.add(parentVertex);
-              }
-              list.add(new Connection(type, parentVertex));
-            }
-            this.tezStageDependency.put(v, list);
-          }
-        }
-      } else {
-        for (String vertexName : JSONObject.getNames(vertices)) {
-          vertexs.put(vertexName, new Vertex(vertexName, vertices.getJSONObject(vertexName), parser));
-        }
-      }
-      // extract the opTree in each Map or Reduce vertex
-      for (Vertex v : vertexs.values()) {
-        if (v.vertexType == VertexType.MAP || v.vertexType == VertexType.REDUCE) {
-          v.extractOpTree();
-          v.checkMultiReduceOperator();
-        }
-      }
-    } else {
-      String[] names = JSONObject.getNames(object);
-      if (names != null) {
-        for (String name : names) {
-          if (name.contains("Operator")) {
-            this.op = extractOp(name, object.getJSONObject(name));
-          } else {
-            if (!object.get(name).toString().isEmpty()) {
-              attrs.put(name, object.get(name).toString());
-            }
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Handles stages that contain only a single operator, e.g., the create table
-   * operator or the fetch operator.
-   * @param opName
-   * @param opObj
-   * @return
-   * @throws Exception
-   */
-  Op extractOp(String opName, JSONObject opObj) throws Exception {
-    Map<String, String> attrs = new TreeMap<>();
-    Vertex v = null;
-    if (opObj.length() > 0) {
-      String[] names = JSONObject.getNames(opObj);
-      for (String name : names) {
-        Object o = opObj.get(name);
-        if (isPrintable(o) && !o.toString().isEmpty()) {
-          attrs.put(name, o.toString());
-        } else if (o instanceof JSONObject) {
-          JSONObject attrObj = (JSONObject) o;
-          if (attrObj.length() > 0) {
-            if (name.equals("Processor Tree:")) {
-              JSONObject object = new JSONObject(new LinkedHashMap<>());
-              object.put(name, attrObj);
-              v = new Vertex(null, object, parser);
-              v.extractOpTree();
-            } else {
-              for (String attrName : JSONObject.getNames(attrObj)) {
-                if (!attrObj.get(attrName).toString().isEmpty()) {
-                  attrs.put(attrName, attrObj.get(attrName).toString());
-                }
-              }
-            }
-          }
-        } else {
-          throw new Exception("Unsupported object in " + this.internalName);
-        }
-      }
-    }
-    Op op = new Op(opName, null, null, null, attrs, null, v, parser);
-    if (v != null) {
-      parser.addInline(op, new Connection(null, v));
-    }
-    return op;
-  }
-
-  private boolean isPrintable(Object val) {
-    if (val instanceof Boolean || val instanceof String || val instanceof Integer
-        || val instanceof Long || val instanceof Byte || val instanceof Float
-        || val instanceof Double || val instanceof Path) {
-      return true;
-    }
-    if (val != null && val.getClass().isPrimitive()) {
-      return true;
-    }
-    return false;
-  }
-
-  public void print(Printer printer, int indentFlag) throws Exception {
-    // print stagename
-    if (parser.printSet.contains(this)) {
-      printer.println(DagJsonParser.prefixString(indentFlag) + " Please refer to the previous "
-          + externalName);
-      return;
-    }
-    parser.printSet.add(this);
-    printer.println(DagJsonParser.prefixString(indentFlag) + externalName);
-    // print vertexes
-    indentFlag++;
-    for (Vertex candidate : this.vertexs.values()) {
-      if (!parser.isInline(candidate) && candidate.children.isEmpty()) {
-        candidate.print(printer, indentFlag, null, null);
-      }
-    }
-    if (!attrs.isEmpty()) {
-      printer.println(DagJsonParser.prefixString(indentFlag)
-          + DagJsonParserUtils.attrsToString(attrs));
-    }
-    if (op != null) {
-      op.print(printer, indentFlag, false);
-    }
-    indentFlag++;
-    // print dependent stages
-    for (Stage stage : this.parentStages) {
-      stage.print(printer, indentFlag);
-    }
-  }
-}
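
A compact sketch of the edge handling in the deleted Stage.extractVertex above: a JSONObject
value under "Edges:" is a single parent edge, a JSONArray holds several, and a CONTAINS edge
(union) reverses the parent/child direction. The sketch is illustrative, not code from this
commit; vertex wiring is replaced with println, and the vertex names and edge types are made-up
sample data, though org.json is the same library the deleted code uses.

    import org.json.JSONArray;
    import org.json.JSONObject;

    public class EdgeParsingSketch {
      public static void main(String[] args) throws Exception {
        JSONObject edges = new JSONObject()
            .put("Reducer 2", new JSONObject().put("parent", "Map 1").put("type", "SIMPLE_EDGE"))
            .put("Union 3", new JSONArray()
                .put(new JSONObject().put("parent", "Map 4").put("type", "CONTAINS"))
                .put(new JSONObject().put("parent", "Map 5").put("type", "CONTAINS")));
        for (String to : JSONObject.getNames(edges)) {
          Object o = edges.get(to);
          // normalize the 1-to-1 case (JSONObject) and the 1-to-many case (JSONArray)
          JSONArray list = (o instanceof JSONArray) ? (JSONArray) o : new JSONArray().put(o);
          for (int i = 0; i < list.length(); i++) {
            JSONObject edge = list.getJSONObject(i);
            String parent = edge.getString("parent");
            String type = edge.getString("type");
            if ("CONTAINS".equals(type)) {
              // union: the dependency direction is reversed, as in the code above
              System.out.println(parent + " <- " + to + " (" + type + ")");
            } else {
              System.out.println(to + " <- " + parent + " (" + type + ")");
            }
          }
        }
      }
    }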

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Vertex.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Vertex.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Vertex.java
deleted file mode 100644
index c93059d..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Vertex.java
+++ /dev/null
@@ -1,323 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common.jsonexplain;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.apache.hadoop.hive.common.jsonexplain.Op.OpType;
-import org.codehaus.jackson.JsonParseException;
-import org.codehaus.jackson.map.JsonMappingException;
-import org.json.JSONArray;
-import org.json.JSONException;
-import org.json.JSONObject;
-
-public final class Vertex implements Comparable<Vertex>{
-  public final String name;
-  //tezJsonParser
-  public final DagJsonParser parser;
-  // vertex's parent connections.
-  public final List<Connection> parentConnections = new ArrayList<>();
-  // vertex's child vertices.
-  public final List<Vertex> children = new ArrayList<>();
-  // the jsonObject for this vertex
-  public final JSONObject vertexObject;
-  // whether this vertex is a dummy (one that does not really exist but is created),
-  // e.g., a dummy vertex for a mergejoin branch
-  public boolean dummy;
-  // the rootOps in this vertex
-  public final List<Op> rootOps = new ArrayList<>();
-  // we create a dummy vertex for a mergejoin branch for a self join if this
-  // vertex is a mergejoin
-  public final List<Vertex> mergeJoinDummyVertexs = new ArrayList<>();
-  // the number of reduce output operators in this vertex
-  public int numReduceOp = 0;
-  // execution mode
-  public String executionMode = "";
-  // tagToInput for reduce work
-  public Map<String, String> tagToInput = new LinkedHashMap<>();
-  // tag
-  public String tag;
-
-  public static enum VertexType {
-    MAP, REDUCE, UNION, UNKNOWN
-  };
-  public VertexType vertexType;
-
-  public static enum EdgeType {
-    BROADCAST, SHUFFLE, MULTICAST, PARTITION_ONLY_SHUFFLE, UNKNOWN
-  };
-  public String edgeType;
-
-  public Vertex(String name, JSONObject vertexObject, DagJsonParser dagJsonParser) {
-    super();
-    this.name = name;
-    if (this.name != null) {
-      if (this.name.contains("Map")) {
-        this.vertexType = VertexType.MAP;
-      } else if (this.name.contains("Reduce")) {
-        this.vertexType = VertexType.REDUCE;
-      } else if (this.name.contains("Union")) {
-        this.vertexType = VertexType.UNION;
-      } else {
-        this.vertexType = VertexType.UNKNOWN;
-      }
-    } else {
-      this.vertexType = VertexType.UNKNOWN;
-    }
-    this.dummy = false;
-    this.vertexObject = vertexObject;
-    parser = dagJsonParser;
-  }
-
-  public void addDependency(Connection connection) throws JSONException {
-    this.parentConnections.add(connection);
-  }
-
-  /**
-   * We assume that there is a single top-level Map Operator Tree or a
-   * Reduce Operator Tree in a vertex.
-   * @throws JSONException
-   * @throws JsonParseException
-   * @throws JsonMappingException
-   * @throws IOException
-   * @throws Exception
-   */
-  public void extractOpTree() throws JSONException, JsonParseException, JsonMappingException,
-      IOException, Exception {
-    if (vertexObject.length() != 0) {
-      for (String key : JSONObject.getNames(vertexObject)) {
-        if (key.equals("Map Operator Tree:")) {
-          extractOp(vertexObject.getJSONArray(key).getJSONObject(0));
-        } else if (key.equals("Reduce Operator Tree:") || key.equals("Processor Tree:")) {
-          extractOp(vertexObject.getJSONObject(key));
-        } else if (key.equals("Join:")) {
-          // this is the case when we have a map-side SMB join
-          // one input of the join is treated as a dummy vertex
-          JSONArray array = vertexObject.getJSONArray(key);
-          for (int index = 0; index < array.length(); index++) {
-            JSONObject mpOpTree = array.getJSONObject(index);
-            Vertex v = new Vertex(null, mpOpTree, parser);
-            v.extractOpTree();
-            v.dummy = true;
-            mergeJoinDummyVertexs.add(v);
-          }
-        } else if (key.equals("Merge File Operator")) {
-          JSONObject opTree = vertexObject.getJSONObject(key);
-          if (opTree.has("Map Operator Tree:")) {
-            extractOp(opTree.getJSONArray("Map Operator Tree:").getJSONObject(0));
-          } else {
-            throw new Exception("Merge File Operator does not have a Map Operator Tree");
-          }
-        } else if (key.equals("Execution mode:")) {
-          executionMode = " " + vertexObject.getString(key);
-        } else if (key.equals("tagToInput:")) {
-          JSONObject tagToInput = vertexObject.getJSONObject(key);
-          for (String tag : JSONObject.getNames(tagToInput)) {
-            this.tagToInput.put(tag, (String) tagToInput.get(tag));
-          }
-        } else if (key.equals("tag:")) {
-          this.tag = vertexObject.getString(key);
-        } else if (key.equals("Local Work:")) {
-          extractOp(vertexObject.getJSONObject(key));
-        } else {
-          throw new Exception("Unsupported operator tree in vertex " + this.name);
-        }
-      }
-    }
-  }
-
-  /**
-   * Assumption: each operator only has one parent but may have many children.
-   * @param operator
-   * @return
-   * @throws JSONException
-   * @throws JsonParseException
-   * @throws JsonMappingException
-   * @throws IOException
-   * @throws Exception
-   */
-  Op extractOp(JSONObject operator) throws JSONException, JsonParseException, JsonMappingException,
-      IOException, Exception {
-    String[] names = JSONObject.getNames(operator);
-    if (names.length != 1) {
-      throw new Exception("Expect only one operator in " + operator.toString());
-    } else {
-      String opName = names[0];
-      JSONObject attrObj = (JSONObject) operator.get(opName);
-      Map<String, String> attrs = new TreeMap<>();
-      List<Op> children = new ArrayList<>();
-      String id = null;
-      String outputVertexName = null;
-      if (JSONObject.getNames(attrObj) != null) {
-        for (String attrName : JSONObject.getNames(attrObj)) {
-          if (attrName.equals("children")) {
-            Object childrenObj = attrObj.get(attrName);
-            if (childrenObj instanceof JSONObject) {
-              if (((JSONObject) childrenObj).length() != 0) {
-                children.add(extractOp((JSONObject) childrenObj));
-              }
-            } else if (childrenObj instanceof JSONArray) {
-              if (((JSONArray) childrenObj).length() != 0) {
-                JSONArray array = ((JSONArray) childrenObj);
-                for (int index = 0; index < array.length(); index++) {
-                  children.add(extractOp(array.getJSONObject(index)));
-                }
-              }
-            } else {
-              throw new Exception("Unsupported operator " + this.name
-                      + "'s children operator is neither a jsonobject nor a jsonarray");
-            }
-          } else {
-            if (attrName.equals("OperatorId:")) {
-              id = attrObj.get(attrName).toString();
-            } else if (attrName.equals("outputname:")) {
-              outputVertexName = attrObj.get(attrName).toString();
-            } else {
-              if (!attrObj.get(attrName).toString().isEmpty()) {
-                attrs.put(attrName, attrObj.get(attrName).toString());
-              }
-            }
-          }
-        }
-      }
-      Op op = new Op(opName, id, outputVertexName, children, attrs, operator, this, parser);
-      if (!children.isEmpty()) {
-        for (Op child : children) {
-          child.parent = op;
-        }
-      } else {
-        this.rootOps.add(op);
-      }
-      return op;
-    }
-  }
-
-  public void print(Printer printer, int indentFlag, String type, Vertex callingVertex)
-      throws JSONException, Exception {
-    // print vertexname
-    if (parser.printSet.contains(this) && numReduceOp <= 1) {
-      if (type != null) {
-        printer.println(DagJsonParser.prefixString(indentFlag, "<-")
-            + " Please refer to the previous " + this.name + " [" + type + "]");
-      } else {
-        printer.println(DagJsonParser.prefixString(indentFlag, "<-")
-            + " Please refer to the previous " + this.name);
-      }
-      return;
-    }
-    parser.printSet.add(this);
-    if (type != null) {
-      printer.println(DagJsonParser.prefixString(indentFlag, "<-") + this.name + " [" + type + "]"
-          + this.executionMode);
-    } else if (this.name != null) {
-      printer.println(DagJsonParser.prefixString(indentFlag) + this.name + this.executionMode);
-    }
-    // print operators
-    if (numReduceOp > 1 && !(callingVertex.vertexType == VertexType.UNION)) {
-      // find the right op
-      Op choose = null;
-      for (Op op : this.rootOps) {
-        if (op.outputVertexName.equals(callingVertex.name)) {
-          choose = op;
-        }
-      }
-      if (choose != null) {
-        choose.print(printer, indentFlag, false);
-      } else {
-        throw new Exception("Can not find the right reduce output operator for vertex " + this.name);
-      }
-    } else {
-      for (Op op : this.rootOps) {
-        // dummy vertex is treated as a branch of a join operator
-        if (this.dummy) {
-          op.print(printer, indentFlag, true);
-        } else {
-          op.print(printer, indentFlag, false);
-        }
-      }
-    }
-    if (vertexType == VertexType.UNION) {
-      // print dependent vertexs
-      indentFlag++;
-      for (int index = 0; index < this.parentConnections.size(); index++) {
-        Connection connection = this.parentConnections.get(index);
-        connection.from.print(printer, indentFlag, connection.type, this);
-      }
-    }
-  }
-
-  /**
-   * We check if a vertex has multiple reduce operators.
-   */
-  public void checkMultiReduceOperator() {
-    // only relevant when this vertex has more than one root operator
-    if (this.rootOps.size() < 2) {
-      return;
-    }
-    // count how many root operators are reduce output operators
-    for (Op op : this.rootOps) {
-      if (op.type == OpType.RS) {
-        numReduceOp++;
-      }
-    }
-  }
-
-  public void setType(String type) {
-    this.edgeType = this.parser.mapEdgeType(type);
-  }
-
-  // The following code should go away once HIVE-11075 introduces topological ordering
-  @Override
-  public int compareTo(Vertex o) {
-    // print the vertex that has more reduce sink operators before the one that has fewer
-    if (numReduceOp != o.numReduceOp) {
-      return -(numReduceOp - o.numReduceOp);
-    } else {
-      return this.name.compareTo(o.name);
-    }
-  }
-
-  public Op getJoinRSOp(Vertex joinVertex) {
-    if (rootOps.size() == 0) {
-      return null;
-    } else if (rootOps.size() == 1) {
-      if (rootOps.get(0).type == OpType.RS) {
-        return rootOps.get(0);
-      } else {
-        return null;
-      }
-    } else {
-      for (Op op : rootOps) {
-        if (op.type == OpType.RS) {
-          if (op.outputVertexName.equals(joinVertex.name)) {
-            return op;
-          }
-        }
-      }
-      return null;
-    }
-  }
-}
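
A small sketch of the ordering implemented by the deleted Vertex.compareTo above: vertices with
more reduce output root operators sort first, and ties fall back to name order. A tiny stand-in
class is used here because the real Vertex needs a fully parsed plan; the class, names, and
counts below are sample data, not code from this commit.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class VertexOrderSketch {
      static final class V implements Comparable<V> {
        final String name;
        final int numReduceOp;
        V(String name, int numReduceOp) { this.name = name; this.numReduceOp = numReduceOp; }
        @Override
        public int compareTo(V o) {
          // same rule as the deleted compareTo: more reduce ops first, then name order
          if (numReduceOp != o.numReduceOp) {
            return -(numReduceOp - o.numReduceOp);
          }
          return name.compareTo(o.name);
        }
        @Override
        public String toString() { return name + "(rs=" + numReduceOp + ")"; }
      }

      public static void main(String[] args) {
        List<V> vs = new ArrayList<>();
        vs.add(new V("Reducer 3", 0));
        vs.add(new V("Reducer 2", 2));
        vs.add(new V("Map 1", 0));
        Collections.sort(vs);
        System.out.println(vs);  // [Reducer 2(rs=2), Map 1(rs=0), Reducer 3(rs=0)]
      }
    }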

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/jsonexplain/spark/SparkJsonParser.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/spark/SparkJsonParser.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/spark/SparkJsonParser.java
deleted file mode 100644
index 9485aa4..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/spark/SparkJsonParser.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common.jsonexplain.spark;
-
-import org.apache.hadoop.hive.common.jsonexplain.DagJsonParser;
-
-
-public class SparkJsonParser extends DagJsonParser {
-
-  @Override
-  public String mapEdgeType(String edgeName) {
-    return edgeName;
-  }
-
-  @Override
-  public String getFrameworkName() {
-    return "Spark";
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java
new file mode 100644
index 0000000..d341cb1
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common.jsonexplain.tez;
+
+public final class Connection {
+  public final String type;
+  public final Vertex from;
+
+  public Connection(String type, Vertex from) {
+    super();
+    this.type = type;
+    this.from = from;
+  }
+}


[51/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

This reverts commit 187eb760dbd7c8c345bc3613b27cadafd3cdd102.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ed64a74e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ed64a74e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ed64a74e

Branch: refs/heads/hive-14535
Commit: ed64a74e864e17c615fc8dede2a5272d3a18bcb3
Parents: 187eb76
Author: Wei Zheng <we...@apache.org>
Authored: Mon May 8 13:42:07 2017 -0700
Committer: Wei Zheng <we...@apache.org>
Committed: Mon May 8 13:42:07 2017 -0700

----------------------------------------------------------------------
 .gitignore                                      |     2 -
 .travis.yml                                     |     4 +-
 RELEASE_NOTES.txt                               |   649 +-
 accumulo-handler/pom.xml                        |    22 +-
 .../accumulo/AccumuloDefaultIndexScanner.java   |   222 -
 .../hive/accumulo/AccumuloIndexLexicoder.java   |   109 -
 .../hive/accumulo/AccumuloIndexScanner.java     |    56 -
 .../accumulo/AccumuloIndexScannerException.java |    39 -
 .../hive/accumulo/AccumuloStorageHandler.java   |   155 +-
 .../accumulo/mr/AccumuloIndexDefinition.java    |    79 -
 .../mr/AccumuloIndexedOutputFormat.java         |   334 -
 .../mr/HiveAccumuloTableOutputFormat.java       |    62 +-
 .../accumulo/mr/IndexOutputConfigurator.java    |    75 -
 .../hadoop/hive/accumulo/mr/package-info.java   |     4 -
 .../predicate/AccumuloPredicateHandler.java     |    87 +-
 .../predicate/AccumuloRangeGenerator.java       |   123 +-
 .../predicate/PrimitiveComparisonFilter.java    |    13 +-
 .../accumulo/serde/AccumuloIndexParameters.java |   100 -
 .../accumulo/serde/AccumuloSerDeParameters.java |    19 -
 .../hive/accumulo/serde/package-info.java       |     4 -
 .../TestAccumuloDefaultIndexScanner.java        |   218 -
 .../accumulo/TestAccumuloIndexLexicoder.java    |   177 -
 .../accumulo/TestAccumuloIndexParameters.java   |   112 -
 .../accumulo/TestAccumuloStorageHandler.java    |     3 -
 .../predicate/TestAccumuloPredicateHandler.java |    11 +-
 .../predicate/TestAccumuloRangeGenerator.java   |   201 +-
 .../src/test/queries/positive/accumulo_index.q  |    44 -
 .../test/results/positive/accumulo_index.q.out  |   180 -
 beeline/pom.xml                                 |     2 +-
 .../java/org/apache/hive/beeline/BeeLine.java   |    72 -
 .../org/apache/hive/beeline/BeeLineOpts.java    |    15 +-
 .../java/org/apache/hive/beeline/Commands.java  |   103 +-
 .../apache/hive/beeline/HiveSchemaHelper.java   |    12 +-
 .../org/apache/hive/beeline/HiveSchemaTool.java |    71 +-
 .../UserHS2ConnectionFileParser.java            |     2 +-
 .../logs/BeelineInPlaceUpdateStream.java        |    17 -
 beeline/src/main/resources/BeeLine.properties   |     1 -
 .../hive/beeline/TestBeelineArgParsing.java     |    12 -
 .../apache/hive/beeline/TestHiveSchemaTool.java |    17 -
 cli/pom.xml                                     |     2 +-
 .../org/apache/hadoop/hive/cli/RCFileCat.java   |    13 +-
 .../apache/hadoop/hive/cli/TestRCFileCat.java   |     4 +-
 common/pom.xml                                  |    46 +-
 .../hive/common/CopyOnFirstWriteProperties.java |   344 -
 .../apache/hadoop/hive/common/FileUtils.java    |   146 +-
 .../apache/hadoop/hive/common/JvmMetrics.java   |   187 -
 .../hadoop/hive/common/JvmMetricsInfo.java      |    65 -
 .../org/apache/hadoop/hive/common/LogUtils.java |    35 +-
 .../hadoop/hive/common/MemoryEstimate.java      |    29 -
 .../hadoop/hive/common/StatsSetupConst.java     |     2 +-
 .../hadoop/hive/common/StringInternUtils.java   |    16 +-
 .../hive/common/ValidCompactorTxnList.java      |    11 +-
 .../hadoop/hive/common/ValidReadTxnList.java    |   115 +-
 .../apache/hadoop/hive/common/ValidTxnList.java |    18 +-
 .../hive/common/jsonexplain/Connection.java     |    35 -
 .../hive/common/jsonexplain/DagJsonParser.java  |   167 -
 .../common/jsonexplain/DagJsonParserUtils.java  |    53 -
 .../common/jsonexplain/JsonParserFactory.java   |     4 -
 .../hadoop/hive/common/jsonexplain/Op.java      |   358 -
 .../hadoop/hive/common/jsonexplain/Printer.java |    41 -
 .../hadoop/hive/common/jsonexplain/Stage.java   |   262 -
 .../hadoop/hive/common/jsonexplain/Vertex.java  |   323 -
 .../jsonexplain/spark/SparkJsonParser.java      |    35 -
 .../hive/common/jsonexplain/tez/Connection.java |    30 +
 .../hadoop/hive/common/jsonexplain/tez/Op.java  |   356 +
 .../hive/common/jsonexplain/tez/Printer.java    |    41 +
 .../hive/common/jsonexplain/tez/Stage.java      |   262 +
 .../common/jsonexplain/tez/TezJsonParser.java   |   153 +-
 .../jsonexplain/tez/TezJsonParserUtils.java     |    53 +
 .../hive/common/jsonexplain/tez/Vertex.java     |   331 +
 .../hadoop/hive/common/log/InPlaceUpdate.java   |    17 -
 .../hadoop/hive/common/log/ProgressMonitor.java |    17 -
 .../metrics/metrics2/CodahaleMetrics.java       |   192 +-
 .../metrics/metrics2/CodahaleReporter.java      |    29 -
 .../metrics2/ConsoleMetricsReporter.java        |    55 -
 .../metrics/metrics2/JmxMetricsReporter.java    |    56 -
 .../metrics2/JsonFileMetricsReporter.java       |   136 -
 .../metrics/metrics2/Metrics2Reporter.java      |    62 -
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   131 +-
 .../apache/hadoop/hive/conf/HiveConfUtil.java   |    13 +-
 .../apache/hadoop/hive/ql/log/PerfLogger.java   |     1 -
 .../java/org/apache/hive/http/ConfServlet.java  |    10 +-
 .../java/org/apache/hive/http/HttpServer.java   |    69 +-
 .../hadoop/hive/common/TestFileUtils.java       |     1 +
 .../hive/common/TestValidReadTxnList.java       |    29 +-
 .../metrics/metrics2/TestCodahaleMetrics.java   |     7 +-
 .../metrics2/TestCodahaleReportersConf.java     |   145 -
 contrib/pom.xml                                 |     2 +-
 .../clientnegative/case_with_row_sequence.q.out |    16 +-
 data/files/e011_01.txt                          |     4 -
 data/files/events.txt                           |   200 -
 .../metastore_export/csv/TABLE_PARAMS.txt       |   143 +
 .../metastore_export/csv/TAB_COL_STATS.txt      |   425 +
 data/files/vector_ptf_part_simple.txt           |    40 -
 docs/changes/ChangesFancyStyle.css              |   170 +
 docs/changes/ChangesSimpleStyle.css             |    49 +
 docs/changes/changes2html.pl                    |   282 +
 docs/site.css                                   |   305 +
 docs/stylesheets/project.xml                    |    41 +
 docs/stylesheets/site.vsl                       |   317 +
 docs/velocity.properties                        |    17 +
 docs/xdocs/index.xml                            |    38 +
 docs/xdocs/language_manual/cli.xml              |   208 +
 .../data-manipulation-statements.xml            |   234 +
 docs/xdocs/language_manual/joins.xml            |   212 +
 docs/xdocs/language_manual/var_substitution.xml |   130 +
 .../working_with_bucketed_tables.xml            |    87 +
 docs/xdocs/udf/reflect.xml                      |    51 +
 druid-handler/pom.xml                           |    18 +-
 .../hadoop/hive/druid/DruidStorageHandler.java  |    27 +-
 .../hive/druid/DruidStorageHandlerUtils.java    |    72 +-
 .../hadoop/hive/druid/io/DruidOutputFormat.java |    13 +-
 .../druid/io/DruidQueryBasedInputFormat.java    |    12 +-
 .../serde/DruidGroupByQueryRecordReader.java    |     8 +-
 .../hive/druid/TestDruidStorageHandler.java     |    78 +-
 .../hive/ql/io/TestDruidRecordWriter.java       |     8 +-
 errata.txt                                      |     4 -
 hbase-handler/pom.xml                           |     2 +-
 .../src/test/queries/negative/hbase_ddl.q       |     9 -
 .../src/test/queries/positive/hbase_ddl.q       |    20 -
 .../src/test/queries/positive/hbase_queries.q   |     1 -
 .../src/test/results/negative/hbase_ddl.q.out   |    29 -
 .../src/test/results/positive/hbase_ddl.q.out   |   186 -
 hcatalog/build.properties                       |     2 +-
 hcatalog/core/pom.xml                           |    12 +-
 .../apache/hive/hcatalog/cli/TestPermsGrp.java  |     6 +-
 hcatalog/hcatalog-pig-adapter/pom.xml           |     2 +-
 hcatalog/pom.xml                                |    24 +-
 hcatalog/server-extensions/pom.xml              |     2 +-
 .../listener/DbNotificationListener.java        |    85 +-
 .../MetaStoreEventListenerConstants.java        |    33 -
 hcatalog/streaming/pom.xml                      |     2 +-
 .../hcatalog/streaming/StrictRegexWriter.java   |   188 -
 .../hive/hcatalog/streaming/TestStreaming.java  |   115 +-
 hcatalog/webhcat/java-client/pom.xml            |     2 +-
 .../hive/hcatalog/api/TestHCatClient.java       |     2 +-
 hcatalog/webhcat/svr/pom.xml                    |    76 +-
 .../hive/hcatalog/templeton/AppConfig.java      |    37 -
 .../hcatalog/templeton/DeleteDelegator.java     |     6 +-
 .../hive/hcatalog/templeton/HiveDelegator.java  |     2 +-
 .../hive/hcatalog/templeton/JarDelegator.java   |     2 +-
 .../hive/hcatalog/templeton/JobCallable.java    |   115 -
 .../hcatalog/templeton/JobRequestExecutor.java  |   341 -
 .../hcatalog/templeton/LauncherDelegator.java   |   231 +-
 .../hive/hcatalog/templeton/ListDelegator.java  |   148 +-
 .../apache/hive/hcatalog/templeton/Main.java    |    37 +-
 .../hive/hcatalog/templeton/PigDelegator.java   |     2 +-
 .../hcatalog/templeton/SecureProxySupport.java  |     3 -
 .../apache/hive/hcatalog/templeton/Server.java  |    82 +-
 .../hive/hcatalog/templeton/SqoopDelegator.java |     2 +-
 .../hcatalog/templeton/StatusDelegator.java     |    69 +-
 .../hcatalog/templeton/StreamingDelegator.java  |     2 +-
 .../templeton/TooManyRequestsException.java     |    35 -
 .../templeton/tool/TempletonControllerJob.java  |    11 +-
 .../hcatalog/templeton/tool/TempletonUtils.java |     1 -
 .../ConcurrentJobRequestsTestBase.java          |   231 -
 .../templeton/MockAnswerTestHelper.java         |    56 -
 .../templeton/TestConcurrentJobRequests.java    |    79 -
 .../TestConcurrentJobRequestsThreads.java       |   134 -
 ...tConcurrentJobRequestsThreadsAndTimeout.java |   374 -
 hplsql/pom.xml                                  |     2 +-
 .../main/java/org/apache/hive/hplsql/Udf.java   |    26 +-
 .../org/apache/hive/hplsql/TestHplsqlUdf.java   |    59 -
 itests/custom-serde/pom.xml                     |     2 +-
 itests/custom-udfs/pom.xml                      |     2 +-
 itests/custom-udfs/udf-classloader-udf1/pom.xml |     2 +-
 itests/custom-udfs/udf-classloader-udf2/pom.xml |     2 +-
 itests/custom-udfs/udf-classloader-util/pom.xml |     2 +-
 .../udf-vectorized-badexample/pom.xml           |     2 +-
 itests/hcatalog-unit/pom.xml                    |     2 +-
 .../listener/DummyRawStoreFailEvent.java        |     7 -
 .../listener/TestDbNotificationListener.java    |   190 -
 itests/hive-blobstore/pom.xml                   |     2 +-
 ...import_addpartition_blobstore_to_blobstore.q |    45 -
 .../import_addpartition_blobstore_to_local.q    |    44 -
 ...import_addpartition_blobstore_to_warehouse.q |    41 -
 .../import_addpartition_local_to_blobstore.q    |    44 -
 .../import_blobstore_to_blobstore.q             |    30 -
 .../import_blobstore_to_blobstore_nonpart.q     |    25 -
 .../clientpositive/import_blobstore_to_local.q  |    30 -
 .../import_blobstore_to_warehouse.q             |    28 -
 .../import_blobstore_to_warehouse_nonpart.q     |    23 -
 .../clientpositive/import_local_to_blobstore.q  |    31 -
 .../insert_blobstore_to_blobstore.q             |    29 -
 .../insert_empty_into_blobstore.q               |    53 -
 .../test/queries/clientpositive/orc_buckets.q   |    31 -
 .../queries/clientpositive/orc_format_nonpart.q |    30 -
 .../queries/clientpositive/orc_format_part.q    |    67 -
 .../clientpositive/orc_nonstd_partitions_loc.q  |   100 -
 .../queries/clientpositive/rcfile_buckets.q     |    31 -
 .../clientpositive/rcfile_format_nonpart.q      |    30 -
 .../queries/clientpositive/rcfile_format_part.q |    67 -
 .../rcfile_nonstd_partitions_loc.q              |   100 -
 .../clientpositive/zero_rows_blobstore.q        |    19 -
 .../queries/clientpositive/zero_rows_hdfs.q     |    18 -
 .../src/test/resources/hive-site.xml            |     5 -
 ...rt_addpartition_blobstore_to_blobstore.q.out |   283 -
 ...import_addpartition_blobstore_to_local.q.out |   283 -
 ...rt_addpartition_blobstore_to_warehouse.q.out |   271 -
 ...import_addpartition_local_to_blobstore.q.out |   277 -
 .../import_blobstore_to_blobstore.q.out         |   161 -
 .../import_blobstore_to_blobstore_nonpart.q.out |   103 -
 .../import_blobstore_to_local.q.out             |   161 -
 .../import_blobstore_to_warehouse.q.out         |   157 -
 .../import_blobstore_to_warehouse_nonpart.q.out |    99 -
 .../import_local_to_blobstore.q.out             |   159 -
 .../insert_blobstore_to_blobstore.q.out         |   110 -
 .../insert_empty_into_blobstore.q.out           |   155 -
 .../clientpositive/insert_into_table.q.out      |    27 -
 .../results/clientpositive/orc_buckets.q.out    |   183 -
 .../clientpositive/orc_format_nonpart.q.out     |   195 -
 .../clientpositive/orc_format_part.q.out        |   274 -
 .../orc_nonstd_partitions_loc.q.out             |   513 -
 .../results/clientpositive/rcfile_buckets.q.out |   183 -
 .../clientpositive/rcfile_format_nonpart.q.out  |   195 -
 .../clientpositive/rcfile_format_part.q.out     |   274 -
 .../rcfile_nonstd_partitions_loc.q.out          |   533 -
 .../write_final_output_blobstore.q.out          |    20 -
 .../clientpositive/zero_rows_blobstore.q.out    |    91 -
 .../results/clientpositive/zero_rows_hdfs.q.out |    89 -
 itests/hive-jmh/pom.xml                         |     2 +-
 itests/hive-minikdc/pom.xml                     |     2 +-
 .../apache/hive/minikdc/TestSSLWithMiniKdc.java |   101 -
 itests/hive-unit-hadoop2/pom.xml                |     2 +-
 .../hive/ql/security/TestExtendedAcls.java      |   166 +
 ...edMetastoreAuthorizationProviderWithACL.java |     1 +
 itests/hive-unit/pom.xml                        |     2 +-
 .../java/org/hadoop/hive/jdbc/SSLTestUtils.java |   103 -
 .../metastore/TestEmbeddedHiveMetaStore.java    |     2 +
 .../hive/metastore/TestHiveMetaStore.java       |    45 +-
 .../hive/metastore/TestHiveMetaStoreTxns.java   |     8 +-
 .../hive/metastore/TestMetastoreVersion.java    |     4 +-
 .../hive/metastore/TestReplChangeManager.java   |    12 +-
 .../hadoop/hive/ql/TestAutoPurgeTables.java     |   436 -
 ...estDDLWithRemoteMetastoreSecondNamenode.java |    31 -
 .../hadoop/hive/ql/TestLocationQueries.java     |     8 +-
 .../ql/TestMetaStoreLimitPartitionRequest.java  |   319 -
 .../hive/ql/TestReplicationScenarios.java       |   824 +-
 .../hive/ql/security/FolderPermissionBase.java  |   792 +
 .../hive/ql/security/TestFolderPermissions.java |    52 +
 ...StorageBasedMetastoreAuthorizationDrops.java |   205 +
 ...StorageBasedMetastoreAuthorizationReads.java |   127 +
 .../hive/ql/txn/compactor/TestCompactor.java    |    90 +-
 .../hive/beeline/TestBeeLineWithArgs.java       |    13 -
 .../org/apache/hive/beeline/TestSchemaTool.java |     9 +-
 .../org/apache/hive/jdbc/TestJdbcDriver2.java   |    48 +-
 .../apache/hive/jdbc/TestJdbcWithMiniHS2.java   |    44 -
 .../test/java/org/apache/hive/jdbc/TestSSL.java |   150 +-
 .../operation/TestOperationLoggingLayout.java   |    16 +
 itests/pom.xml                                  |     2 +-
 itests/qtest-accumulo/pom.xml                   |     2 +-
 itests/qtest-spark/pom.xml                      |     2 +-
 itests/qtest/pom.xml                            |     2 +-
 .../hadoop/hive/cli/TestBeeLineDriver.java      |     4 +-
 .../test/resources/testconfiguration.properties |    44 +-
 itests/test-serde/pom.xml                       |     2 +-
 itests/util/pom.xml                             |     2 +-
 .../control/AbstractCoreBlobstoreCliDriver.java |    11 +-
 .../hadoop/hive/cli/control/CliConfigs.java     |     1 -
 .../hive/cli/control/CoreAccumuloCliDriver.java |    10 +-
 .../hive/cli/control/CoreBeeLineDriver.java     |    55 +-
 .../hadoop/hive/cli/control/CoreCliDriver.java  |    12 +-
 .../hive/cli/control/CoreCompareCliDriver.java  |    12 +-
 .../hive/cli/control/CoreHBaseCliDriver.java    |     9 +-
 .../cli/control/CoreHBaseNegativeCliDriver.java |     9 +-
 .../hive/cli/control/CoreNegativeCliDriver.java |    12 +-
 .../hive/cli/control/CorePerfCliDriver.java     |    12 +-
 .../hadoop/hive/ql/QTestProcessExecResult.java  |     6 +-
 .../org/apache/hadoop/hive/ql/QTestUtil.java    |   260 +-
 .../hadoop/hive/ql/parse/CoreParseNegative.java |    19 +-
 .../org/apache/hive/beeline/Parallelized.java   |    64 -
 .../java/org/apache/hive/beeline/QFile.java     |   333 -
 .../apache/hive/beeline/QFileBeeLineClient.java |   156 -
 .../org/apache/hive/beeline/package-info.java   |    22 -
 .../org/apache/hive/beeline/qfile/QFile.java    |   273 +
 .../hive/beeline/qfile/QFileBeeLineClient.java  |   149 +
 .../apache/hive/beeline/qfile/package-info.java |    22 +
 jdbc-handler/pom.xml                            |     2 +-
 jdbc/pom.xml                                    |    28 +-
 .../org/apache/hive/jdbc/HiveConnection.java    |    24 +-
 .../org/apache/hive/jdbc/HiveStatement.java     |     6 +-
 .../hive/jdbc/logs/InPlaceUpdateStream.java     |    17 -
 llap-client/pom.xml                             |     2 +-
 .../apache/hadoop/hive/llap/io/api/LlapIo.java  |     1 -
 .../llap/registry/impl/LlapRegistryService.java |     5 +-
 llap-common/pom.xml                             |     2 +-
 .../apache/hadoop/hive/llap/LlapDaemonInfo.java |    92 -
 llap-ext-client/pom.xml                         |     2 +-
 llap-server/bin/runLlapDaemon.sh                |     4 +-
 llap-server/pom.xml                             |    10 +-
 .../llap/IncrementalObjectSizeEstimator.java    |     4 +-
 .../hadoop/hive/llap/cache/BuddyAllocator.java  |   181 +-
 .../hive/llap/cache/EvictionDispatcher.java     |    25 +-
 .../hive/llap/cache/LlapOomDebugDump.java       |     1 -
 .../hadoop/hive/llap/cache/LowLevelCache.java   |     2 +-
 .../hive/llap/cache/LowLevelCacheImpl.java      |    39 -
 .../llap/cache/LowLevelCacheMemoryManager.java  |    39 +-
 .../hive/llap/cache/LowLevelCachePolicy.java    |     2 +-
 .../llap/cache/LowLevelFifoCachePolicy.java     |    26 +-
 .../llap/cache/LowLevelLrfuCachePolicy.java     |    41 +-
 .../hadoop/hive/llap/cache/MemoryManager.java   |     4 +-
 .../hive/llap/cache/SerDeLowLevelCacheImpl.java |    78 +-
 .../hadoop/hive/llap/cache/SimpleAllocator.java |     5 +-
 .../hive/llap/cache/SimpleBufferManager.java    |    10 -
 .../hadoop/hive/llap/cli/LlapServiceDriver.java |     6 +-
 .../hadoop/hive/llap/cli/LlapSliderUtils.java   |    58 -
 .../llap/cli/LlapStatusOptionsProcessor.java    |     1 -
 .../hive/llap/cli/LlapStatusServiceDriver.java  |   751 +-
 .../hive/llap/cli/status/LlapStatusHelpers.java |   449 -
 .../configuration/LlapDaemonConfiguration.java  |     2 +-
 .../llap/daemon/impl/ContainerRunnerImpl.java   |     4 -
 .../impl/EvictingPriorityBlockingQueue.java     |     5 -
 .../hive/llap/daemon/impl/LlapDaemon.java       |    88 +-
 .../hive/llap/daemon/impl/LlapDaemonMXBean.java |     6 -
 .../hadoop/hive/llap/daemon/impl/Scheduler.java |     2 -
 .../llap/daemon/impl/TaskExecutorService.java   |    21 +-
 .../llap/daemon/impl/TaskRunnerCallable.java    |     1 -
 .../services/impl/LlapIoMemoryServlet.java      |    88 -
 .../daemon/services/impl/LlapWebServices.java   |     1 -
 .../hive/llap/io/api/impl/LlapIoImpl.java       |    60 +-
 .../llap/io/decode/OrcColumnVectorProducer.java |     6 +-
 .../llap/io/encoded/OrcEncodedDataReader.java   |    39 +-
 .../llap/io/encoded/SerDeEncodedDataReader.java |    22 +-
 .../hive/llap/io/metadata/OrcMetadataCache.java |    24 +-
 .../llap/metrics/LlapDaemonExecutorMetrics.java |     2 +-
 .../llap/shufflehandler/ShuffleHandler.java     |    37 +-
 .../resources/hive-webapps/llap/js/metrics.js   |     6 +-
 .../main/resources/llap-cli-log4j2.properties   |    25 +-
 .../resources/llap-daemon-log4j2.properties     |     6 +-
 .../hive/llap/cache/TestBuddyAllocator.java     |    12 +-
 .../hive/llap/cache/TestLowLevelCacheImpl.java  |     8 +-
 .../llap/cache/TestLowLevelLrfuCachePolicy.java |    19 +-
 .../hive/llap/cache/TestOrcMetadataCache.java   |    16 +-
 .../hive/llap/daemon/MiniLlapCluster.java       |     5 -
 llap-tez/pom.xml                                |     2 +-
 .../metrics/LlapTaskSchedulerMetrics.java       |     2 +-
 metastore/if/hive_metastore.thrift              |    10 +-
 metastore/pom.xml                               |    24 +-
 .../upgrade/derby/022-HIVE-11107.derby.sql      |     4 +-
 .../upgrade/derby/039-HIVE-12274.derby.sql      |    32 -
 .../upgrade/derby/040-HIVE-16399.derby.sql      |     1 -
 .../upgrade/derby/hive-schema-2.2.0.derby.sql   |    20 +-
 .../upgrade/derby/hive-schema-2.3.0.derby.sql   |   340 -
 .../upgrade/derby/hive-schema-3.0.0.derby.sql   |   340 -
 .../derby/hive-txn-schema-2.2.0.derby.sql       |     2 +-
 .../derby/hive-txn-schema-2.3.0.derby.sql       |   134 -
 .../derby/hive-txn-schema-3.0.0.derby.sql       |   134 -
 .../derby/upgrade-2.1.0-to-2.2.0.derby.sql      |     1 -
 .../derby/upgrade-2.2.0-to-2.3.0.derby.sql      |     4 -
 .../derby/upgrade-2.3.0-to-3.0.0.derby.sql      |     3 -
 .../scripts/upgrade/derby/upgrade.order.derby   |     2 -
 .../upgrade/mssql/024-HIVE-12274.mssql.sql      |    18 -
 .../upgrade/mssql/025-HIVE-16399.mssql.sql      |     1 -
 .../upgrade/mssql/hive-schema-2.2.0.mssql.sql   |    30 +-
 .../upgrade/mssql/hive-schema-2.3.0.mssql.sql   |  1023 -
 .../upgrade/mssql/hive-schema-3.0.0.mssql.sql   |  1023 -
 .../mssql/hive-txn-schema-0.14.0.mssql.sql      |     2 +-
 .../mssql/upgrade-2.1.0-to-2.2.0.mssql.sql      |     1 -
 .../mssql/upgrade-2.2.0-to-2.3.0.mssql.sql      |     6 -
 .../mssql/upgrade-2.3.0-to-3.0.0.mssql.sql      |     4 -
 .../scripts/upgrade/mssql/upgrade.order.mssql   |     2 -
 .../upgrade/mysql/039-HIVE-12274.mysql.sql      |    18 -
 .../upgrade/mysql/040-HIVE-16399.mysql.sql      |     1 -
 .../upgrade/mysql/hive-schema-2.2.0.mysql.sql   |    28 +-
 .../upgrade/mysql/hive-schema-2.3.0.mysql.sql   |   853 -
 .../upgrade/mysql/hive-schema-3.0.0.mysql.sql   |   853 -
 .../mysql/hive-txn-schema-2.2.0.mysql.sql       |     2 +-
 .../mysql/hive-txn-schema-2.3.0.mysql.sql       |   135 -
 .../mysql/hive-txn-schema-3.0.0.mysql.sql       |   135 -
 .../mysql/upgrade-2.1.0-to-2.2.0.mysql.sql      |     1 -
 .../mysql/upgrade-2.2.0-to-2.3.0.mysql.sql      |     7 -
 .../mysql/upgrade-2.3.0-to-3.0.0.mysql.sql      |     5 -
 .../scripts/upgrade/mysql/upgrade.order.mysql   |     2 -
 .../upgrade/oracle/039-HIVE-12274.oracle.sql    |    21 -
 .../upgrade/oracle/040-HIVE-16399.oracle.sql    |     1 -
 .../upgrade/oracle/hive-schema-2.2.0.oracle.sql |    30 +-
 .../upgrade/oracle/hive-schema-2.3.0.oracle.sql |   811 -
 .../upgrade/oracle/hive-schema-3.0.0.oracle.sql |   811 -
 .../oracle/hive-txn-schema-2.2.0.oracle.sql     |     2 +-
 .../oracle/hive-txn-schema-2.3.0.oracle.sql     |   133 -
 .../oracle/hive-txn-schema-3.0.0.oracle.sql     |   133 -
 .../oracle/upgrade-2.1.0-to-2.2.0.oracle.sql    |     1 -
 .../oracle/upgrade-2.2.0-to-2.3.0.oracle.sql    |     6 -
 .../oracle/upgrade-2.3.0-to-3.0.0.oracle.sql    |     4 -
 .../scripts/upgrade/oracle/upgrade.order.oracle |     2 -
 .../postgres/038-HIVE-12274.postgres.sql        |    18 -
 .../postgres/039-HIVE-16399.postgres.sql        |     1 -
 .../postgres/hive-schema-2.2.0.postgres.sql     |    30 +-
 .../postgres/hive-schema-2.3.0.postgres.sql     |  1478 -
 .../postgres/hive-schema-3.0.0.postgres.sql     |  1478 -
 .../postgres/hive-txn-schema-2.2.0.postgres.sql |     2 +-
 .../postgres/hive-txn-schema-2.3.0.postgres.sql |   133 -
 .../postgres/hive-txn-schema-3.0.0.postgres.sql |   133 -
 .../upgrade-2.1.0-to-2.2.0.postgres.sql         |     1 -
 .../upgrade-2.2.0-to-2.3.0.postgres.sql         |     7 -
 .../upgrade-2.3.0-to-3.0.0.postgres.sql         |     5 -
 .../upgrade/postgres/upgrade.order.postgres     |     2 -
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  |  2499 +-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h    |   139 -
 .../ThriftHiveMetastore_server.skeleton.cpp     |     5 -
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  1550 +-
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |    23 +-
 .../hive/metastore/api/GetOpenTxnsResponse.java |   150 +-
 .../metastore/api/InsertEventRequestData.java   |   127 +-
 .../hive/metastore/api/ThriftHiveMetastore.java |  3116 +-
 .../gen-php/metastore/ThriftHiveMetastore.php   |  1534 +-
 .../src/gen/thrift/gen-php/metastore/Types.php  |   584 +-
 .../hive_metastore/ThriftHiveMetastore-remote   |     7 -
 .../hive_metastore/ThriftHiveMetastore.py       |   948 +-
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |    60 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |    13 +-
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |    62 -
 .../hadoop/hive/metastore/HiveAlterHandler.java |   444 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |   803 +-
 .../hive/metastore/HiveMetaStoreClient.java     |    68 +-
 .../hive/metastore/HiveMetaStoreFsImpl.java     |    21 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |    14 -
 .../hive/metastore/MetaStoreDirectSql.java      |   104 +-
 .../hive/metastore/MetaStoreEventListener.java  |    12 +-
 .../metastore/MetaStoreListenerNotifier.java    |   224 -
 .../hive/metastore/MetaStoreSchemaInfo.java     |    16 +-
 .../hadoop/hive/metastore/MetaStoreUtils.java   |    28 +-
 .../hadoop/hive/metastore/ObjectStore.java      |   836 +-
 .../apache/hadoop/hive/metastore/RawStore.java  |    12 -
 .../hive/metastore/StatObjectConverter.java     |    42 +-
 .../apache/hadoop/hive/metastore/Warehouse.java |    81 +-
 .../hive/metastore/cache/ByteArrayWrapper.java  |    45 -
 .../hadoop/hive/metastore/cache/CacheUtils.java |   113 -
 .../hive/metastore/cache/CachedStore.java       |  1622 -
 .../hive/metastore/cache/SharedCache.java       |   356 -
 .../metastore/events/AlterPartitionEvent.java   |    14 +-
 .../hive/metastore/events/AlterTableEvent.java  |    12 +-
 .../hive/metastore/events/InsertEvent.java      |    11 -
 .../hive/metastore/events/ListenerEvent.java    |   106 -
 .../hadoop/hive/metastore/hbase/HBaseStore.java |    29 -
 .../hadoop/hive/metastore/hbase/HBaseUtils.java |     2 +-
 .../messaging/AlterPartitionMessage.java        |     2 -
 .../metastore/messaging/AlterTableMessage.java  |     2 -
 .../hive/metastore/messaging/EventUtils.java    |    87 +-
 .../hive/metastore/messaging/InsertMessage.java |     6 -
 .../metastore/messaging/MessageFactory.java     |     9 +-
 .../metastore/messaging/PartitionFiles.java     |     3 -
 .../messaging/event/filters/AndFilter.java      |    39 -
 .../messaging/event/filters/BasicFilter.java    |    33 -
 .../event/filters/DatabaseAndTableFilter.java   |    52 -
 .../event/filters/EventBoundaryFilter.java      |    34 -
 .../event/filters/MessageFormatFilter.java      |    36 -
 .../json/JSONAlterPartitionMessage.java         |     9 +-
 .../messaging/json/JSONAlterTableMessage.java   |     9 +-
 .../messaging/json/JSONInsertMessage.java       |     9 +-
 .../messaging/json/JSONMessageDeserializer.java |     4 -
 .../messaging/json/JSONMessageFactory.java      |    18 +-
 .../hive/metastore/parser/ExpressionTree.java   |     2 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java   |   115 +-
 .../hadoop/hive/metastore/txn/TxnUtils.java     |    17 +-
 metastore/src/model/package.jdo                 |    36 +-
 .../DummyRawStoreControlledCommit.java          |     8 -
 .../DummyRawStoreForJdoConnection.java          |     8 -
 .../metastore/TestHiveMetaStoreTimeout.java     |     1 +
 .../hadoop/hive/metastore/TestObjectStore.java  |    68 -
 .../hive/metastore/VerifyingObjectStore.java    |     2 +-
 .../hive/metastore/cache/TestCachedStore.java   |   238 -
 .../json/JSONMessageDeserializerTest.java       |   106 -
 .../txn/TestValidCompactorTxnList.java          |    63 +-
 packaging/pom.xml                               |     2 +-
 packaging/src/main/assembly/src.xml             |     2 +-
 pom.xml                                         |   126 +-
 ql/pom.xml                                      |    25 +-
 .../UDAFTemplates/VectorUDAFAvg.txt             |     2 +-
 .../UDAFTemplates/VectorUDAFMinMax.txt          |     2 +-
 .../UDAFTemplates/VectorUDAFMinMaxDecimal.txt   |     2 +-
 .../VectorUDAFMinMaxIntervalDayTime.txt         |     2 +-
 .../UDAFTemplates/VectorUDAFMinMaxString.txt    |     4 +-
 .../UDAFTemplates/VectorUDAFMinMaxTimestamp.txt |     2 +-
 .../UDAFTemplates/VectorUDAFSum.txt             |     2 +-
 .../UDAFTemplates/VectorUDAFVar.txt             |     2 +-
 .../UDAFTemplates/VectorUDAFVarDecimal.txt      |     4 +-
 .../java/org/apache/hadoop/hive/ql/Context.java |    11 +-
 .../java/org/apache/hadoop/hive/ql/Driver.java  |   425 +-
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |    21 +-
 .../hadoop/hive/ql/QueryLifeTimeHookRunner.java |   186 -
 .../org/apache/hadoop/hive/ql/QueryPlan.java    |    28 +-
 .../hadoop/hive/ql/exec/ArchiveUtils.java       |     7 +-
 .../apache/hadoop/hive/ql/exec/ColumnInfo.java  |     7 +-
 .../hadoop/hive/ql/exec/ColumnStatsTask.java    |    32 +-
 .../apache/hadoop/hive/ql/exec/CopyTask.java    |     3 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |   106 +-
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |    11 +-
 .../exec/ExprNodeConstantDefaultEvaluator.java  |    55 +
 .../hive/ql/exec/ExprNodeEvaluatorFactory.java  |     6 +
 .../hadoop/hive/ql/exec/FetchOperator.java      |    16 -
 .../hadoop/hive/ql/exec/FileSinkOperator.java   |    16 +-
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |    37 +-
 .../hadoop/hive/ql/exec/GroupByOperator.java    |     5 +-
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |     7 +-
 .../hadoop/hive/ql/exec/OperatorFactory.java    |    20 +-
 .../hadoop/hive/ql/exec/ReplCopyTask.java       |     3 +-
 .../hive/ql/exec/SerializationUtilities.java    |    30 -
 .../hadoop/hive/ql/exec/StatsNoJobTask.java     |    15 +-
 .../apache/hadoop/hive/ql/exec/TaskRunner.java  |     7 +
 .../apache/hadoop/hive/ql/exec/TopNHash.java    |     5 +-
 .../apache/hadoop/hive/ql/exec/Utilities.java   |    34 +-
 .../mapjoin/MapJoinMemoryExhaustionError.java   |    28 -
 .../MapJoinMemoryExhaustionException.java       |    29 +
 .../mapjoin/MapJoinMemoryExhaustionHandler.java |     6 +-
 .../hadoop/hive/ql/exec/mr/ExecDriver.java      |    44 +-
 .../hadoop/hive/ql/exec/mr/MapredLocalTask.java |    19 +-
 .../persistence/BytesBytesMultiHashMap.java     |    17 +-
 .../ql/exec/persistence/HashMapWrapper.java     |    10 +-
 .../persistence/HybridHashTableContainer.java   |     5 -
 .../persistence/MapJoinBytesTableContainer.java |    58 +-
 .../exec/persistence/MapJoinTableContainer.java |     3 +-
 .../ql/exec/spark/RemoteHiveSparkClient.java    |     9 +-
 .../hive/ql/exec/spark/SparkPlanGenerator.java  |    23 +-
 .../ql/exec/spark/SparkReduceRecordHandler.java |     6 +-
 .../hadoop/hive/ql/exec/spark/SparkTask.java    |    27 +-
 .../hive/ql/exec/spark/SparkUtilities.java      |     4 +-
 .../spark/status/RemoteSparkJobMonitor.java     |    15 +-
 .../ql/exec/spark/status/SparkJobMonitor.java   |    10 +-
 .../spark/status/impl/RemoteSparkJobStatus.java |     6 -
 .../hadoop/hive/ql/exec/tez/DagUtils.java       |    38 +-
 .../hive/ql/exec/tez/HashTableLoader.java       |    42 +-
 .../hive/ql/exec/tez/ReduceRecordSource.java    |    11 +-
 .../hadoop/hive/ql/exec/tez/TezProcessor.java   |    11 +-
 .../hive/ql/exec/tez/TezSessionPoolManager.java |    21 +-
 .../hive/ql/exec/tez/TezSessionState.java       |     4 -
 .../apache/hadoop/hive/ql/exec/tez/TezTask.java |    18 +-
 .../hive/ql/exec/tez/monitoring/Constants.java  |    17 -
 .../hive/ql/exec/tez/monitoring/DAGSummary.java |    72 +-
 .../exec/tez/monitoring/FSCountersSummary.java  |    17 -
 .../ql/exec/tez/monitoring/LLAPioSummary.java   |    17 -
 .../ql/exec/tez/monitoring/PrintSummary.java    |    17 -
 .../QueryExecutionBreakdownSummary.java         |    17 -
 .../ql/exec/tez/monitoring/RenderStrategy.java  |    21 +-
 .../exec/tez/monitoring/TezProgressMonitor.java |    17 -
 .../vector/VectorAggregationBufferBatch.java    |     4 +-
 .../ql/exec/vector/VectorColumnSetInfo.java     |   158 +-
 .../hive/ql/exec/vector/VectorExtractRow.java   |    19 -
 .../ql/exec/vector/VectorGroupByOperator.java   |   164 +-
 .../ql/exec/vector/VectorGroupKeyHelper.java    |    57 +-
 .../ql/exec/vector/VectorHashKeyWrapper.java    |   244 +-
 .../exec/vector/VectorHashKeyWrapperBatch.java  |   456 +-
 .../ql/exec/vector/VectorMapJoinOperator.java   |     3 -
 .../exec/vector/VectorSMBMapJoinOperator.java   |     3 -
 .../ql/exec/vector/VectorizationContext.java    |    52 +-
 .../vector/expressions/CastStringToLong.java    |   271 -
 .../exec/vector/expressions/CuckooSetBytes.java |     4 +-
 .../ql/exec/vector/expressions/OctetLength.java |   149 -
 .../aggregates/VectorAggregateExpression.java   |     2 +-
 .../aggregates/VectorUDAFAvgDecimal.java        |     2 +-
 .../aggregates/VectorUDAFAvgTimestamp.java      |     2 +-
 .../aggregates/VectorUDAFBloomFilter.java       |     4 +-
 .../aggregates/VectorUDAFBloomFilterMerge.java  |     2 +-
 .../expressions/aggregates/VectorUDAFCount.java |     2 +-
 .../aggregates/VectorUDAFCountMerge.java        |     2 +-
 .../aggregates/VectorUDAFCountStar.java         |     2 +-
 .../aggregates/VectorUDAFStdPopTimestamp.java   |     2 +-
 .../aggregates/VectorUDAFStdSampTimestamp.java  |     2 +-
 .../aggregates/VectorUDAFSumDecimal.java        |     2 +-
 .../aggregates/VectorUDAFVarPopTimestamp.java   |     2 +-
 .../aggregates/VectorUDAFVarSampTimestamp.java  |     2 +-
 .../VectorMapJoinGenerateResultOperator.java    |     2 -
 .../fast/VectorMapJoinFastBytesHashMap.java     |     5 -
 .../VectorMapJoinFastBytesHashMultiSet.java     |     5 -
 .../fast/VectorMapJoinFastBytesHashSet.java     |     5 -
 .../fast/VectorMapJoinFastBytesHashTable.java   |     6 -
 .../fast/VectorMapJoinFastHashTable.java        |    13 +-
 .../fast/VectorMapJoinFastHashTableLoader.java  |    47 +-
 .../mapjoin/fast/VectorMapJoinFastKeyStore.java |    11 +-
 .../fast/VectorMapJoinFastLongHashMap.java      |     9 +-
 .../fast/VectorMapJoinFastLongHashMultiSet.java |     5 -
 .../fast/VectorMapJoinFastLongHashSet.java      |     5 -
 .../fast/VectorMapJoinFastLongHashTable.java    |    15 -
 .../fast/VectorMapJoinFastMultiKeyHashMap.java  |     5 -
 .../VectorMapJoinFastMultiKeyHashMultiSet.java  |     4 -
 .../fast/VectorMapJoinFastMultiKeyHashSet.java  |     5 +-
 .../fast/VectorMapJoinFastStringHashMap.java    |     9 -
 .../VectorMapJoinFastStringHashMultiSet.java    |     8 -
 .../fast/VectorMapJoinFastStringHashSet.java    |     8 -
 .../fast/VectorMapJoinFastTableContainer.java   |    16 +-
 .../fast/VectorMapJoinFastValueStore.java       |     8 +-
 .../hashtable/VectorMapJoinHashTable.java       |     3 +-
 .../VectorMapJoinOptimizedHashSet.java          |     5 -
 .../VectorMapJoinOptimizedHashTable.java        |     9 -
 .../VectorMapJoinOptimizedStringHashSet.java    |     8 -
 .../VectorReduceSinkCommonOperator.java         |   173 +-
 .../VectorReduceSinkLongOperator.java           |     2 +-
 .../VectorReduceSinkMultiKeyOperator.java       |     2 +-
 .../VectorReduceSinkObjectHashOperator.java     |   289 -
 .../VectorReduceSinkStringOperator.java         |     2 +-
 .../VectorReduceSinkUniformHashOperator.java    |   218 -
 .../hadoop/hive/ql/history/HiveHistoryImpl.java |     5 +-
 .../apache/hadoop/hive/ql/hooks/HookUtils.java  |    52 +-
 .../hadoop/hive/ql/hooks/HooksLoader.java       |   107 -
 .../hadoop/hive/ql/hooks/LineageInfo.java       |     3 +-
 .../hooks/PostExecOrcRowGroupCountPrinter.java  |     4 +-
 .../ql/hooks/PostExecTezSummaryPrinter.java     |    12 +-
 .../hive/ql/hooks/PostExecutePrinter.java       |     6 +-
 .../hadoop/hive/ql/hooks/PreExecutePrinter.java |     6 +-
 .../ql/hooks/QueryLifeTimeHookContextImpl.java  |    34 +-
 .../hooks/QueryLifeTimeHookWithParseHooks.java  |    41 -
 .../apache/hadoop/hive/ql/index/HiveIndex.java  |     4 +-
 .../hive/ql/io/CombineHiveInputFormat.java      |     7 -
 .../hadoop/hive/ql/io/HiveFileFormatUtils.java  |     2 +-
 .../hadoop/hive/ql/io/HiveInputFormat.java      |     3 +-
 .../org/apache/hadoop/hive/ql/io/RCFile.java    |     2 +-
 .../apache/hadoop/hive/ql/io/orc/OrcFile.java   |     4 +-
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |     8 +-
 .../ql/io/orc/encoded/EncodedReaderImpl.java    |   216 +-
 .../io/parquet/MapredParquetOutputFormat.java   |    10 +-
 .../ql/io/parquet/ParquetRecordReaderBase.java  |    14 +-
 .../ql/io/parquet/timestamp/NanoTimeUtils.java  |    15 +-
 .../vector/VectorizedParquetRecordReader.java   |     5 +-
 .../ql/io/rcfile/stats/PartialScanTask.java     |     6 +-
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java    |   282 +-
 .../hadoop/hive/ql/lockmgr/DummyTxnManager.java |     3 +-
 .../hadoop/hive/ql/lockmgr/HiveLockObject.java  |    18 +-
 .../hadoop/hive/ql/lockmgr/HiveTxnManager.java  |    16 +-
 .../hive/ql/lockmgr/HiveTxnManagerImpl.java     |    25 +-
 .../zookeeper/ZooKeeperHiveLockManager.java     |    39 +-
 .../hadoop/hive/ql/log/LogDivertAppender.java   |   249 -
 .../hive/ql/log/LogDivertAppenderForTest.java   |   182 -
 .../apache/hadoop/hive/ql/metadata/Hive.java    |   272 +-
 .../metadata/HiveMaterializedViewsRegistry.java |     3 +-
 .../hive/ql/metadata/HiveMetaStoreChecker.java  |   102 +-
 .../ql/metadata/SessionHiveMetaStoreClient.java |     2 +-
 .../apache/hadoop/hive/ql/metadata/Table.java   |    24 +-
 .../hadoop/hive/ql/metadata/VirtualColumn.java  |     2 +-
 .../hadoop/hive/ql/optimizer/ColumnPruner.java  |    13 +-
 .../ql/optimizer/ColumnPrunerProcFactory.java   |    36 +-
 .../hive/ql/optimizer/ConvertJoinMapJoin.java   |    86 +-
 .../DynamicPartitionPruningOptimization.java    |   183 +-
 .../hive/ql/optimizer/GenMRFileSink1.java       |    11 +-
 .../hive/ql/optimizer/GenMapRedUtils.java       |     6 +-
 .../hive/ql/optimizer/MapJoinProcessor.java     |     8 +-
 .../hadoop/hive/ql/optimizer/Optimizer.java     |     2 +-
 .../ql/optimizer/SetReducerParallelism.java     |     2 -
 .../calcite/CalciteSemanticException.java       |     5 +-
 .../ql/optimizer/calcite/HiveCalciteUtil.java   |     6 -
 .../optimizer/calcite/HivePlannerContext.java   |     9 +-
 .../optimizer/calcite/HiveRelShuttleImpl.java   |     5 -
 .../calcite/reloperators/HiveExtractDate.java   |     8 +-
 .../HiveDruidProjectFilterTransposeRule.java    |    48 -
 .../calcite/rules/HiveFilterJoinRule.java       |    51 +
 .../rules/HivePointLookupOptimizerRule.java     |    95 +-
 .../rules/HiveProjectSortTransposeRule.java     |    20 +-
 .../rules/HiveSortProjectTransposeRule.java     |    48 +-
 .../calcite/rules/HiveSubQueryRemoveRule.java   |   194 +-
 .../HiveMaterializedViewFilterScanRule.java     |     3 +-
 .../stats/FilterSelectivityEstimator.java       |    12 -
 .../calcite/stats/HiveRelMdPredicates.java      |    14 +-
 .../calcite/translator/ASTBuilder.java          |    14 +-
 .../calcite/translator/ASTConverter.java        |     1 +
 .../calcite/translator/ExprNodeConverter.java   |    18 +-
 .../calcite/translator/HiveOpConverter.java     |    21 +-
 .../calcite/translator/RexNodeConverter.java    |    83 +-
 .../translator/SqlFunctionConverter.java        |     1 -
 .../correlation/ReduceSinkDeDuplication.java    |     2 +-
 .../ListBucketingPrunerUtils.java               |     4 +-
 .../physical/GenMRSkewJoinProcessor.java        |    13 +-
 .../physical/GenSparkSkewJoinProcessor.java     |     3 +-
 .../physical/LlapClusterStateForCompile.java    |   132 -
 .../hive/ql/optimizer/physical/LlapDecider.java |    56 +-
 .../physical/LlapPreVectorizationPass.java      |   128 -
 .../optimizer/physical/NullScanOptimizer.java   |    58 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  |   333 +-
 .../ql/optimizer/physical/VectorizerReason.java |     2 +-
 .../hive/ql/optimizer/ppr/PartitionPruner.java  |     3 +-
 .../optimizer/spark/SparkMapJoinOptimizer.java  |    73 +-
 .../stats/annotation/StatsRulesProcFactory.java |    16 +-
 .../hive/ql/parse/BaseSemanticAnalyzer.java     |     8 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |   221 +-
 .../ql/parse/ColumnStatsSemanticAnalyzer.java   |    19 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |    31 +-
 .../apache/hadoop/hive/ql/parse/EximUtil.java   |   256 +-
 .../hive/ql/parse/ExplainSemanticAnalyzer.java  |    22 +-
 .../hive/ql/parse/ExportSemanticAnalyzer.java   |     4 +-
 .../hadoop/hive/ql/parse/FromClauseParser.g     |     3 +-
 .../hive/ql/parse/FunctionSemanticAnalyzer.java |     8 +-
 .../hadoop/hive/ql/parse/GenTezUtils.java       |    33 +-
 .../apache/hadoop/hive/ql/parse/GenTezWork.java |     3 +-
 .../apache/hadoop/hive/ql/parse/HintParser.g    |     4 -
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |     3 +-
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |     2 -
 .../hadoop/hive/ql/parse/IdentifiersParser.g    |    23 +-
 .../hive/ql/parse/ImportSemanticAnalyzer.java   |   101 +-
 .../hadoop/hive/ql/parse/NamedJoinInfo.java     |    65 -
 .../hadoop/hive/ql/parse/ParseContext.java      |    44 +-
 .../apache/hadoop/hive/ql/parse/ParseUtils.java |     1 -
 .../hive/ql/parse/ProcessAnalyzeTable.java      |     6 +-
 .../apache/hadoop/hive/ql/parse/QBJoinTree.java |    16 -
 .../hadoop/hive/ql/parse/QBParseInfo.java       |     9 -
 .../ql/parse/ReplicationSemanticAnalyzer.java   |   737 +-
 .../hadoop/hive/ql/parse/ReplicationSpec.java   |    22 +-
 .../hadoop/hive/ql/parse/RowResolver.java       |    10 -
 .../hadoop/hive/ql/parse/RuntimeValuesInfo.java |    10 -
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   337 +-
 .../hive/ql/parse/SemanticAnalyzerFactory.java  |    23 +-
 .../hive/ql/parse/SemiJoinBranchInfo.java       |    45 -
 .../hadoop/hive/ql/parse/SemiJoinHint.java      |    43 -
 .../hadoop/hive/ql/parse/SubQueryUtils.java     |    21 +-
 .../hadoop/hive/ql/parse/TaskCompiler.java      |     6 +-
 .../hadoop/hive/ql/parse/TezCompiler.java       |   512 +-
 .../hive/ql/parse/TypeCheckProcFactory.java     |     5 +-
 .../ql/parse/UpdateDeleteSemanticAnalyzer.java  |    14 -
 .../hadoop/hive/ql/parse/WindowingSpec.java     |     2 +-
 .../hadoop/hive/ql/parse/repl/DumpType.java     |    45 -
 .../dump/BootStrapReplicationSpecFunction.java  |    54 -
 .../hive/ql/parse/repl/dump/HiveWrapper.java    |    73 -
 .../hadoop/hive/ql/parse/repl/dump/Utils.java   |    50 -
 .../ql/parse/repl/dump/io/DBSerializer.java     |    55 -
 .../parse/repl/dump/io/FunctionSerializer.java  |    49 -
 .../hive/ql/parse/repl/dump/io/JsonWriter.java  |    55 -
 .../parse/repl/dump/io/PartitionSerializer.java |    65 -
 .../repl/dump/io/ReplicationSpecSerializer.java |    36 -
 .../ql/parse/repl/dump/io/TableSerializer.java  |   114 -
 .../dump/io/VersionCompatibleSerializer.java    |    37 -
 .../ql/parse/repl/events/AbstractHandler.java   |    46 -
 .../parse/repl/events/AddPartitionHandler.java  |   114 -
 .../repl/events/AlterPartitionHandler.java      |   112 -
 .../ql/parse/repl/events/AlterTableHandler.java |   102 -
 .../parse/repl/events/CreateTableHandler.java   |    86 -
 .../ql/parse/repl/events/DefaultHandler.java    |    44 -
 .../parse/repl/events/DropPartitionHandler.java |    44 -
 .../ql/parse/repl/events/DropTableHandler.java  |    44 -
 .../hive/ql/parse/repl/events/EventHandler.java |    62 -
 .../parse/repl/events/EventHandlerFactory.java  |    75 -
 .../ql/parse/repl/events/InsertHandler.java     |   110 -
 .../hive/ql/parse/repl/load/DumpMetaData.java   |   143 -
 .../hive/ql/parse/repl/load/MetaData.java       |    64 -
 .../hive/ql/parse/repl/load/MetadataJson.java   |   128 -
 .../parse/spark/SparkProcessAnalyzeTable.java   |     5 +-
 .../hadoop/hive/ql/plan/AbstractVectorDesc.java |     4 +-
 .../hadoop/hive/ql/plan/AlterTableDesc.java     |     6 -
 .../hadoop/hive/ql/plan/CreateTableDesc.java    |     3 +-
 .../hadoop/hive/ql/plan/CreateViewDesc.java     |    22 -
 .../ql/plan/ExprNodeConstantDefaultDesc.java    |    86 +
 .../hive/ql/plan/ExprNodeConstantDesc.java      |     6 +-
 .../hadoop/hive/ql/plan/ExprNodeDescUtils.java  |    71 -
 .../hive/ql/plan/ExprNodeDynamicListDesc.java   |     9 +-
 .../apache/hadoop/hive/ql/plan/GroupByDesc.java |     2 +-
 .../hadoop/hive/ql/plan/HiveOperation.java      |    37 +-
 .../hadoop/hive/ql/plan/ImportTableDesc.java    |   321 -
 .../apache/hadoop/hive/ql/plan/JoinDesc.java    |    18 +-
 .../apache/hadoop/hive/ql/plan/MapJoinDesc.java |    18 +-
 .../hadoop/hive/ql/plan/PartitionDesc.java      |    28 +-
 .../hadoop/hive/ql/plan/ReduceSinkDesc.java     |    29 +-
 .../apache/hadoop/hive/ql/plan/ReduceWork.java  |    67 -
 .../apache/hadoop/hive/ql/plan/SparkWork.java   |    10 +-
 .../apache/hadoop/hive/ql/plan/TableDesc.java   |     4 +-
 .../hadoop/hive/ql/plan/TezEdgeProperty.java    |    18 +-
 .../hive/ql/plan/VectorAppMasterEventDesc.java  |     2 +-
 .../hadoop/hive/ql/plan/VectorFileSinkDesc.java |     2 +-
 .../hadoop/hive/ql/plan/VectorFilterDesc.java   |     2 +-
 .../hadoop/hive/ql/plan/VectorGroupByDesc.java  |     2 +-
 .../hadoop/hive/ql/plan/VectorLimitDesc.java    |     2 +-
 .../hadoop/hive/ql/plan/VectorMapJoinDesc.java  |     2 +-
 .../hadoop/hive/ql/plan/VectorMapJoinInfo.java  |     2 +-
 .../hive/ql/plan/VectorPartitionDesc.java       |     2 +-
 .../hive/ql/plan/VectorReduceSinkDesc.java      |    23 +-
 .../hive/ql/plan/VectorReduceSinkInfo.java      |    98 +-
 .../hadoop/hive/ql/plan/VectorSMBJoinDesc.java  |     2 +-
 .../hadoop/hive/ql/plan/VectorSelectDesc.java   |     2 +-
 .../ql/plan/VectorSparkHashTableSinkDesc.java   |     2 +-
 .../VectorSparkPartitionPruningSinkDesc.java    |     2 +-
 .../hive/ql/plan/VectorTableScanDesc.java       |     2 +-
 .../hive/ql/ppd/SyntheticJoinPredicate.java     |     2 +
 .../ql/processors/CommandProcessorResponse.java |    10 +-
 .../hadoop/hive/ql/processors/HiveCommand.java  |     2 +-
 .../hive/ql/processors/ResetProcessor.java      |    21 +-
 .../hadoop/hive/ql/processors/SetProcessor.java |    15 +-
 .../plugin/sqlstd/SQLAuthorizationUtils.java    |     4 +-
 .../hadoop/hive/ql/session/OperationLog.java    |   127 +-
 .../hadoop/hive/ql/session/SessionState.java    |    25 +-
 .../apache/hadoop/hive/ql/stats/StatsUtils.java |    47 +-
 .../hadoop/hive/ql/txn/compactor/Cleaner.java   |     3 +-
 .../hive/ql/txn/compactor/CompactorMR.java      |    10 +-
 .../org/apache/hadoop/hive/ql/udf/UDFJson.java  |    67 +-
 .../apache/hadoop/hive/ql/udf/UDFLength.java    |    66 +
 .../apache/hadoop/hive/ql/udf/UDFToBoolean.java |     3 +-
 .../apache/hadoop/hive/ql/udf/UDFToByte.java    |     3 +-
 .../apache/hadoop/hive/ql/udf/UDFToInteger.java |     3 +-
 .../apache/hadoop/hive/ql/udf/UDFToLong.java    |     3 +-
 .../apache/hadoop/hive/ql/udf/UDFToShort.java   |     3 +-
 .../generic/GenericUDAFBinarySetFunctions.java  |   452 -
 .../ql/udf/generic/GenericUDAFBloomFilter.java  |    33 -
 .../ql/udf/generic/GenericUDAFComputeStats.java |    22 +-
 .../ql/udf/generic/GenericUDAFCorrelation.java  |    26 +-
 .../generic/GenericUDAFCovarianceSample.java    |    18 +-
 .../hadoop/hive/ql/udf/generic/GenericUDF.java  |     3 +-
 .../udf/generic/GenericUDFCharacterLength.java  |   120 -
 .../ql/udf/generic/GenericUDFExtractUnion.java  |   272 -
 .../hive/ql/udf/generic/GenericUDFGrouping.java |    45 +-
 .../hive/ql/udf/generic/GenericUDFInFile.java   |     3 +-
 .../udf/generic/GenericUDFInternalInterval.java |     4 +-
 .../hive/ql/udf/generic/GenericUDFLength.java   |   129 -
 .../hive/ql/udf/generic/GenericUDFLikeAll.java  |   133 -
 .../hive/ql/udf/generic/GenericUDFLikeAny.java  |   134 -
 .../hive/ql/udf/generic/GenericUDFOPEqual.java  |     9 +
 .../ql/udf/generic/GenericUDFOPNotEqual.java    |     9 +
 .../ql/udf/generic/GenericUDFOctetLength.java   |   114 -
 .../hive/ql/udf/generic/GenericUDFTrunc.java    |    10 +-
 .../ql/udf/generic/GenericUDFWidthBucket.java   |   329 -
 .../hive/metastore/txn/TestTxnHandler.java      |     2 +-
 .../org/apache/hadoop/hive/ql/TestErrorMsg.java |     6 +-
 .../apache/hadoop/hive/ql/TestTxnCommands.java  |    49 +-
 .../apache/hadoop/hive/ql/TestTxnCommands2.java |    24 +-
 .../ql/TestTxnCommands2WithSplitUpdate.java     |    61 +-
 .../hadoop/hive/ql/exec/TestOperators.java      |    72 +-
 .../TestMapJoinMemoryExhaustionHandler.java     |     4 +-
 .../tez/monitoring/TestTezProgressMonitor.java  |    17 -
 .../hive/ql/exec/vector/TestVectorSerDeRow.java |     5 +-
 .../mapjoin/fast/CheckFastRowHashMap.java       |    17 +-
 .../mapjoin/fast/CommonFastHashTable.java       |     4 +-
 .../hadoop/hive/ql/hooks/TestQueryHooks.java    |   162 +-
 .../hadoop/hive/ql/io/orc/TestOrcFile.java      |    17 +-
 .../hive/ql/io/orc/TestOrcRawRecordMerger.java  |     7 +-
 .../io/parquet/timestamp/TestNanoTimeUtils.java |    13 -
 .../hive/ql/lockmgr/TestDbTxnManager.java       |    65 +-
 .../hive/ql/lockmgr/TestDbTxnManager2.java      |   376 +-
 .../hive/ql/lockmgr/TestDummyTxnManager.java    |     4 +-
 .../ql/lockmgr/TestEmbeddedLockManager.java     |     4 +-
 .../hive/ql/lockmgr/TestHiveLockObject.java     |    30 +-
 .../zookeeper/TestZookeeperLockManager.java     |     2 +-
 .../hadoop/hive/ql/metadata/TestHive.java       |     2 +-
 .../ql/metadata/TestHiveMetaStoreChecker.java   |   207 +-
 .../calcite/TestCBORuleFiredOnlyOnce.java       |     2 +-
 .../hive/ql/parse/TestHiveDecimalParse.java     |    23 +-
 .../parse/TestReplicationSemanticAnalyzer.java  |    22 +-
 .../ql/parse/repl/dump/HiveWrapperTest.java     |    27 -
 .../repl/events/TestEventHandlerFactory.java    |    62 -
 .../hive/ql/processors/TestResetProcessor.java  |    59 -
 .../hive/ql/txn/compactor/TestInitiator.java    |    11 +-
 .../TestGenericUDAFBinarySetFunctions.java      |   414 -
 .../udf/generic/TestGenericUDFExtractUnion.java |   175 -
 ...UDFExtractUnionObjectInspectorConverter.java |   109 -
 ...estGenericUDFExtractUnionValueConverter.java |   108 -
 .../ql/udf/generic/TestGenericUDFLikeAll.java   |    88 -
 .../ql/udf/generic/TestGenericUDFLikeAny.java   |    87 -
 .../ql/udf/generic/TestGenericUDFTrunc.java     |   283 -
 .../udf/generic/TestGenericUDFWidthBucket.java  |    86 -
 .../clientnegative/bucket_mapjoin_mismatch1.q   |     1 -
 .../bucket_mapjoin_wrong_table_metadata_1.q     |     2 +-
 .../bucket_mapjoin_wrong_table_metadata_2.q     |     1 -
 .../clientnegative/char_pad_convert_fail0.q     |     4 +-
 .../clientnegative/char_pad_convert_fail1.q     |     4 +-
 .../clientnegative/char_pad_convert_fail2.q     |     4 +-
 .../clientnegative/char_pad_convert_fail3.q     |     4 +-
 ...umnstats_partlvl_invalid_values_autogather.q |    17 -
 .../queries/clientnegative/decimal_precision.q  |     4 +-
 .../clientnegative/decimal_precision_1.q        |     4 +-
 .../distinct_windowing_failure1.q               |     2 +-
 .../distinct_windowing_failure2.q               |     2 +-
 .../drop_default_partition_filter.q             |     7 -
 .../queries/clientnegative/invalid_mapjoin1.q   |     1 -
 ql/src/test/queries/clientnegative/join2.q      |     1 -
 ql/src/test/queries/clientnegative/join28.q     |     2 +-
 ql/src/test/queries/clientnegative/join29.q     |     2 +-
 ql/src/test/queries/clientnegative/join32.q     |     1 -
 ql/src/test/queries/clientnegative/join35.q     |     2 +-
 .../test/queries/clientnegative/msck_repair_4.q |    14 -
 .../queries/clientnegative/nvl_mismatch_type.q  |     2 +-
 .../queries/clientnegative/smb_bucketmapjoin.q  |     2 +-
 .../queries/clientnegative/smb_mapjoin_14.q     |     2 +-
 .../sortmerge_mapjoin_mismatch_1.q              |     3 +-
 .../clientnegative/spark_job_max_tasks.q        |     6 -
 .../subquery_scalar_corr_multi_rows.q           |     2 -
 .../subquery_select_complex_expr.q              |     3 +
 .../clientnegative/subquery_select_no_source.q  |     2 -
 .../clientnegative/subquery_select_udf.q        |     2 +
 .../clientnegative/subquery_with_or_cond.q      |     5 +
 .../queries/clientnegative/udf_likeall_wrong1.q |     2 -
 .../queries/clientnegative/udf_likeany_wrong1.q |     2 -
 ql/src/test/queries/clientnegative/union22.q    |     2 +-
 .../clientpositive/alter_table_column_stats.q   |   241 -
 .../alter_table_invalidate_column_stats.q       |   153 +
 .../annotate_stats_deep_filters.q               |     2 +-
 .../clientpositive/auto_sortmerge_join_11.q     |     2 +-
 .../test/queries/clientpositive/avro_decimal.q  |    14 +-
 .../clientpositive/avro_decimal_native.q        |    14 +-
 .../queries/clientpositive/bucket_map_join_1.q  |     2 +-
 .../queries/clientpositive/bucket_map_join_2.q  |     2 +-
 .../queries/clientpositive/bucketcontext_1.q    |     2 +-
 .../queries/clientpositive/bucketcontext_2.q    |     2 +-
 .../queries/clientpositive/bucketcontext_3.q    |     2 +-
 .../queries/clientpositive/bucketcontext_4.q    |     2 +-
 .../queries/clientpositive/bucketcontext_5.q    |     2 +-
 .../queries/clientpositive/bucketcontext_6.q    |     2 +-
 .../queries/clientpositive/bucketcontext_7.q    |     2 +-
 .../queries/clientpositive/bucketcontext_8.q    |     2 +-
 .../queries/clientpositive/bucketmapjoin10.q    |     2 +-
 .../queries/clientpositive/bucketmapjoin11.q    |     2 +-
 .../queries/clientpositive/bucketmapjoin12.q    |     2 +-
 .../queries/clientpositive/bucketmapjoin13.q    |     2 +-
 .../queries/clientpositive/bucketmapjoin5.q     |     2 +-
 .../queries/clientpositive/bucketmapjoin7.q     |     1 -
 .../queries/clientpositive/bucketmapjoin8.q     |     2 +-
 .../queries/clientpositive/bucketmapjoin9.q     |     1 -
 .../clientpositive/bucketmapjoin_negative.q     |     1 -
 .../clientpositive/bucketmapjoin_negative2.q    |     2 +-
 .../clientpositive/bucketmapjoin_negative3.q    |     2 +-
 ql/src/test/queries/clientpositive/cbo_rp_gby.q |     1 +
 .../test/queries/clientpositive/cbo_rp_join.q   |     1 +
 .../test/queries/clientpositive/cbo_rp_limit.q  |     1 +
 .../queries/clientpositive/cbo_rp_semijoin.q    |     1 +
 .../clientpositive/cbo_rp_unionDistinct_2.q     |     1 -
 .../test/queries/clientpositive/cbo_rp_views.q  |     7 +-
 .../queries/clientpositive/cbo_rp_windowing_2.q |     6 +-
 .../queries/clientpositive/char_pad_convert.q   |    12 +-
 ...umn_names_with_leading_and_trailing_spaces.q |     5 -
 .../column_pruner_multiple_children.q           |    19 -
 .../clientpositive/columnstats_infinity.q       |    44 -
 ql/src/test/queries/clientpositive/comments.q   |     4 +-
 .../clientpositive/correlated_join_keys.q       |    34 -
 .../clientpositive/create_with_constraints.q    |    24 +-
 .../test/queries/clientpositive/decimal_10_0.q  |     6 +-
 .../queries/clientpositive/decimal_precision.q  |    28 +-
 .../queries/clientpositive/distinct_windowing.q |     2 +-
 .../clientpositive/distinct_windowing_no_cbo.q  |     2 +-
 .../clientpositive/drop_partitions_filter4.q    |    10 -
 .../test/queries/clientpositive/druid_basic2.q  |    24 -
 .../clientpositive/dynamic_partition_pruning.q  |     3 +-
 .../clientpositive/dynamic_semijoin_reduction.q |    20 +-
 .../dynamic_semijoin_reduction_2.q              |     3 -
 .../dynamic_semijoin_reduction_3.q              |    79 -
 .../dynamic_semijoin_user_level.q               |   107 -
 .../dynpart_sort_opt_vectorization.q            |     2 +-
 .../clientpositive/dynpart_sort_optimization.q  |     2 +-
 .../encryption_auto_purge_tables.q              |    38 -
 .../clientpositive/encryption_drop_partition.q  |     7 +-
 .../clientpositive/encryption_drop_table.q      |    12 +-
 .../encryption_drop_table_in_encrypted_db.q     |    20 -
 .../clientpositive/encryption_move_tbl.q        |     8 +-
 .../test/queries/clientpositive/explainuser_4.q |     1 -
 .../clientpositive/fp_literal_arithmetic.q      |    57 -
 .../clientpositive/groupby_grouping_id1.q       |     2 -
 .../clientpositive/groupby_grouping_id2.q       |     4 -
 .../clientpositive/groupby_grouping_id3.q       |    12 -
 .../clientpositive/groupby_grouping_sets1.q     |    20 -
 .../clientpositive/groupby_grouping_sets2.q     |     4 -
 .../clientpositive/groupby_grouping_sets3.q     |     6 -
 .../clientpositive/groupby_grouping_sets4.q     |     1 -
 .../clientpositive/groupby_grouping_sets5.q     |     2 -
 .../clientpositive/groupby_grouping_sets6.q     |     2 -
 .../groupby_grouping_sets_grouping.q            |    39 -
 .../groupby_grouping_sets_limit.q               |     3 -
 .../infer_bucket_sort_map_operators.q           |     1 -
 .../queries/clientpositive/inputwherefalse.q    |    19 -
 .../queries/clientpositive/is_distinct_from.q   |    46 -
 .../test/queries/clientpositive/jdbc_handler.q  |     1 -
 ql/src/test/queries/clientpositive/join25.q     |     2 +-
 ql/src/test/queries/clientpositive/join26.q     |     1 -
 ql/src/test/queries/clientpositive/join27.q     |     2 +-
 ql/src/test/queries/clientpositive/join30.q     |     2 +-
 ql/src/test/queries/clientpositive/join36.q     |     2 +-
 ql/src/test/queries/clientpositive/join37.q     |     1 -
 ql/src/test/queries/clientpositive/join38.q     |     1 -
 ql/src/test/queries/clientpositive/join39.q     |     1 -
 ql/src/test/queries/clientpositive/join40.q     |     1 -
 .../clientpositive/join_is_not_distinct_from.q  |    71 -
 .../test/queries/clientpositive/join_map_ppr.q  |     1 -
 .../queries/clientpositive/join_on_varchar.q    |     1 -
 .../test/queries/clientpositive/join_reorder.q  |     1 -
 .../test/queries/clientpositive/join_reorder2.q |     1 -
 .../test/queries/clientpositive/join_reorder3.q |     2 +-
 .../test/queries/clientpositive/join_reorder4.q |     2 +-
 .../clientpositive/lateral_view_onview.q        |     6 -
 ql/src/test/queries/clientpositive/llap_text.q  |     2 +-
 .../clientpositive/llap_vector_nohybridgrace.q  |    32 -
 ql/src/test/queries/clientpositive/mapjoin1.q   |     2 -
 .../queries/clientpositive/mapjoin_decimal.q    |    16 +-
 .../queries/clientpositive/mapjoin_distinct.q   |     1 -
 ql/src/test/queries/clientpositive/mergejoin.q  |     2 -
 .../clientpositive/metadata_only_queries.q      |     6 +-
 .../metadata_only_queries_with_filters.q        |     8 +-
 ql/src/test/queries/clientpositive/mm_all.q     |    22 +-
 .../test/queries/clientpositive/msck_repair_0.q |     7 -
 .../queries/clientpositive/named_column_join.q  |    52 -
 .../clientpositive/optimize_filter_literal.q    |    49 -
 .../test/queries/clientpositive/orc_file_dump.q |     6 +-
 .../queries/clientpositive/orc_llap_counters.q  |    10 +-
 .../queries/clientpositive/orc_llap_counters1.q |    10 +-
 .../test/queries/clientpositive/orc_ppd_basic.q |    10 +-
 .../clientpositive/orc_ppd_schema_evol_3a.q     |    10 +-
 .../clientpositive/orc_predicate_pushdown.q     |     4 +-
 .../clientpositive/outer_reference_windowed.q   |    80 -
 .../queries/clientpositive/parallel_colstats.q  |    32 -
 .../queries/clientpositive/parquet_decimal.q    |    12 +-
 .../clientpositive/parquet_int96_timestamp.q    |     2 +-
 .../clientpositive/parquet_ppd_multifiles.q     |     6 +-
 .../clientpositive/parquet_predicate_pushdown.q |     6 +-
 .../clientpositive/partitions_filter_default.q  |    14 -
 ql/src/test/queries/clientpositive/pcs.q        |     3 +-
 .../test/queries/clientpositive/perf/query9.q   |    50 -
 .../clientpositive/position_alias_test_1.q      |     5 +-
 .../queries/clientpositive/primitive_types.q    |     4 +-
 .../queries/clientpositive/quotedid_stats.q     |    11 -
 .../clientpositive/rename_partition_location.q  |    14 -
 ql/src/test/queries/clientpositive/row__id.q    |     4 +-
 .../clientpositive/schema_evol_orc_acid_part.q  |    15 +-
 .../schema_evol_orc_acid_part_update.q          |     3 +-
 .../clientpositive/schema_evol_orc_acid_table.q |     9 +-
 .../schema_evol_orc_acid_table_update.q         |     3 +-
 .../schema_evol_orc_acidvec_part.q              |    46 +-
 .../schema_evol_orc_acidvec_part_update.q       |     3 +-
 .../schema_evol_orc_acidvec_table.q             |    48 +-
 .../schema_evol_orc_acidvec_table_update.q      |     3 +-
 .../schema_evol_orc_nonvec_part.q               |    22 +-
 .../schema_evol_orc_nonvec_part_all_complex.q   |    10 +-
 .../schema_evol_orc_nonvec_part_all_primitive.q |    14 +-
 .../schema_evol_orc_nonvec_table.q              |    14 +-
 .../clientpositive/schema_evol_orc_vec_part.q   |     2 +-
 .../schema_evol_orc_vec_part_all_complex.q      |     2 +-
 .../schema_evol_orc_vec_part_all_primitive.q    |     2 +-
 .../clientpositive/schema_evol_orc_vec_table.q  |     2 +-
 .../schema_evol_text_nonvec_part.q              |    22 +-
 .../schema_evol_text_nonvec_part_all_complex.q  |    10 +-
 ...schema_evol_text_nonvec_part_all_primitive.q |    14 +-
 .../schema_evol_text_nonvec_table.q             |    14 +-
 .../clientpositive/schema_evol_text_vec_part.q  |     2 +-
 .../schema_evol_text_vec_part_all_complex.q     |     2 +-
 .../schema_evol_text_vec_part_all_primitive.q   |     2 +-
 .../clientpositive/schema_evol_text_vec_table.q |     2 +-
 .../schema_evol_text_vecrow_part.q              |     2 +-
 .../schema_evol_text_vecrow_part_all_complex.q  |     2 +-
 ...schema_evol_text_vecrow_part_all_primitive.q |     2 +-
 .../schema_evol_text_vecrow_table.q             |     2 +-
 .../clientpositive/select_column_pruning.q      |     4 -
 .../test/queries/clientpositive/semijoin_hint.q |    89 -
 ql/src/test/queries/clientpositive/skewjoin.q   |     2 +-
 .../test/queries/clientpositive/smb_mapjoin9.q  |     3 +-
 .../test/queries/clientpositive/smb_mapjoin_1.q |     1 -
 .../queries/clientpositive/smb_mapjoin_10.q     |     2 +-
 .../queries/clientpositive/smb_mapjoin_11.q     |     1 -
 .../queries/clientpositive/smb_mapjoin_12.q     |     4 +-
 .../queries/clientpositive/smb_mapjoin_13.q     |     2 +-
 .../queries/clientpositive/smb_mapjoin_16.q     |     2 +-
 .../test/queries/clientpositive/smb_mapjoin_2.q |     2 +-
 .../test/queries/clientpositive/smb_mapjoin_3.q |     1 -
 .../test/queries/clientpositive/smb_mapjoin_7.q |     2 +-
 .../clientpositive/sort_merge_join_desc_1.q     |     2 +-
 .../clientpositive/sort_merge_join_desc_2.q     |     1 -
 .../clientpositive/sort_merge_join_desc_3.q     |     2 +-
 .../clientpositive/sort_merge_join_desc_4.q     |     2 +-
 .../clientpositive/sort_merge_join_desc_5.q     |     1 -
 .../clientpositive/sort_merge_join_desc_6.q     |     2 +-
 .../clientpositive/sort_merge_join_desc_7.q     |     2 +-
 .../clientpositive/sort_merge_join_desc_8.q     |     2 +-
 .../clientpositive/spark_explainuser_1.q        |   671 -
 .../queries/clientpositive/subquery_multi.q     |    15 -
 .../queries/clientpositive/subquery_scalar.q    |     1 -
 .../queries/clientpositive/subquery_select.q    |   138 +-
 .../temp_table_windowing_expressions.q          |     2 +-
 .../test/queries/clientpositive/tez_smb_main.q  |    13 +-
 .../tez_vector_dynpart_hashjoin_1.q             |     1 -
 .../test/queries/clientpositive/tunable_ndv.q   |    64 -
 .../clientpositive/udaf_binarysetfunctions.q    |    58 -
 .../clientpositive/udaf_percentile_approx_23.q  |     1 -
 .../clientpositive/udf_character_length.q       |    29 -
 .../test/queries/clientpositive/udf_likeall.q   |    57 -
 .../test/queries/clientpositive/udf_likeany.q   |    57 -
 .../queries/clientpositive/udf_octet_length.q   |    21 -
 .../clientpositive/udf_round_2_auto_stats.q     |    16 -
 ql/src/test/queries/clientpositive/udf_trunc.q  |   146 +-
 .../queries/clientpositive/udf_width_bucket.q   |   204 -
 .../queries/clientpositive/unionDistinct_1.q    |     4 +-
 .../queries/clientpositive/updateAccessTime.q   |     5 -
 .../clientpositive/vector_binary_join_groupby.q |    22 +-
 .../clientpositive/vector_cast_constant.q       |     4 +-
 .../clientpositive/vector_complex_join.q        |     3 +-
 .../queries/clientpositive/vector_data_types.q  |    16 +-
 .../clientpositive/vector_decimal_10_0.q        |     8 +-
 .../clientpositive/vector_decimal_mapjoin.q     |    14 +-
 .../clientpositive/vector_decimal_precision.q   |    26 +-
 .../clientpositive/vector_decimal_round.q       |    31 +-
 .../clientpositive/vector_decimal_round_2.q     |    92 +-
 .../clientpositive/vector_groupby_cube1.q       |    55 -
 .../vector_groupby_grouping_id1.q               |    23 -
 .../vector_groupby_grouping_id2.q               |    65 -
 .../vector_groupby_grouping_id3.q               |    42 -
 .../vector_groupby_grouping_sets1.q             |    43 -
 .../vector_groupby_grouping_sets2.q             |    36 -
 .../vector_groupby_grouping_sets3.q             |    40 -
 .../vector_groupby_grouping_sets4.q             |    57 -
 .../vector_groupby_grouping_sets5.q             |    39 -
 .../vector_groupby_grouping_sets6.q             |    38 -
 .../vector_groupby_grouping_sets_grouping.q     |   135 -
 .../vector_groupby_grouping_sets_limit.q        |    44 -
 .../vector_groupby_grouping_window.q            |    21 -
 .../clientpositive/vector_groupby_mapjoin.q     |    14 -
 .../clientpositive/vector_groupby_reduce.q      |     2 -
 .../clientpositive/vector_groupby_rollup1.q     |    54 -
 .../queries/clientpositive/vector_order_null.q  |    56 -
 .../clientpositive/vector_ptf_part_simple.q     |   268 -
 .../clientpositive/vector_string_concat.q       |     4 +-
 .../vector_udf_character_length.q               |    31 -
 .../clientpositive/vector_udf_octet_length.q    |    23 -
 .../clientpositive/vectorized_bucketmapjoin1.q  |     2 +-
 .../queries/clientpositive/vectorized_case.q    |    18 -
 .../vectorized_dynamic_partition_pruning.q      |     3 +-
 .../vectorized_dynamic_semijoin_reduction.q     |     2 -
 .../vectorized_dynamic_semijoin_reduction2.q    |     4 -
 .../clientpositive/vectorized_parquet_types.q   |     5 +-
 .../queries/clientpositive/vectorized_ptf.q     |    46 +-
 ql/src/test/queries/clientpositive/windowing.q  |     2 +-
 .../queries/clientpositive/windowing_distinct.q |     8 +-
 .../clientpositive/windowing_expressions.q      |     2 +-
 .../windowing_multipartitioning.q               |     6 +-
 .../queries/clientpositive/windowing_navfn.q    |     6 +-
 .../queries/clientpositive/windowing_ntile.q    |     4 +-
 .../clientpositive/windowing_order_null.q       |     2 +-
 .../clientpositive/windowing_range_multiorder.q |     2 +-
 .../queries/clientpositive/windowing_rank.q     |    28 +-
 .../clientpositive/windowing_streaming.q        |     4 +-
 .../queries/clientpositive/windowing_udaf.q     |     4 +-
 .../clientpositive/windowing_windowspec.q       |     4 +-
 .../clientpositive/windowing_windowspec2.q      |     2 +-
 .../clientpositive/zero_rows_single_insert.q    |    17 -
 .../results/clientnegative/acid_overwrite.q.out |     2 +-
 .../clientnegative/alter_non_native.q.out       |     2 +-
 .../alter_view_as_select_with_partition.q.out   |     1 -
 .../clientnegative/alter_view_failure6.q.out    |     2 +-
 .../results/clientnegative/bad_exec_hooks.q.out |     2 +-
 .../clientnegative/char_pad_convert_fail0.q.out |     4 +-
 .../clientnegative/char_pad_convert_fail1.q.out |     4 +-
 .../clientnegative/char_pad_convert_fail2.q.out |     4 +-
 .../clientnegative/char_pad_convert_fail3.q.out |     4 +-
 ...tats_partlvl_invalid_values_autogather.q.out |    69 -
 .../clientnegative/decimal_precision.q.out      |     6 +-
 .../clientnegative/decimal_precision_1.q.out    |     6 +-
 .../distinct_windowing_failure1.q.out           |     4 +-
 .../distinct_windowing_failure2.q.out           |     4 +-
 .../drop_default_partition_filter.q.out         |    23 -
 .../test/results/clientnegative/external1.q.out |     2 +-
 .../clientnegative/input_part0_neg.q.out        |     2 +-
 .../insert_into_with_schema.q.out               |     2 +-
 .../insert_into_with_schema1.q.out              |     2 +-
 .../insert_into_with_schema2.q.out              |     2 +-
 .../results/clientnegative/msck_repair_4.q.out  |    22 -
 .../clientnegative/nvl_mismatch_type.q.out      |     4 +-
 .../ptf_negative_InvalidValueBoundary.q.out     |     3 +-
 .../spark/spark_job_max_tasks.q.out             |    77 -
 .../subquery_corr_grandparent.q.out             |     2 +-
 .../clientnegative/subquery_in_select.q.out     |     1 +
 .../subquery_scalar_corr_multi_rows.q.out       |     5 -
 .../subquery_scalar_multi_columns.q.out         |     4 +-
 .../subquery_scalar_multi_rows.q.out            |     3 +-
 .../subquery_select_aggregate.q.out             |     2 +-
 .../subquery_select_complex_expr.q.out          |     1 +
 .../subquery_select_no_source.q.out             |     1 -
 .../clientnegative/subquery_select_udf.q.out    |     1 +
 .../clientnegative/subquery_with_or_cond.q.out  |     1 +
 .../clientnegative/udf_likeall_wrong1.q.out     |     1 -
 .../clientnegative/udf_likeany_wrong1.q.out     |     1 -
 ql/src/test/results/clientnegative/union2.q.out |     2 +-
 .../clientnegative/wrong_column_type.q.out      |     2 +-
 .../clientpositive/acid_table_stats.q.out       |    16 +-
 .../clientpositive/add_part_multiple.q.out      |    16 +-
 ...lter_numbuckets_partitioned_table2_h23.q.out |     9 -
 ...alter_numbuckets_partitioned_table_h23.q.out |     8 -
 .../alter_partition_clusterby_sortby.q.out      |     1 -
 .../alter_partition_coltype.q.out               |     2 +-
 .../alter_table_add_partition.q.out             |     3 -
 .../alter_table_column_stats.q.out              |  2706 --
 .../alter_table_invalidate_column_stats.q.out   |   932 +
 .../clientpositive/alter_table_serde2.q.out     |     2 -
 .../annotate_stats_deep_filters.q.out           |     4 +-
 .../clientpositive/autoColumnStats_3.q.out      |     1 -
 .../clientpositive/autoColumnStats_4.q.out      |     4 +-
 .../clientpositive/autoColumnStats_7.q.out      |     2 +-
 .../clientpositive/autoColumnStats_8.q.out      |     4 +-
 .../results/clientpositive/avro_decimal.q.out   |    28 +-
 .../clientpositive/avro_decimal_native.q.out    |    28 +-
 .../avro_schema_evolution_native.q.out          |     2 -
 .../results/clientpositive/ba_table_udfs.q.out  |     2 +-
 .../beeline/drop_with_concurrency.q.out         |    63 +-
 .../beeline/escape_comments.q.out               |   428 +-
 .../beeline/select_dummy_source.q.out           |   251 -
 .../clientpositive/beeline/smb_mapjoin_1.q.out  |   490 -
 .../clientpositive/beeline/smb_mapjoin_10.q.out |   107 -
 .../clientpositive/beeline/smb_mapjoin_11.q.out |  2161 -
 .../clientpositive/beeline/smb_mapjoin_12.q.out |   430 -
 .../clientpositive/beeline/smb_mapjoin_13.q.out |   388 -
 .../clientpositive/beeline/smb_mapjoin_16.q.out |    96 -
 .../clientpositive/beeline/smb_mapjoin_2.q.out  |   498 -
 .../clientpositive/beeline/smb_mapjoin_3.q.out  |   494 -
 .../clientpositive/beeline/smb_mapjoin_7.q.out  |  1268 -
 .../clientpositive/bucket_map_join_spark1.q.out |     8 +-
 .../clientpositive/bucket_map_join_spark2.q.out |     8 +-
 .../clientpositive/bucket_map_join_spark3.q.out |     8 +-
 .../results/clientpositive/bucketmapjoin5.q.out |    12 +-
 .../clientpositive/bucketmapjoin_negative.q.out |     2 +-
 .../bucketmapjoin_negative2.q.out               |     2 +-
 .../bucketsortoptimize_insert_3.q.out           |     4 +-
 ql/src/test/results/clientpositive/cast1.q.out  |     6 +-
 .../clientpositive/cbo_rp_auto_join1.q.out      |     4 +-
 .../clientpositive/cbo_rp_outer_join_ppr.q.out  |   297 +-
 ql/src/test/results/clientpositive/char_1.q.out |     8 +-
 .../clientpositive/char_pad_convert.q.out       |    20 +-
 .../columnStatsUpdateForStatsOptimizer_2.q.out  |     8 +-
 ...names_with_leading_and_trailing_spaces.q.out |    20 -
 .../column_pruner_multiple_children.q.out       |   189 -
 .../clientpositive/columnstats_infinity.q.out   |   295 -
 .../clientpositive/columnstats_partlvl.q.out    |     4 +-
 .../clientpositive/columnstats_partlvl_dp.q.out |    16 +-
 .../clientpositive/columnstats_tbllvl.q.out     |    16 +-
 .../test/results/clientpositive/comments.q.out  |    76 +-
 .../results/clientpositive/complex_alias.q.out  |     8 +-
 .../clientpositive/constant_prop_3.q.out        |     4 +-
 .../results/clientpositive/constprog2.q.out     |    31 +-
 .../clientpositive/correlated_join_keys.q.out   |   258 -
 .../clientpositive/correlationoptimizer13.q.out |     8 +-
 .../clientpositive/create_like_view.q.out       |     1 -
 .../clientpositive/create_or_replace_view.q.out |     4 -
 .../create_table_like_stats.q.out               |     2 -
 .../create_view_partitioned.q.out               |     3 -
 .../create_with_constraints.q.out               |    56 +-
 .../results/clientpositive/decimal_10_0.q.out   |     8 +-
 .../clientpositive/decimal_precision.q.out      |    56 +-
 .../results/clientpositive/decimal_udf.q.out    |    12 +-
 .../clientpositive/default_file_format.q.out    |    10 -
 .../results/clientpositive/deleteAnalyze.q.out  |    20 +-
 .../clientpositive/describe_syntax.q.out        |     2 -
 .../results/clientpositive/describe_table.q.out |     3 -
 .../display_colstats_tbllvl.q.out               |     8 +-
 .../clientpositive/distinct_windowing.q.out     |     4 +-
 .../distinct_windowing_no_cbo.q.out             |     8 +-
 .../drop_partitions_filter4.q.out               |    71 -
 .../results/clientpositive/druid_basic2.q.out   |   376 +-
 .../clientpositive/druid_intervals.q.out        |   138 +-
 .../clientpositive/druid_timeseries.q.out       |    48 +-
 .../results/clientpositive/druid_topn.q.out     |   328 +-
 .../clientpositive/dynamic_rdd_cache.q.out      |    56 +-
 .../encryption_auto_purge_tables.q.out          |   157 -
 .../encrypted/encryption_drop_partition.q.out   |    37 +-
 .../encrypted/encryption_drop_table.q.out       |    59 +-
 .../encryption_drop_table_in_encrypted_db.q.out |    53 -
 .../encrypted/encryption_move_tbl.q.out         |    50 +-
 .../clientpositive/escape_comments.q.out        |     1 -
 .../results/clientpositive/except_all.q.out     |    16 +-
 .../clientpositive/exim_hidden_files.q.out      |     1 -
 .../clientpositive/filter_cond_pushdown.q.out   |   124 +-
 .../clientpositive/filter_join_breaktask2.q.out |    46 +-
 .../clientpositive/fouter_join_ppr.q.out        |   594 +-
 .../clientpositive/fp_literal_arithmetic.q.out  |   338 -
 .../test/results/clientpositive/groupby12.q.out |     2 +-
 .../test/results/clientpositive/groupby5.q.out  |     2 +-
 .../clientpositive/groupby5_noskew.q.out        |     2 +-
 .../results/clientpositive/groupby7_map.q.out   |     4 +-
 .../groupby7_map_multi_single_reducer.q.out     |     4 +-
 .../clientpositive/groupby7_map_skew.q.out      |     4 +-
 .../clientpositive/groupby7_noskew.q.out        |     4 +-
 .../groupby7_noskew_multi_single_reducer.q.out  |     4 +-
 .../test/results/clientpositive/groupby8.q.out  |     8 +-
 .../results/clientpositive/groupby8_map.q.out   |     4 +-
 .../clientpositive/groupby8_map_skew.q.out      |     4 +-
 .../clientpositive/groupby8_noskew.q.out        |     4 +-
 .../test/results/clientpositive/groupby9.q.out  |    28 +-
 .../clientpositive/groupby_cube_multi_gby.q.out |     2 +-
 .../clientpositive/groupby_grouping_id1.q.out   |   120 +-
 .../clientpositive/groupby_grouping_id3.q.out   |   139 -
 .../clientpositive/groupby_grouping_sets1.q.out |   496 +-
 .../clientpositive/groupby_grouping_sets2.q.out |    62 +-
 .../clientpositive/groupby_grouping_sets3.q.out |    41 +-
 .../clientpositive/groupby_grouping_sets5.q.out |    36 +-
 .../clientpositive/groupby_grouping_sets6.q.out |     4 +-
 .../groupby_grouping_sets_grouping.q.out        |   470 +-
 .../groupby_grouping_sets_limit.q.out           |    34 +-
 .../clientpositive/groupby_join_pushdown.q.out  |    26 +-
 .../groupby_multi_single_reducer.q.out          |     2 +-
 .../clientpositive/groupby_position.q.out       |    34 +-
 .../clientpositive/groupby_sort_skew_1_23.q.out |     4 +-
 .../test/results/clientpositive/having2.q.out   |    86 +-
 .../clientpositive/index_auto_unused.q.out      |    38 +-
 .../clientpositive/index_auto_update.q.out      |     4 +-
 .../clientpositive/infer_bucket_sort.q.out      |     6 +-
 .../infer_bucket_sort_grouping_operators.q.out  |    30 +-
 .../infer_bucket_sort_map_operators.q.out       |    53 +-
 .../infer_bucket_sort_reducers_power_two.q.out  |     6 +-
 ql/src/test/results/clientpositive/input8.q.out |     4 +-
 ql/src/test/results/clientpositive/input9.q.out |     4 +-
 .../results/clientpositive/input_part10.q.out   |    24 +-
 .../clientpositive/inputwherefalse.q.out        |    55 -
 .../insert_values_orig_table_use_metadata.q.out |    18 +-
 ql/src/test/results/clientpositive/join25.q.out |   114 +-
 ql/src/test/results/clientpositive/join26.q.out |   418 +-
 ql/src/test/results/clientpositive/join27.q.out |   112 +-
 ql/src/test/results/clientpositive/join30.q.out |    66 +-
 ql/src/test/results/clientpositive/join36.q.out |   114 +-
 ql/src/test/results/clientpositive/join37.q.out |   114 +-
 ql/src/test/results/clientpositive/join38.q.out |   108 +-
 ql/src/test/results/clientpositive/join39.q.out |    86 +-
 ql/src/test/results/clientpositive/join40.q.out |   117 +-
 ql/src/test/results/clientpositive/join45.q.out |    64 +-
 .../clientpositive/join_alt_syntax.q.out        |   118 +-
 .../clientpositive/join_cond_pushdown_1.q.out   |   100 +-
 .../clientpositive/join_cond_pushdown_3.q.out   |   100 +-
 .../join_cond_pushdown_unqual1.q.out            |     2 +-
 .../join_cond_pushdown_unqual3.q.out            |     2 +-
 .../results/clientpositive/join_merging.q.out   |    72 +-
 .../clientpositive/join_on_varchar.q.out        |    46 +-
 .../results/clientpositive/join_reorder.q.out   |   106 +-
 .../results/clientpositive/join_reorder4.q.out  |   327 +-
 .../clientpositive/lateral_view_onview.q.out    |   205 +-
 .../limit_pushdown_negative.q.out               |     4 +-
 .../llap/acid_bucket_pruning.q.out              |     6 +-
 .../clientpositive/llap/autoColumnStats_1.q.out |     1 -
 .../clientpositive/llap/autoColumnStats_2.q.out |     5 +-
 .../llap/auto_smb_mapjoin_14.q.out              |     4 +-
 .../llap/auto_sortmerge_join_9.q.out            |     4 +-
 .../clientpositive/llap/bucket_groupby.q.out    |   426 +-
 .../clientpositive/llap/bucketmapjoin1.q.out    |   212 +-
 .../clientpositive/llap/bucketmapjoin2.q.out    |   156 +-
 .../clientpositive/llap/bucketmapjoin3.q.out    |   104 +-
 .../clientpositive/llap/bucketmapjoin4.q.out    |   104 +-
 .../clientpositive/llap/cbo_rp_lineage2.q.out   |    58 +-
 .../clientpositive/llap/cbo_rp_views.q.out      |     4 +-
 .../llap/cbo_rp_windowing_2.q.out               |    54 +-
 .../results/clientpositive/llap/cbo_views.q.out |     2 +-
 .../results/clientpositive/llap/cluster.q.out   |   690 +-
 ...names_with_leading_and_trailing_spaces.q.out |    20 -
 .../llap/column_table_stats.q.out               |    18 +-
 .../llap/column_table_stats_orc.q.out           |    12 +-
 .../llap/constprog_semijoin.q.out               |    16 +-
 .../llap/correlationoptimizer1.q.out            |    32 +-
 .../results/clientpositive/llap/cte_1.q.out     | 37426 ++++++++---------
 .../clientpositive/llap/deleteAnalyze.q.out     |    16 +-
 .../llap/dynamic_partition_pruning.q.out        |    39 +-
 .../llap/dynamic_partition_pruning_2.q.out      |    72 +-
 .../llap/dynamic_semijoin_reduction.q.out       |  1159 +-
 .../llap/dynamic_semijoin_reduction_2.q.out     |     8 +-
 .../llap/dynamic_semijoin_reduction_3.q.out     |   266 +-
 .../llap/dynamic_semijoin_user_level.q.out      |  1486 -
 .../llap/dynpart_sort_opt_vectorization.q.out   |    50 +-
 .../llap/dynpart_sort_optimization.q.out        |     4 +-
 .../llap/dynpart_sort_optimization2.q.out       |     4 +-
 .../clientpositive/llap/except_distinct.q.out   |     2 +-
 .../clientpositive/llap/explainuser_1.q.out     |  1356 +-
 .../clientpositive/llap/explainuser_2.q.out     |  2302 +-
 .../clientpositive/llap/explainuser_4.q.out     |     6 +-
 .../llap/filter_join_breaktask2.q.out           |    46 +-
 .../llap/groupby_grouping_id2.q.out             |     9 -
 .../llap/hybridgrace_hashjoin_2.q.out           |     4 +-
 .../clientpositive/llap/intersect_all.q.out     |  1976 +-
 .../llap/intersect_distinct.q.out               |   892 +-
 .../clientpositive/llap/is_distinct_from.q.out  |   335 -
 .../clientpositive/llap/jdbc_handler.q.out      |    52 +-
 .../clientpositive/llap/join_filters.q.out      |     8 +-
 .../llap/join_is_not_distinct_from.q.out        |  1673 -
 .../clientpositive/llap/join_nulls.q.out        |     2 +-
 .../clientpositive/llap/lateral_view.q.out      |    12 +-
 .../clientpositive/llap/limit_pushdown.q.out    |     4 +-
 .../clientpositive/llap/limit_pushdown3.q.out   |     4 +-
 .../results/clientpositive/llap/lineage2.q.out  |    58 +-
 .../results/clientpositive/llap/lineage3.q.out  |     4 +-
 .../clientpositive/llap/llap_stats.q.out        |     4 +-
 .../llap/llap_vector_nohybridgrace.q.out        |   356 -
 .../clientpositive/llap/mapjoin_decimal.q.out   |    32 +-
 .../results/clientpositive/llap/mergejoin.q.out |   470 +-
 .../llap/metadata_only_queries.q.out            |    12 +-
 .../metadata_only_queries_with_filters.q.out    |    16 +-
 .../clientpositive/llap/multiMapJoin1.q.out     |     4 +-
 .../clientpositive/llap/multi_column_in.q.out   |     2 +-
 .../llap/multi_count_distinct_null.q.out        |    24 +-
 .../llap/multi_insert_lateral_view.q.out        |   316 +-
 .../llap/offset_limit_ppd_optimizer.q.out       |     4 +-
 .../clientpositive/llap/orc_analyze.q.out       |    32 +-
 .../clientpositive/llap/orc_create.q.out        |     4 -
 .../clientpositive/llap/orc_llap_counters.q.out |    20 +-
 .../llap/orc_llap_counters1.q.out               |    20 +-
 .../clientpositive/llap/orc_merge10.q.out       |     4 +-
 .../clientpositive/llap/orc_merge11.q.out       |   110 +-
 .../clientpositive/llap/orc_merge12.q.out       |     4 +-
 .../clientpositive/llap/orc_ppd_basic.q.out     |    20 +-
 .../clientpositive/llap/orc_ppd_decimal.q.out   |     4 +-
 .../llap/orc_ppd_schema_evol_3a.q.out           |    20 +-
 .../llap/orc_predicate_pushdown.q.out           |    14 +-
 .../clientpositive/llap/parallel_colstats.q.out |  1516 -
 .../llap/parquet_predicate_pushdown.q.out       |    14 +-
 .../clientpositive/llap/parquet_types.q.out     |     2 +-
 .../llap/partition_multilevels.q.out            |   244 +-
 .../test/results/clientpositive/llap/ptf.q.out  |    12 +-
 .../llap/reduce_deduplicate_extended.q.out      |    78 +-
 .../llap/schema_evol_orc_acid_part.q.out        |    91 +-
 .../llap/schema_evol_orc_acid_table.q.out       |    11 -
 .../llap/schema_evol_orc_acidvec_part.q.out     |   700 +-
 .../llap/schema_evol_orc_acidvec_table.q.out    |   686 -
 .../llap/schema_evol_orc_nonvec_part.q.out      |   450 +-
 ...chema_evol_orc_nonvec_part_all_complex.q.out |   150 +-
 ...ema_evol_orc_nonvec_part_all_primitive.q.out |   250 +-
 .../llap/schema_evol_orc_nonvec_table.q.out     |   250 +-
 .../clientpositive/llap/schema_evol_stats.q.out |     2 -
 .../llap/schema_evol_text_nonvec_part.q.out     |   450 +-
 ...hema_evol_text_nonvec_part_all_complex.q.out |   150 +-
 ...ma_evol_text_nonvec_part_all_primitive.q.out |   250 +-
 .../llap/schema_evol_text_nonvec_table.q.out    |   250 +-
 .../clientpositive/llap/semijoin_hint.q.out     |  2646 --
 .../results/clientpositive/llap/skewjoin.q.out  |    98 +-
 .../clientpositive/llap/skiphf_aggr.q.out       |     4 +-
 .../clientpositive/llap/smb_mapjoin_14.q.out    |   207 +-
 .../clientpositive/llap/smb_mapjoin_15.q.out    |   276 +-
 .../clientpositive/llap/smb_mapjoin_17.q.out    |    98 +-
 .../clientpositive/llap/smb_mapjoin_4.q.out     |   889 +-
 .../clientpositive/llap/smb_mapjoin_5.q.out     |   889 +-
 .../clientpositive/llap/smb_mapjoin_6.q.out     |   274 +-
 .../results/clientpositive/llap/stats11.q.out   |   104 +-
 .../clientpositive/llap/stats_noscan_1.q.out    |     1 -
 .../clientpositive/llap/subquery_exists.q.out   |    41 +-
 .../clientpositive/llap/subquery_in.q.out       |   630 +-
 .../clientpositive/llap/subquery_multi.q.out    |  1522 +-
 .../clientpositive/llap/subquery_notin.q.out    |   692 +-
 .../clientpositive/llap/subquery_scalar.q.out   |  2780 +-
 .../clientpositive/llap/subquery_select.q.out   |  5377 +--
 .../llap/table_access_keys_stats.q.out          |     4 +-
 .../llap/tez_dynpart_hashjoin_1.q.out           |     2 +-
 .../clientpositive/llap/tez_join_hash.q.out     |    10 +-
 .../clientpositive/llap/tez_smb_main.q.out      |   426 -
 .../clientpositive/llap/tez_union2.q.out        |   922 +-
 .../llap/tez_union_multiinsert.q.out            |   182 +-
 .../llap/tez_vector_dynpart_hashjoin_1.q.out    |     4 +-
 .../llap/tez_vector_dynpart_hashjoin_2.q.out    |     8 +-
 .../clientpositive/llap/unionDistinct_1.q.out   |  4226 +-
 .../clientpositive/llap/union_fast_stats.q.out  |    12 +-
 .../llap/vector_adaptor_usage_mode.q.out        |     2 +-
 .../llap/vector_aggregate_without_gby.q.out     |    12 +-
 .../llap/vector_auto_smb_mapjoin_14.q.out       |    23 +-
 .../llap/vector_between_columns.q.out           |    26 +-
 .../clientpositive/llap/vector_between_in.q.out |   102 +-
 .../llap/vector_binary_join_groupby.q.out       |   116 +-
 .../clientpositive/llap/vector_bround.q.out     |     4 +-
 .../clientpositive/llap/vector_bucket.q.out     |    25 +-
 .../llap/vector_cast_constant.q.out             |     8 +-
 .../clientpositive/llap/vector_char_2.q.out     |    20 +-
 .../llap/vector_char_mapjoin1.q.out             |    33 +-
 .../llap/vector_char_simple.q.out               |     4 +-
 .../clientpositive/llap/vector_coalesce.q.out   |    20 +-
 .../clientpositive/llap/vector_coalesce_2.q.out |    10 +-
 .../llap/vector_complex_all.q.out               |     6 +-
 .../clientpositive/llap/vector_count.q.out      |    16 +-
 .../llap/vector_count_distinct.q.out            |     9 +-
 .../clientpositive/llap/vector_data_types.q.out |    36 +-
 .../llap/vector_decimal_10_0.q.out              |    12 +-
 .../llap/vector_decimal_aggregate.q.out         |    14 +-
 .../llap/vector_decimal_expressions.q.out       |     4 +-
 .../llap/vector_decimal_mapjoin.q.out           |    30 +-
 .../llap/vector_decimal_precision.q.out         |    52 +-
 .../llap/vector_decimal_round.q.out             |   123 +-
 .../llap/vector_decimal_round_2.q.out           |   210 +-
 .../llap/vector_decimal_udf.q.out               |    44 +-
 .../clientpositive/llap/vector_distinct_2.q.out |     2 +-
 .../llap/vector_empty_where.q.out               |    36 +-
 .../clientpositive/llap/vector_groupby4.q.out   |    11 +-
 .../clientpositive/llap/vector_groupby6.q.out   |    11 +-
 .../clientpositive/llap/vector_groupby_3.q.out  |     8 +-
 .../llap/vector_groupby_cube1.q.out             |   773 -
 .../llap/vector_groupby_grouping_id1.q.out      |   179 -
 .../llap/vector_groupby_grouping_id2.q.out      |   359 -
 .../llap/vector_groupby_grouping_id3.q.out      |   370 -
 .../llap/vector_groupby_grouping_sets1.q.out    |   668 -
 .../llap/vector_groupby_grouping_sets2.q.out    |   469 -
 .../llap/vector_groupby_grouping_sets3.q.out    |   314 -
 .../llap/vector_groupby_grouping_sets4.q.out    |   554 -
 .../llap/vector_groupby_grouping_sets5.q.out    |   371 -
 .../llap/vector_groupby_grouping_sets6.q.out    |   192 -
 .../vector_groupby_grouping_sets_grouping.q.out |  1224 -
 .../vector_groupby_grouping_sets_limit.q.out    |   650 -
 .../llap/vector_groupby_grouping_window.q.out   |   157 -
 .../llap/vector_groupby_mapjoin.q.out           |   142 +-
 .../llap/vector_groupby_reduce.q.out            |    79 +-
 .../llap/vector_groupby_rollup1.q.out           |   610 -
 .../llap/vector_grouping_sets.q.out             |    77 +-
 .../clientpositive/llap/vector_if_expr.q.out    |     9 +-
 .../llap/vector_include_no_sel.q.out            |     8 +-
 .../clientpositive/llap/vector_inner_join.q.out |    18 +-
 .../clientpositive/llap/vector_interval_1.q.out |    72 +-
 .../clientpositive/llap/vector_interval_2.q.out |   104 +-
 .../llap/vector_interval_arithmetic.q.out       |    58 +-
 .../llap/vector_interval_mapjoin.q.out          |    10 +-
 .../clientpositive/llap/vector_join30.q.out     |   251 +-
 .../llap/vector_left_outer_join2.q.out          |     8 +-
 .../llap/vector_leftsemi_mapjoin.q.out          |   838 +-
 .../llap/vector_mapjoin_reduce.q.out            |   167 +-
 .../llap/vector_non_string_partition.q.out      |   144 +-
 .../llap/vector_nullsafe_join.q.out             |    36 +-
 .../llap/vector_number_compare_projection.q.out |    18 +-
 .../clientpositive/llap/vector_order_null.q.out |  1360 -
 .../clientpositive/llap/vector_orderby_5.q.out  |    15 +-
 .../llap/vector_outer_join0.q.out               |     4 +-
 .../llap/vector_outer_join1.q.out               |    17 +-
 .../llap/vector_outer_join2.q.out               |    13 +-
 .../llap/vector_partition_diff_num_cols.q.out   |    35 +-
 .../llap/vector_partitioned_date_time.q.out     |    82 +-
 .../llap/vector_ptf_part_simple.q.out           |  3032 --
 .../clientpositive/llap/vector_reduce1.q.out    |     9 +-
 .../clientpositive/llap/vector_reduce2.q.out    |     9 +-
 .../clientpositive/llap/vector_reduce3.q.out    |     9 +-
 .../llap/vector_reduce_groupby_decimal.q.out    |     6 +-
 .../llap/vector_string_concat.q.out             |    14 +-
 .../llap/vector_tablesample_rows.q.out          |     7 +-
 .../llap/vector_udf_character_length.q.out      |   287 -
 .../llap/vector_udf_octet_length.q.out          |   222 -
 .../llap/vector_varchar_mapjoin1.q.out          |     6 +-
 .../llap/vector_varchar_simple.q.out            |     4 +-
 .../llap/vector_when_case_null.q.out            |     2 +-
 .../clientpositive/llap/vectorization_0.q.out   |   100 +-
 .../clientpositive/llap/vectorization_13.q.out  |    12 +-
 .../clientpositive/llap/vectorization_15.q.out  |    10 +-
 .../clientpositive/llap/vectorization_17.q.out  |     2 +-
 .../clientpositive/llap/vectorization_7.q.out   |     8 +-
 .../clientpositive/llap/vectorization_8.q.out   |     8 +-
 .../llap/vectorization_div0.q.out               |   414 +-
 .../llap/vectorization_limit.q.out              |   469 +-
 .../llap/vectorization_offset_limit.q.out       |     4 +-
 .../llap/vectorization_short_regress.q.out      |   132 +-
 .../llap/vectorized_bucketmapjoin1.q.out        |    29 +-
 .../clientpositive/llap/vectorized_case.q.out   |   254 -
 .../llap/vectorized_date_funcs.q.out            |    14 +-
 .../llap/vectorized_distinct_gby.q.out          |     2 +-
 .../vectorized_dynamic_partition_pruning.q.out  |   518 +-
 .../vectorized_dynamic_semijoin_reduction.q.out |   246 +-
 ...vectorized_dynamic_semijoin_reduction2.q.out |    32 +-
 .../clientpositive/llap/vectorized_join46.q.out |    58 +-
 .../llap/vectorized_mapjoin.q.out               |     2 +-
 .../llap/vectorized_mapjoin2.q.out              |     9 +-
 .../llap/vectorized_parquet.q.out               |   168 +-
 .../llap/vectorized_parquet_types.q.out         |   189 +-
 .../clientpositive/llap/vectorized_ptf.q.out    |  2552 +-
 .../llap/vectorized_shufflejoin.q.out           |     4 +-
 .../llap/vectorized_timestamp.q.out             |    22 +-
 .../llap/vectorized_timestamp_funcs.q.out       |    43 +-
 .../results/clientpositive/llap/windowing.q.out |    54 +-
 .../clientpositive/llap/windowing_gby.q.out     |    42 +-
 .../llap/windowing_windowspec2.q.out            |     4 +-
 .../test/results/clientpositive/llap_text.q.out |     4 +-
 .../clientpositive/louter_join_ppr.q.out        |   331 +-
 .../test/results/clientpositive/mapjoin1.q.out  |   340 +-
 .../clientpositive/mapjoin_distinct.q.out       |   256 +-
 .../results/clientpositive/mapjoin_hook.q.out   |     4 +-
 .../test/results/clientpositive/mergejoin.q.out |    23 +-
 .../results/clientpositive/mergejoins.q.out     |     2 +-
 .../clientpositive/metadata_only_queries.q.out  |    12 +-
 .../metadata_only_queries_with_filters.q.out    |    16 +-
 .../results/clientpositive/msck_repair_0.q.out  |     8 -
 .../clientpositive/multi_insert_gby3.q.out      |     6 +-
 .../clientpositive/multi_insert_mixed.q.out     |     6 +-
 .../clientpositive/multigroupby_singlemr.q.out  |     4 +-
 .../clientpositive/named_column_join.q.out      |   482 -
 .../clientpositive/nested_column_pruning.q.out  |     8 +-
 .../results/clientpositive/null_column.q.out    |     4 +-
 .../optimize_filter_literal.q.out               |   147 -
 .../results/clientpositive/orc_file_dump.q.out  |   120 +-
 .../results/clientpositive/orc_merge10.q.out    |     4 +-
 .../results/clientpositive/orc_merge11.q.out    |   110 +-
 .../results/clientpositive/orc_merge12.q.out    |     4 +-
 .../results/clientpositive/outer_join_ppr.q.out |   289 +-
 .../outer_reference_windowed.q.out              |   847 -
 .../clientpositive/parallel_colstats.q.out      |  1529 -
 .../clientpositive/parquet_decimal.q.out        |    24 +-
 .../parquet_mixed_partition_formats.q.out       |     1 -
 .../clientpositive/parquet_partitioned.q.out    |     1 -
 .../clientpositive/parquet_ppd_multifiles.q.out |    12 +-
 .../results/clientpositive/parquet_serde.q.out  |     1 -
 ..._non_dictionary_encoding_vectorization.q.out |     4 +-
 .../parquet_types_vectorization.q.out           |     4 +-
 .../partitions_filter_default.q.out             |    67 -
 ql/src/test/results/clientpositive/pcs.q.out    |   150 +-
 .../results/clientpositive/perf/query1.q.out    |   260 +-
 .../results/clientpositive/perf/query12.q.out   |     2 +-
 .../results/clientpositive/perf/query13.q.out   |   186 +-
 .../results/clientpositive/perf/query14.q.out   |  2004 +-
 .../results/clientpositive/perf/query15.q.out   |   120 +-
 .../results/clientpositive/perf/query16.q.out   |    60 +-
 .../results/clientpositive/perf/query17.q.out   |   214 +-
 .../results/clientpositive/perf/query18.q.out   |     2 +-
 .../results/clientpositive/perf/query19.q.out   |   190 +-
 .../results/clientpositive/perf/query20.q.out   |    70 +-
 .../results/clientpositive/perf/query21.q.out   |     2 +-
 .../results/clientpositive/perf/query22.q.out   |    98 +-
 .../results/clientpositive/perf/query23.q.out   |   832 +-
 .../results/clientpositive/perf/query25.q.out   |   212 +-
 .../results/clientpositive/perf/query26.q.out   |   126 +-
 .../results/clientpositive/perf/query29.q.out   |   212 +-
 .../results/clientpositive/perf/query3.q.out    |    70 +-
 .../results/clientpositive/perf/query30.q.out   |   353 +-
 .../results/clientpositive/perf/query31.q.out   |   606 +-
 .../results/clientpositive/perf/query32.q.out   |   156 +-
 .../results/clientpositive/perf/query36.q.out   |     4 +-
 .../results/clientpositive/perf/query37.q.out   |     2 +-
 .../results/clientpositive/perf/query38.q.out   |   210 +-
 .../results/clientpositive/perf/query39.q.out   |   196 +-
 .../results/clientpositive/perf/query40.q.out   |     2 +-
 .../results/clientpositive/perf/query42.q.out   |    70 +-
 .../results/clientpositive/perf/query46.q.out   |   126 +-
 .../results/clientpositive/perf/query48.q.out   |   158 +-
 .../results/clientpositive/perf/query5.q.out    |   302 +-
 .../results/clientpositive/perf/query51.q.out   |    84 +-
 .../results/clientpositive/perf/query52.q.out   |    70 +-
 .../results/clientpositive/perf/query54.q.out   |   244 +-
 .../results/clientpositive/perf/query55.q.out   |    70 +-
 .../results/clientpositive/perf/query58.q.out   |   472 +-
 .../results/clientpositive/perf/query6.q.out    |   351 +-
 .../results/clientpositive/perf/query64.q.out   |  1218 +-
 .../results/clientpositive/perf/query65.q.out   |   202 +-
 .../results/clientpositive/perf/query66.q.out   |     2 +-
 .../results/clientpositive/perf/query67.q.out   |     2 +-
 .../results/clientpositive/perf/query68.q.out   |   126 +-
 .../results/clientpositive/perf/query69.q.out   |   204 +-
 .../results/clientpositive/perf/query7.q.out    |   126 +-
 .../results/clientpositive/perf/query70.q.out   |   154 +-
 .../results/clientpositive/perf/query71.q.out   |   202 +-
 .../results/clientpositive/perf/query72.q.out   |   334 +-
 .../results/clientpositive/perf/query75.q.out   |    14 +-
 .../results/clientpositive/perf/query79.q.out   |    98 +-
 .../results/clientpositive/perf/query8.q.out    |   222 +-
 .../results/clientpositive/perf/query80.q.out   |     6 +-
 .../results/clientpositive/perf/query81.q.out   |   355 +-
 .../results/clientpositive/perf/query82.q.out   |     2 +-
 .../results/clientpositive/perf/query83.q.out   |   462 +-
 .../results/clientpositive/perf/query85.q.out   |   266 +-
 .../results/clientpositive/perf/query86.q.out   |     4 +-
 .../results/clientpositive/perf/query87.q.out   |   210 +-
 .../results/clientpositive/perf/query88.q.out   |   224 +-
 .../results/clientpositive/perf/query89.q.out   |    98 +-
 .../results/clientpositive/perf/query9.q.out    |   829 -
 .../results/clientpositive/perf/query91.q.out   |   178 +-
 .../results/clientpositive/perf/query92.q.out   |    42 +-
 .../results/clientpositive/perf/query97.q.out   |    42 +-
 .../results/clientpositive/perf/query98.q.out   |    70 +-
 .../clientpositive/position_alias_test_1.q.out  |   113 +-
 .../clientpositive/ppd_constant_expr.q.out      |     8 +-
 .../test/results/clientpositive/ppd_gby.q.out   |    48 +-
 .../test/results/clientpositive/ppd_gby2.q.out  |    48 +-
 .../test/results/clientpositive/ppd_join2.q.out |   108 +-
 .../test/results/clientpositive/ppd_join3.q.out |   170 +-
 .../clientpositive/ppd_outer_join1.q.out        |    74 +-
 .../results/clientpositive/ppd_windowing1.q.out |    36 +-
 .../clientpositive/primitive_types.q.out        |    10 +-
 .../results/clientpositive/ptfgroupbyjoin.q.out |    40 +-
 .../results/clientpositive/quotedid_stats.q.out |    86 -
 .../reduce_deduplicate_extended2.q.out          |   167 +-
 .../clientpositive/remove_exprs_stats.q.out     |     6 +-
 .../rename_partition_location.q.out             |    23 -
 .../clientpositive/router_join_ppr.q.out        |   319 +-
 .../test/results/clientpositive/row__id.q.out   |    34 +-
 .../clientpositive/select_column_pruning.q.out  |   141 -
 .../test/results/clientpositive/semijoin5.q.out |     2 +-
 .../results/clientpositive/show_functions.q.out |    19 -
 .../test/results/clientpositive/skewjoin.q.out  |   198 +-
 .../results/clientpositive/smb_mapjoin_20.q.out |     6 +-
 .../spark/add_part_multiple.q.out               |    16 +-
 .../spark/auto_smb_mapjoin_14.q.out             |     4 +-
 .../spark/auto_sortmerge_join_9.q.out           |     4 +-
 .../spark/avro_decimal_native.q.out             |    28 +-
 .../spark/bucket_map_join_spark1.q.out          |     8 +-
 .../spark/bucket_map_join_spark2.q.out          |     8 +-
 .../spark/bucket_map_join_spark3.q.out          |     8 +-
 .../clientpositive/spark/bucketmapjoin1.q.out   |   664 +-
 .../clientpositive/spark/bucketmapjoin2.q.out   |   624 +-
 .../clientpositive/spark/bucketmapjoin3.q.out   |   376 +-
 .../clientpositive/spark/bucketmapjoin4.q.out   |   326 +-
 .../clientpositive/spark/bucketmapjoin5.q.out   |    12 +-
 .../spark/bucketmapjoin_negative.q.out          |     2 +-
 .../spark/bucketmapjoin_negative2.q.out         |     2 +-
 .../spark/constprog_semijoin.q.out              |    16 +-
 .../spark/dynamic_rdd_cache.q.out               |    52 +-
 .../spark/filter_join_breaktask2.q.out          |    46 +-
 .../results/clientpositive/spark/groupby5.q.out |     2 +-
 .../clientpositive/spark/groupby5_noskew.q.out  |     2 +-
 .../clientpositive/spark/groupby7_map.q.out     |     4 +-
 .../groupby7_map_multi_single_reducer.q.out     |     4 +-
 .../spark/groupby7_map_skew.q.out               |     4 +-
 .../clientpositive/spark/groupby7_noskew.q.out  |     4 +-
 .../groupby7_noskew_multi_single_reducer.q.out  |     4 +-
 .../results/clientpositive/spark/groupby8.q.out |     8 +-
 .../clientpositive/spark/groupby8_map.q.out     |     4 +-
 .../spark/groupby8_map_skew.q.out               |     4 +-
 .../clientpositive/spark/groupby8_noskew.q.out  |     4 +-
 .../results/clientpositive/spark/groupby9.q.out |    28 +-
 .../spark/groupby_grouping_id2.q.out            |     9 -
 .../clientpositive/spark/groupby_position.q.out |    30 +-
 .../spark/infer_bucket_sort_map_operators.q.out |    54 +-
 .../results/clientpositive/spark/join25.q.out   |    56 +-
 .../results/clientpositive/spark/join26.q.out   |   192 +-
 .../results/clientpositive/spark/join27.q.out   |    54 +-
 .../results/clientpositive/spark/join30.q.out   |    56 +-
 .../results/clientpositive/spark/join36.q.out   |    64 +-
 .../results/clientpositive/spark/join37.q.out   |    56 +-
 .../results/clientpositive/spark/join38.q.out   |   106 +-
 .../results/clientpositive/spark/join39.q.out   |    32 +-
 .../clientpositive/spark/join_alt_syntax.q.out  |    98 +-
 .../spark/join_cond_pushdown_1.q.out            |    86 +-
 .../spark/join_cond_pushdown_3.q.out            |    86 +-
 .../spark/join_cond_pushdown_unqual1.q.out      |     2 +-
 .../spark/join_cond_pushdown_unqual3.q.out      |     2 +-
 .../clientpositive/spark/join_merging.q.out     |    80 +-
 .../clientpositive/spark/join_reorder.q.out     |   106 +-
 .../clientpositive/spark/join_reorder4.q.out    |   339 +-
 .../clientpositive/spark/limit_pushdown.q.out   |     4 +-
 .../clientpositive/spark/louter_join_ppr.q.out  |   331 +-
 .../results/clientpositive/spark/mapjoin1.q.out |   284 +-
 .../clientpositive/spark/mapjoin_decimal.q.out  |    32 +-
 .../clientpositive/spark/mapjoin_distinct.q.out |   192 +-
 .../clientpositive/spark/mergejoins.q.out       |     2 +-
 .../spark/metadata_only_queries.q.out           |    12 +-
 .../metadata_only_queries_with_filters.q.out    |    16 +-
 .../spark/multi_insert_gby3.q.out               |     4 +-
 .../spark/multi_insert_lateral_view.q.out       |   316 +-
 .../spark/multi_insert_mixed.q.out              |     4 +-
 .../spark/multigroupby_singlemr.q.out           |     4 +-
 .../clientpositive/spark/outer_join_ppr.q.out   |   289 +-
 .../clientpositive/spark/ppd_join2.q.out        |   160 +-
 .../clientpositive/spark/ppd_join3.q.out        |   110 +-
 .../clientpositive/spark/ppd_outer_join1.q.out  |    74 +-
 .../test/results/clientpositive/spark/ptf.q.out |    12 +-
 .../clientpositive/spark/router_join_ppr.q.out  |   319 +-
 .../results/clientpositive/spark/skewjoin.q.out |   198 +-
 .../clientpositive/spark/smb_mapjoin_14.q.out   |   822 +-
 .../clientpositive/spark/smb_mapjoin_15.q.out   |   566 +-
 .../clientpositive/spark/smb_mapjoin_17.q.out   |   168 +-
 .../clientpositive/spark/smb_mapjoin_20.q.out   |     6 +-
 .../clientpositive/spark/smb_mapjoin_4.q.out    |  1093 +-
 .../clientpositive/spark/smb_mapjoin_5.q.out    |  1093 +-
 .../clientpositive/spark/smb_mapjoin_6.q.out    |   360 +-
 .../spark/spark_explainuser_1.q.out             |  5921 ---
 .../results/clientpositive/spark/stats10.q.out  |     1 -
 .../results/clientpositive/spark/stats12.q.out  |     1 -
 .../results/clientpositive/spark/stats13.q.out  |     2 -
 .../results/clientpositive/spark/stats14.q.out  |     2 -
 .../results/clientpositive/spark/stats15.q.out  |     2 -
 .../results/clientpositive/spark/stats2.q.out   |     2 -
 .../results/clientpositive/spark/stats3.q.out   |     1 -
 .../results/clientpositive/spark/stats6.q.out   |     1 -
 .../results/clientpositive/spark/stats7.q.out   |     1 -
 .../results/clientpositive/spark/stats8.q.out   |     2 -
 .../clientpositive/spark/stats_noscan_1.q.out   |     1 -
 .../clientpositive/spark/subquery_exists.q.out  |    37 +-
 .../clientpositive/spark/subquery_in.q.out      |   623 +-
 .../spark/table_access_keys_stats.q.out         |     4 +-
 .../results/clientpositive/spark/union17.q.out  |    40 +-
 .../results/clientpositive/spark/union19.q.out  |    20 +-
 .../clientpositive/spark/union_remove_15.q.out  |     1 -
 .../clientpositive/spark/union_remove_16.q.out  |     1 -
 .../clientpositive/spark/union_remove_17.q.out  |     1 -
 .../clientpositive/spark/union_remove_18.q.out  |     1 -
 .../clientpositive/spark/union_remove_19.q.out  |     4 +-
 .../spark/vector_between_in.q.out               |   122 +-
 .../spark/vector_cast_constant.q.out            |     8 +-
 .../spark/vector_count_distinct.q.out           |    14 +-
 .../spark/vector_data_types.q.out               |    36 +-
 .../spark/vector_decimal_aggregate.q.out        |    19 +-
 .../spark/vector_decimal_mapjoin.q.out          |    28 +-
 .../spark/vector_distinct_2.q.out               |     7 +-
 .../clientpositive/spark/vector_groupby_3.q.out |    13 +-
 .../spark/vector_mapjoin_reduce.q.out           |   157 +-
 .../clientpositive/spark/vector_orderby_5.q.out |    20 +-
 .../spark/vector_outer_join1.q.out              |    49 +-
 .../spark/vector_outer_join2.q.out              |    25 +-
 .../spark/vector_string_concat.q.out            |    16 +-
 .../clientpositive/spark/vectorization_0.q.out  |    99 +-
 .../clientpositive/spark/vectorization_13.q.out |    12 +-
 .../clientpositive/spark/vectorization_15.q.out |    10 +-
 .../clientpositive/spark/vectorization_17.q.out |     2 +-
 .../spark/vectorization_div0.q.out              |     8 +-
 .../spark/vectorization_short_regress.q.out     |   132 +-
 .../clientpositive/spark/vectorized_case.q.out  |   250 -
 .../clientpositive/spark/vectorized_ptf.q.out   |  2571 +-
 .../spark/vectorized_shufflejoin.q.out          |    18 +-
 .../spark/vectorized_timestamp_funcs.q.out      |    43 +-
 .../clientpositive/spark/windowing.q.out        |    54 +-
 .../test/results/clientpositive/stats10.q.out   |     1 -
 .../test/results/clientpositive/stats12.q.out   |     1 -
 .../test/results/clientpositive/stats13.q.out   |     2 -
 .../test/results/clientpositive/stats14.q.out   |     2 -
 .../test/results/clientpositive/stats15.q.out   |     2 -
 ql/src/test/results/clientpositive/stats2.q.out |     2 -
 ql/src/test/results/clientpositive/stats3.q.out |     1 -
 ql/src/test/results/clientpositive/stats4.q.out |     2 -
 ql/src/test/results/clientpositive/stats6.q.out |     1 -
 ql/src/test/results/clientpositive/stats7.q.out |     1 -
 ql/src/test/results/clientpositive/stats8.q.out |     2 -
 .../results/clientpositive/stats_noscan_1.q.out |     1 -
 .../test/results/clientpositive/structin.q.out  |     1 -
 .../subq_where_serialization.q.out              |   112 +-
 .../clientpositive/subquery_exists.q.out        |    43 +-
 .../clientpositive/subquery_in_having.q.out     |    81 +-
 .../clientpositive/subquery_notexists.q.out     |    20 +-
 .../subquery_notexists_having.q.out             |    24 +-
 .../clientpositive/subquery_notin_having.q.out  |   130 +-
 .../subquery_unqualcolumnrefs.q.out             |    76 +-
 .../temp_table_display_colstats_tbllvl.q.out    |    11 +-
 .../temp_table_windowing_expressions.q.out      |     4 +-
 .../clientpositive/tez/explainanalyze_2.q.out   |   230 +-
 .../clientpositive/tez/explainanalyze_3.q.out   |   127 +-
 .../clientpositive/tez/explainuser_3.q.out      |   150 +-
 .../clientpositive/tez/orc_merge12.q.out        |     4 +-
 .../clientpositive/tez/vector_aggregate_9.q.out |     4 +
 .../tez/vector_auto_smb_mapjoin_14.q.out        |    15 +-
 .../tez/vector_between_columns.q.out            |    18 +-
 .../clientpositive/tez/vector_between_in.q.out  |   102 +-
 .../tez/vector_binary_join_groupby.q.out        |    13 +-
 .../tez/vector_cast_constant.q.out              |     4 +
 .../clientpositive/tez/vector_char_2.q.out      |    12 +-
 .../tez/vector_char_mapjoin1.q.out              |    33 +-
 .../clientpositive/tez/vector_char_simple.q.out |     4 +-
 .../clientpositive/tez/vector_coalesce.q.out    |    20 +-
 .../clientpositive/tez/vector_coalesce_2.q.out  |     2 +-
 .../tez/vector_join_part_col_char.q.out         |    18 +-
 .../tez/vector_non_string_partition.q.out       |     8 +-
 .../clientpositive/tez/vectorization_div0.q.out |     8 +-
 .../tez/vectorization_limit.q.out               |    69 +-
 .../results/clientpositive/tez_join_hash.q.out  |     5 -
 .../results/clientpositive/tunable_ndv.q.out    |   220 -
 .../udaf_binarysetfunctions.q.out               |   464 -
 .../test/results/clientpositive/udaf_corr.q.out |    13 +-
 .../clientpositive/udaf_covar_samp.q.out        |    16 +-
 ql/src/test/results/clientpositive/udf1.q.out   |    32 +-
 ql/src/test/results/clientpositive/udf3.q.out   |    20 +-
 .../results/clientpositive/udf_between.q.out    |     4 +-
 .../clientpositive/udf_character_length.q.out   |   269 -
 .../results/clientpositive/udf_length.q.out     |     2 +-
 .../results/clientpositive/udf_likeall.q.out    |   187 -
 .../results/clientpositive/udf_likeany.q.out    |   187 -
 .../clientpositive/udf_octet_length.q.out       |   221 -
 .../clientpositive/udf_round_2_auto_stats.q.out |    55 -
 .../test/results/clientpositive/udf_trunc.q.out |   371 +-
 .../clientpositive/udf_width_bucket.q.out       |   680 -
 .../clientpositive/unicode_comments.q.out       |     1 -
 .../test/results/clientpositive/union17.q.out   |    40 +-
 .../test/results/clientpositive/union19.q.out   |    20 +-
 .../clientpositive/union_remove_15.q.out        |     1 -
 .../clientpositive/union_remove_16.q.out        |     1 -
 .../clientpositive/union_remove_17.q.out        |     1 -
 .../clientpositive/union_remove_18.q.out        |     1 -
 .../clientpositive/union_remove_19.q.out        |     4 +-
 .../clientpositive/updateAccessTime.q.out       |    16 -
 .../test/results/clientpositive/varchar_1.q.out |     4 +-
 .../clientpositive/vector_between_columns.q.out |     8 +-
 .../vector_binary_join_groupby.q.out            |   131 +-
 .../results/clientpositive/vector_bucket.q.out  |    21 +-
 .../clientpositive/vector_cast_constant.q.out   |    25 +-
 .../results/clientpositive/vector_char_2.q.out  |    50 +-
 .../clientpositive/vector_char_mapjoin1.q.out   |    12 +-
 .../clientpositive/vector_char_simple.q.out     |     4 +-
 .../clientpositive/vector_coalesce.q.out        |    20 +-
 .../clientpositive/vector_coalesce_2.q.out      |     8 +-
 .../results/clientpositive/vector_count.q.out   |    16 +-
 .../clientpositive/vector_data_types.q.out      |    36 +-
 .../clientpositive/vector_decimal_10_0.q.out    |    12 +-
 .../vector_decimal_aggregate.q.out              |    16 +-
 .../vector_decimal_expressions.q.out            |     4 +-
 .../clientpositive/vector_decimal_mapjoin.q.out |    28 +-
 .../vector_decimal_precision.q.out              |    52 +-
 .../clientpositive/vector_decimal_round.q.out   |   111 +-
 .../clientpositive/vector_decimal_round_2.q.out |   190 +-
 .../clientpositive/vector_distinct_2.q.out      |     4 +-
 .../clientpositive/vector_empty_where.q.out     |    16 +-
 .../clientpositive/vector_groupby4.q.out        |    21 +-
 .../clientpositive/vector_groupby6.q.out        |    21 +-
 .../clientpositive/vector_groupby_3.q.out       |    10 +-
 .../clientpositive/vector_groupby_mapjoin.q.out |   165 +-
 .../clientpositive/vector_groupby_reduce.q.out  |   128 +-
 .../clientpositive/vector_grouping_sets.q.out   |    51 +-
 .../results/clientpositive/vector_if_expr.q.out |     4 +-
 .../clientpositive/vector_include_no_sel.q.out  |     4 +-
 .../clientpositive/vector_interval_1.q.out      |    32 +-
 .../vector_interval_arithmetic.q.out            |    28 +-
 .../vector_interval_mapjoin.q.out               |     6 +-
 .../clientpositive/vector_mapjoin_reduce.q.out  |   112 +-
 .../vector_mr_diff_schema_alias.q.out           |    18 +-
 .../vector_non_constant_in_expr.q.out           |     4 +-
 .../vector_non_string_partition.q.out           |     8 +-
 .../clientpositive/vector_order_null.q.out      |  1096 -
 .../clientpositive/vector_orderby_5.q.out       |    27 +-
 .../clientpositive/vector_outer_join1.q.out     |     4 +-
 .../clientpositive/vector_outer_join2.q.out     |     4 +-
 .../clientpositive/vector_outer_join3.q.out     |     6 +-
 .../clientpositive/vector_outer_join4.q.out     |     6 +-
 .../clientpositive/vector_outer_join6.q.out     |     4 +-
 .../results/clientpositive/vector_reduce1.q.out |     4 +-
 .../results/clientpositive/vector_reduce2.q.out |     4 +-
 .../results/clientpositive/vector_reduce3.q.out |     4 +-
 .../vector_reduce_groupby_decimal.q.out         |    21 +-
 .../clientpositive/vector_string_concat.q.out   |    29 +-
 .../clientpositive/vector_string_decimal.q.out  |    23 +-
 .../vector_tablesample_rows.q.out               |    17 +-
 .../vector_udf_character_length.q.out           |   315 -
 .../vector_udf_octet_length.q.out               |   254 -
 .../clientpositive/vector_varchar_simple.q.out  |     4 +-
 .../clientpositive/vector_when_case_null.q.out  |     4 +-
 .../clientpositive/vectorization_13.q.out       |    46 +-
 .../clientpositive/vectorization_14.q.out       |     9 +-
 .../clientpositive/vectorization_15.q.out       |    19 +-
 .../clientpositive/vectorization_7.q.out        |     8 +-
 .../clientpositive/vectorization_8.q.out        |     8 +-
 .../clientpositive/vectorization_div0.q.out     |     8 +-
 .../clientpositive/vectorization_limit.q.out    |    79 +-
 .../vectorization_offset_limit.q.out            |     4 +-
 .../clientpositive/vectorized_case.q.out        |   222 -
 .../clientpositive/vectorized_date_funcs.q.out  |    21 +-
 .../clientpositive/vectorized_mapjoin2.q.out    |     4 +-
 .../vectorized_parquet_types.q.out              |    23 +-
 .../clientpositive/vectorized_shufflejoin.q.out |    23 +-
 .../clientpositive/vectorized_timestamp.q.out   |     4 +-
 .../vectorized_timestamp_funcs.q.out            |    20 +-
 .../clientpositive/windowing_distinct.q.out     |    16 +-
 .../clientpositive/windowing_expressions.q.out  |     4 +-
 .../results/clientpositive/windowing_gby2.q.out |    34 +-
 .../windowing_multipartitioning.q.out           |    12 +-
 .../clientpositive/windowing_navfn.q.out        |    12 +-
 .../clientpositive/windowing_ntile.q.out        |     8 +-
 .../clientpositive/windowing_order_null.q.out   |     4 +-
 .../windowing_range_multiorder.q.out            |     4 +-
 .../results/clientpositive/windowing_rank.q.out |    56 +-
 .../clientpositive/windowing_streaming.q.out    |     4 +-
 .../results/clientpositive/windowing_udaf.q.out |     4 +-
 .../clientpositive/windowing_windowspec.q.out   |   112 +-
 .../zero_rows_single_insert.q.out               |   113 -
 serde/pom.xml                                   |     2 +-
 .../hive/serde2/ColumnProjectionUtils.java      |     6 +-
 .../apache/hadoop/hive/serde2/SerDeUtils.java   |     2 +-
 .../apache/hadoop/hive/serde2/WriteBuffers.java |    25 +-
 .../hive/serde2/avro/AvroDeserializer.java      |     2 +-
 .../hadoop/hive/serde2/avro/AvroSerDe.java      |     6 +-
 .../fast/BinarySortableDeserializeRead.java     |    39 +-
 .../lazy/fast/LazySimpleDeserializeRead.java    |     4 +-
 .../hive/serde2/lazy/fast/StringToDouble.java   |    21 +-
 .../hive/serde2/lazybinary/LazyBinaryUtils.java |     2 +-
 .../objectinspector/ObjectInspectorFactory.java |    11 +-
 .../StandardStructObjectInspector.java          |     2 +-
 .../hive/serde2/typeinfo/TypeInfoUtils.java     |     2 +-
 .../hive/serde2/TestColumnProjectionUtils.java  |    14 -
 .../binarysortable/TestBinarySortableFast.java  |    13 +-
 service-rpc/pom.xml                             |     2 +-
 service/pom.xml                                 |   100 +-
 .../org/apache/hive/tmpl/QueryProfileTmpl.jamon |    10 +-
 .../hive/service/cli/JobProgressUpdate.java     |    17 -
 .../cli/ProgressMonitorStatusMapper.java        |    17 -
 .../cli/TezProgressMonitorStatusMapper.java     |    17 -
 .../cli/operation/HiveCommandOperation.java     |     8 +-
 .../cli/operation/LogDivertAppender.java        |   249 +
 .../hive/service/cli/operation/Operation.java   |    73 +-
 .../service/cli/operation/OperationManager.java |    32 +-
 .../service/cli/operation/SQLOperation.java     |    40 +-
 .../service/cli/session/HiveSessionImpl.java    |    46 +-
 .../service/cli/session/SessionManager.java     |     4 +-
 .../cli/thrift/ThriftHttpCLIService.java        |    33 +-
 .../hive-webapps/hiveserver2/hiveserver2.jsp    |     8 +-
 .../hive/service/server/TestHS2HttpServer.java  |     9 +-
 shims/0.23/pom.xml                              |     6 +-
 .../apache/hadoop/hive/shims/Hadoop23Shims.java |    16 +-
 shims/aggregator/pom.xml                        |     2 +-
 shims/common/pom.xml                            |     7 +-
 .../org/apache/hadoop/hive/io/HdfsUtils.java    |     4 +-
 .../hive/io/HiveIOExceptionHandlerChain.java    |     2 +-
 .../hive/io/HiveIOExceptionHandlerUtil.java     |     4 +-
 .../apache/hadoop/hive/shims/ShimLoader.java    |     2 +-
 shims/pom.xml                                   |     2 +-
 shims/scheduler/pom.xml                         |     8 +-
 spark-client/pom.xml                            |     6 +-
 .../hive/spark/client/SparkClientUtilities.java |     3 +-
 .../org/apache/hive/spark/client/rpc/Rpc.java   |     3 +-
 .../apache/hive/spark/client/rpc/RpcServer.java |     2 +-
 storage-api/LICENSE                             |   203 -
 storage-api/NOTICE                              |     6 -
 storage-api/pom.xml                             |     4 +-
 .../hive/common/type/FastHiveDecimalImpl.java   |   143 +-
 .../hadoop/hive/common/type/RandomTypeUtil.java |    10 +-
 .../hive/ql/exec/vector/VectorizedRowBatch.java |    42 -
 .../hadoop/hive/ql/util/JavaDataModel.java      |    26 +-
 .../org/apache/hive/common/util/Murmur3.java    |    98 +-
 .../ql/exec/vector/TestStructColumnVector.java  |     3 +-
 .../apache/hive/common/util/TestMurmur3.java    |    24 -
 testutils/pom.xml                               |     2 +-
 .../ptest2/conf/cloudhost.properties.example    |    19 +-
 .../hive/testutils/jdbc/HiveBurnInClient.java   |     4 +-
 vector-code-gen/pom.xml                         |     2 +-
 1976 files changed, 77004 insertions(+), 172268 deletions(-)
----------------------------------------------------------------------



[07/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/PrintSummary.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/PrintSummary.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/PrintSummary.java
index 5bb6bf1..6311335 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/PrintSummary.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/PrintSummary.java
@@ -1,20 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 package org.apache.hadoop.hive.ql.exec.tez.monitoring;
 
 import org.apache.hadoop.hive.ql.session.SessionState;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/QueryExecutionBreakdownSummary.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/QueryExecutionBreakdownSummary.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/QueryExecutionBreakdownSummary.java
index 271e3c6..1625ce1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/QueryExecutionBreakdownSummary.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/QueryExecutionBreakdownSummary.java
@@ -1,20 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 package org.apache.hadoop.hive.ql.exec.tez.monitoring;
 
 import org.apache.hadoop.hive.ql.log.PerfLogger;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/RenderStrategy.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/RenderStrategy.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/RenderStrategy.java
index 3aebbe1..2535b10 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/RenderStrategy.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/RenderStrategy.java
@@ -1,20 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 package org.apache.hadoop.hive.ql.exec.tez.monitoring;
 
 import org.apache.hadoop.hive.common.log.InPlaceUpdate;
@@ -96,9 +79,7 @@ class RenderStrategy {
               perfLogger.PerfLogBegin(TezJobMonitor.CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s);
             }
 
-            if (!perfLogger.endTimeHasMethod(PerfLogger.TEZ_RUN_VERTEX + s)) {
-              perfLogger.PerfLogEnd(TezJobMonitor.CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s);
-            }
+            perfLogger.PerfLogEnd(TezJobMonitor.CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s);
           }
           if (complete < total && (complete > 0 || running > 0 || failed > 0)) {
 

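Note on the RenderStrategy hunk above: the guard being dropped appears to close a vertex's TEZ_RUN_VERTEX perf-log interval only once, even though the progress loop revisits completed vertices on each update. A minimal sketch of that end-once pattern, using a hypothetical PerfTimer class rather than Hive's PerfLogger API:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Hypothetical stand-in for a perf logger: one start and at most one end per event key.
    class PerfTimer {
      private final Map<String, Long> startTimes = new ConcurrentHashMap<>();
      private final Map<String, Long> endTimes = new ConcurrentHashMap<>();

      void begin(String eventKey) {
        // Keep only the first start; later calls for the same vertex are no-ops.
        startTimes.putIfAbsent(eventKey, System.currentTimeMillis());
      }

      boolean hasEnd(String eventKey) {
        return endTimes.containsKey(eventKey);
      }

      void end(String eventKey) {
        // End-once guard: a completed vertex may be reported again on later
        // progress updates, but the first recorded end time is kept.
        if (!hasEnd(eventKey)) {
          endTimes.put(eventKey, System.currentTimeMillis());
        }
      }

      long elapsedMillis(String eventKey) {
        return endTimes.getOrDefault(eventKey, 0L) - startTimes.getOrDefault(eventKey, 0L);
      }
    }

The point of the hasEnd check is that repeated end() calls keep the first recorded completion time instead of stretching the measured interval.
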
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezProgressMonitor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezProgressMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezProgressMonitor.java
index 9739ad7..3475fc2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezProgressMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezProgressMonitor.java
@@ -1,20 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 package org.apache.hadoop.hive.ql.exec.tez.monitoring;
 
 import org.apache.hadoop.hive.common.log.ProgressMonitor;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationBufferBatch.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationBufferBatch.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationBufferBatch.java
index 84128e8..630046d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationBufferBatch.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationBufferBatch.java
@@ -57,7 +57,7 @@ public class VectorAggregationBufferBatch {
   /**
    * Memory consumed by a set of aggregation buffers
    */
-  private long aggregatorsFixedSize;
+  private int aggregatorsFixedSize;
 
   /**
    * Array of indexes for aggregators that have variable size
@@ -76,7 +76,7 @@ public class VectorAggregationBufferBatch {
    * Returns the fixed size consumed by the aggregation buffers
    * @return
    */
-  public long getAggregatorsFixedSize() {
+  public int getAggregatorsFixedSize() {
     return aggregatorsFixedSize;
   }
 

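For context on the aggregatorsFixedSize field retyped above: the class's comments describe a fixed per-entry portion plus aggregators whose buffers have variable size, and the vectorized group-by operator combines the two to bound hash-table memory. A rough sketch of that kind of estimate, with hypothetical names (HashEntrySizeEstimator is not part of Hive):

    // Hypothetical per-entry memory estimate for a hash aggregation table:
    // a fixed portion plus the running average of the variable-size buffers.
    class HashEntrySizeEstimator {
      private final long fixedBytesPerEntry;   // keys plus fixed-size aggregation buffers
      private long variableBytesSum;           // observed bytes of variable-size buffers
      private long variableSamples;

      HashEntrySizeEstimator(long fixedBytesPerEntry) {
        this.fixedBytesPerEntry = fixedBytesPerEntry;
      }

      void sampleVariableSize(long observedBytes) {
        variableBytesSum += observedBytes;
        variableSamples++;
      }

      long estimatedBytesPerEntry() {
        long avgVariable = (variableSamples == 0) ? 0 : variableBytesSum / variableSamples;
        return fixedBytesPerEntry + avgVariable;
      }

      long estimatedTotalBytes(long numEntries) {
        return numEntries * estimatedBytesPerEntry();
      }
    }
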
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnSetInfo.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnSetInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnSetInfo.java
index 7ac4f07..935b47b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnSetInfo.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnSetInfo.java
@@ -20,8 +20,10 @@ package org.apache.hadoop.hive.ql.exec.vector;
 
 import java.util.Arrays;
 
-import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector.Type;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 
 /**
  * Class to keep information on a set of typed vector columns.  Used by
@@ -62,87 +64,147 @@ public class VectorColumnSetInfo {
    */
   protected int[] intervalDayTimeIndices;
 
-  final protected int keyCount;
-  private int addKeyIndex;
+  /**
+   * Helper class for looking up a key value based on key index.
+   */
+  public class KeyLookupHelper {
+    public int longIndex;
+    public int doubleIndex;
+    public int stringIndex;
+    public int decimalIndex;
+    public int timestampIndex;
+    public int intervalDayTimeIndex;
+
+    private static final int INDEX_UNUSED = -1;
+
+    private void resetIndices() {
+        this.longIndex = this.doubleIndex = this.stringIndex = this.decimalIndex =
+            timestampIndex = intervalDayTimeIndex = INDEX_UNUSED;
+    }
+    public void setLong(int index) {
+      resetIndices();
+      this.longIndex= index;
+    }
+
+    public void setDouble(int index) {
+      resetIndices();
+      this.doubleIndex = index;
+    }
+
+    public void setString(int index) {
+      resetIndices();
+      this.stringIndex = index;
+    }
+
+    public void setDecimal(int index) {
+      resetIndices();
+      this.decimalIndex = index;
+    }
+
+    public void setTimestamp(int index) {
+      resetIndices();
+      this.timestampIndex= index;
+    }
+
+    public void setIntervalDayTime(int index) {
+      resetIndices();
+      this.intervalDayTimeIndex= index;
+    }
+  }
+
+  /**
+   * Lookup vector to map from key index to primitive type index.
+   */
+  protected KeyLookupHelper[] indexLookup;
 
-  private int addLongIndex;
-  private int addDoubleIndex;
-  private int addStringIndex;
-  private int addDecimalIndex;
-  private int addTimestampIndex;
-  private int addIntervalDayTimeIndex;
+  private int keyCount;
+  private int addIndex;
 
-  // Given the keyIndex these arrays return:
-  //   The ColumnVector.Type,
-  //   The type specific index into longIndices, doubleIndices, etc...
-  protected ColumnVector.Type[] columnVectorTypes;
-  protected int[] columnTypeSpecificIndices;
+  protected int longIndicesIndex;
+  protected int doubleIndicesIndex;
+  protected int stringIndicesIndex;
+  protected int decimalIndicesIndex;
+  protected int timestampIndicesIndex;
+  protected int intervalDayTimeIndicesIndex;
 
   protected VectorColumnSetInfo(int keyCount) {
     this.keyCount = keyCount;
-    this.addKeyIndex = 0;
+    this.addIndex = 0;
 
     // We'll over allocate and then shrink the array for each type
     longIndices = new int[this.keyCount];
-    addLongIndex = 0;
+    longIndicesIndex = 0;
     doubleIndices = new int[this.keyCount];
-    addDoubleIndex  = 0;
+    doubleIndicesIndex  = 0;
     stringIndices = new int[this.keyCount];
-    addStringIndex = 0;
+    stringIndicesIndex = 0;
     decimalIndices = new int[this.keyCount];
-    addDecimalIndex = 0;
+    decimalIndicesIndex = 0;
     timestampIndices = new int[this.keyCount];
-    addTimestampIndex = 0;
+    timestampIndicesIndex = 0;
     intervalDayTimeIndices = new int[this.keyCount];
-    addIntervalDayTimeIndex = 0;
-
-    columnVectorTypes = new ColumnVector.Type[this.keyCount];
-    columnTypeSpecificIndices = new int[this.keyCount];
+    intervalDayTimeIndicesIndex = 0;
+    indexLookup = new KeyLookupHelper[this.keyCount];
   }
 
+  protected void addKey(String outputType) throws HiveException {
+    indexLookup[addIndex] = new KeyLookupHelper();
+
+    String typeName = VectorizationContext.mapTypeNameSynonyms(outputType);
 
-  protected void addKey(ColumnVector.Type columnVectorType) throws HiveException {
+    TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeName);
+    Type columnVectorType = VectorizationContext.getColumnVectorTypeFromTypeInfo(typeInfo);
 
     switch (columnVectorType) {
     case LONG:
-      longIndices[addLongIndex] = addKeyIndex;
-      columnTypeSpecificIndices[addKeyIndex] = addLongIndex++;
+      longIndices[longIndicesIndex] = addIndex;
+      indexLookup[addIndex].setLong(longIndicesIndex);
+      ++longIndicesIndex;
       break;
+
     case DOUBLE:
-      doubleIndices[addDoubleIndex] = addKeyIndex;
-      columnTypeSpecificIndices[addKeyIndex] = addDoubleIndex++;
+      doubleIndices[doubleIndicesIndex] = addIndex;
+      indexLookup[addIndex].setDouble(doubleIndicesIndex);
+      ++doubleIndicesIndex;
       break;
+
     case BYTES:
-      stringIndices[addStringIndex]= addKeyIndex;
-      columnTypeSpecificIndices[addKeyIndex] = addStringIndex++;
+      stringIndices[stringIndicesIndex]= addIndex;
+      indexLookup[addIndex].setString(stringIndicesIndex);
+      ++stringIndicesIndex;
       break;
+
     case DECIMAL:
-      decimalIndices[addDecimalIndex]= addKeyIndex;
-      columnTypeSpecificIndices[addKeyIndex] = addDecimalIndex++;
-        break;
+      decimalIndices[decimalIndicesIndex]= addIndex;
+      indexLookup[addIndex].setDecimal(decimalIndicesIndex);
+      ++decimalIndicesIndex;
+      break;
+
     case TIMESTAMP:
-      timestampIndices[addTimestampIndex] = addKeyIndex;
-      columnTypeSpecificIndices[addKeyIndex] = addTimestampIndex++;
+      timestampIndices[timestampIndicesIndex] = addIndex;
+      indexLookup[addIndex].setTimestamp(timestampIndicesIndex);
+      ++timestampIndicesIndex;
       break;
+
     case INTERVAL_DAY_TIME:
-      intervalDayTimeIndices[addIntervalDayTimeIndex] = addKeyIndex;
-      columnTypeSpecificIndices[addKeyIndex] = addIntervalDayTimeIndex++;
+      intervalDayTimeIndices[intervalDayTimeIndicesIndex] = addIndex;
+      indexLookup[addIndex].setIntervalDayTime(intervalDayTimeIndicesIndex);
+      ++intervalDayTimeIndicesIndex;
       break;
+
     default:
       throw new HiveException("Unexpected column vector type " + columnVectorType);
     }
 
-    columnVectorTypes[addKeyIndex] = columnVectorType;
-    addKeyIndex++;
+    addIndex++;
   }
 
-
-  protected void finishAdding() throws HiveException {
-    longIndices = Arrays.copyOf(longIndices, addLongIndex);
-    doubleIndices = Arrays.copyOf(doubleIndices, addDoubleIndex);
-    stringIndices = Arrays.copyOf(stringIndices, addStringIndex);
-    decimalIndices = Arrays.copyOf(decimalIndices, addDecimalIndex);
-    timestampIndices = Arrays.copyOf(timestampIndices, addTimestampIndex);
-    intervalDayTimeIndices = Arrays.copyOf(intervalDayTimeIndices, addIntervalDayTimeIndex);
+  protected void finishAdding() {
+    longIndices = Arrays.copyOf(longIndices, longIndicesIndex);
+    doubleIndices = Arrays.copyOf(doubleIndices, doubleIndicesIndex);
+    stringIndices = Arrays.copyOf(stringIndices, stringIndicesIndex);
+    decimalIndices = Arrays.copyOf(decimalIndices, decimalIndicesIndex);
+    timestampIndices = Arrays.copyOf(timestampIndices, timestampIndicesIndex);
+    intervalDayTimeIndices = Arrays.copyOf(intervalDayTimeIndices, intervalDayTimeIndicesIndex);
   }
 }
\ No newline at end of file
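
The restored VectorColumnSetInfo groups the grouping-key columns into per-type index arrays and keeps a reverse lookup from each key position to its slot inside that type's array. A condensed, standalone sketch of that bookkeeping pattern (simplified to three hypothetical types; not either exact version of the class):

    import java.util.Arrays;

    // Simplified sketch: each grouping key gets a slot in the array for its column
    // vector type, plus a reverse lookup from key position to that per-type slot.
    class KeyColumnSetSketch {
      enum VectorType { LONG, DOUBLE, BYTES }

      int[] longIndices;
      int[] doubleIndices;
      int[] bytesIndices;
      VectorType[] typeOfKey;      // type of the key at each key position
      int[] indexWithinType;       // slot of that key inside its type's array

      private int addIndex, longCount, doubleCount, bytesCount;

      KeyColumnSetSketch(int keyCount) {
        // Over-allocate per type; finishAdding() shrinks to the real counts.
        longIndices = new int[keyCount];
        doubleIndices = new int[keyCount];
        bytesIndices = new int[keyCount];
        typeOfKey = new VectorType[keyCount];
        indexWithinType = new int[keyCount];
      }

      void addKey(VectorType type) {
        switch (type) {
        case LONG:
          longIndices[longCount] = addIndex;
          indexWithinType[addIndex] = longCount++;
          break;
        case DOUBLE:
          doubleIndices[doubleCount] = addIndex;
          indexWithinType[addIndex] = doubleCount++;
          break;
        case BYTES:
          bytesIndices[bytesCount] = addIndex;
          indexWithinType[addIndex] = bytesCount++;
          break;
        }
        typeOfKey[addIndex++] = type;
      }

      void finishAdding() {
        longIndices = Arrays.copyOf(longIndices, longCount);
        doubleIndices = Arrays.copyOf(doubleIndices, doubleCount);
        bytesIndices = Arrays.copyOf(bytesIndices, bytesCount);
      }
    }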

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExtractRow.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExtractRow.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExtractRow.java
index defaf90..94eaf56 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExtractRow.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExtractRow.java
@@ -150,25 +150,6 @@ public class VectorExtractRow {
   }
 
   /*
-   * Initialize using an ObjectInspector array and a column projection array.
-   */
-  public void init(TypeInfo[] typeInfos, int[] projectedColumns)
-      throws HiveException {
-
-    final int count = typeInfos.length;
-    allocateArrays(count);
-
-    for (int i = 0; i < count; i++) {
-
-      int projectionColumnNum = projectedColumns[i];
-
-      TypeInfo typeInfo = typeInfos[i];
-
-      initEntry(i, projectionColumnNum, typeInfo);
-    }
-  }
-
-  /*
    * Initialize using data type names.
    * No projection -- the column range 0 .. types.size()-1
    */
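
The init overload removed above walked a TypeInfo array and a projection array so that logical field i reads from physical batch column projectedColumns[i]. A minimal sketch of that projection-aware setup, with hypothetical ProjectedRowReaderSketch/Entry names and plain type names standing in for TypeInfo:

    // Hypothetical illustration: map logical field positions to projected physical
    // column numbers, keeping the type name alongside each entry.
    class ProjectedRowReaderSketch {
      static final class Entry {
        final int physicalColumn;
        final String typeName;
        Entry(int physicalColumn, String typeName) {
          this.physicalColumn = physicalColumn;
          this.typeName = typeName;
        }
      }

      private Entry[] entries;

      void init(String[] typeNames, int[] projectedColumns) {
        entries = new Entry[typeNames.length];
        for (int i = 0; i < typeNames.length; i++) {
          // Logical field i reads from physical column projectedColumns[i].
          entries[i] = new Entry(projectedColumns[i], typeNames[i]);
        }
      }

      int physicalColumnOf(int logicalField) {
        return entries[logicalField].physicalColumn;
      }
    }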

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
index 30916a0..fef7c2a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
@@ -22,21 +22,16 @@ import java.lang.management.ManagementFactory;
 import java.lang.management.MemoryMXBean;
 import java.lang.ref.SoftReference;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
-import org.apache.hadoop.hive.ql.exec.GroupByOperator;
 import org.apache.hadoop.hive.ql.exec.KeyWrapper;
 import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.ConstantVectorExpression;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.IdentityExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory;
@@ -57,8 +52,6 @@ import org.apache.hadoop.io.DataOutputBuffer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javolution.util.FastBitSet;
-
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
@@ -117,24 +110,6 @@ public class VectorGroupByOperator extends Operator<GroupByDesc> implements
 
   private transient VectorAssignRow vectorAssignRow;
 
-  /*
-   * Grouping sets members.
-   */
-  private transient boolean groupingSetsPresent;
-
-  // The field bits (i.e. which fields to include) or "id" for each grouping set.
-  private transient int[] groupingSets;
-
-  // The position in the column keys of the dummy grouping set id column.
-  private transient int groupingSetsPosition;
-
-  // The planner puts a constant field in for the dummy grouping set id.  We will overwrite it
-  // as we process the grouping sets.
-  private transient ConstantVectorExpression groupingSetsDummyVectorExpression;
-
-  // We translate the grouping set bit field into a boolean arrays.
-  private transient boolean[][] allGroupingSetsOverrideIsNulls;
-
   private transient int numEntriesHashTable;
 
   private transient long maxHashTblMemory;
@@ -169,32 +144,6 @@ public class VectorGroupByOperator extends Operator<GroupByDesc> implements
       // Do nothing.
     }
 
-    protected abstract void doProcessBatch(VectorizedRowBatch batch, boolean isFirstGroupingSet,
-        boolean[] currentGroupingSetsOverrideIsNulls) throws HiveException;
-
-    @Override
-    public void processBatch(VectorizedRowBatch batch) throws HiveException {
-
-      if (!groupingSetsPresent) {
-        doProcessBatch(batch, false, null);
-        return;
-      }
-
-      // We drive the doProcessBatch logic with the same batch but different
-      // grouping set id and null variation.
-      // PERFORMANCE NOTE: We do not try to reuse columns and generate the KeyWrappers anew...
-
-      final int size = groupingSets.length;
-      for (int i = 0; i < size; i++) {
-
-        // NOTE: We are overwriting the constant vector value...
-        groupingSetsDummyVectorExpression.setLongValue(groupingSets[i]);
-        groupingSetsDummyVectorExpression.evaluate(batch);
-
-        doProcessBatch(batch, (i == 0), allGroupingSetsOverrideIsNulls[i]);
-      }
-    }
-
     /**
      * Evaluates the aggregators on the current batch.
      * The aggregationBatchInfo must have been prepared
@@ -258,8 +207,7 @@ public class VectorGroupByOperator extends Operator<GroupByDesc> implements
     }
 
     @Override
-    public void doProcessBatch(VectorizedRowBatch batch, boolean isFirstGroupingSet,
-        boolean[] currentGroupingSetsOverrideIsNulls) throws HiveException {
+    public void processBatch(VectorizedRowBatch batch) throws HiveException {
       for (int i = 0; i < aggregators.length; ++i) {
         aggregators[i].aggregateInput(aggregationBuffers.getAggregationBuffer(i), batch);
       }
@@ -286,7 +234,7 @@ public class VectorGroupByOperator extends Operator<GroupByDesc> implements
     /**
      * Total per hashtable entry fixed memory (does not depend on key/agg values).
      */
-    private long fixedHashEntrySize;
+    private int fixedHashEntrySize;
 
     /**
      * Average per hashtable entry variable size memory (depends on key/agg value).
@@ -374,32 +322,17 @@ public class VectorGroupByOperator extends Operator<GroupByDesc> implements
             HiveConf.ConfVars.HIVEGROUPBYMAPINTERVAL.defaultIntVal;
       }
 
-      sumBatchSize = 0;
-
       mapKeysAggregationBuffers = new HashMap<KeyWrapper, VectorAggregationBufferRow>();
       computeMemoryLimits();
       LOG.debug("using hash aggregation processing mode");
     }
 
     @Override
-    public void doProcessBatch(VectorizedRowBatch batch, boolean isFirstGroupingSet,
-        boolean[] currentGroupingSetsOverrideIsNulls) throws HiveException {
-
-      if (!groupingSetsPresent || isFirstGroupingSet) {
-
-        // Evaluate the key expressions once.
-        for(int i = 0; i < keyExpressions.length; ++i) {
-          keyExpressions[i].evaluate(batch);
-        }
-      }
+    public void processBatch(VectorizedRowBatch batch) throws HiveException {
 
       // First we traverse the batch to evaluate and prepare the KeyWrappers
       // After this the KeyWrappers are properly set and hash code is computed
-      if (!groupingSetsPresent) {
-        keyWrappersBatch.evaluateBatch(batch);
-      } else {
-        keyWrappersBatch.evaluateBatchGroupingSets(batch, currentGroupingSetsOverrideIsNulls);
-      }
+      keyWrappersBatch.evaluateBatch(batch);
 
       // Next we locate the aggregation buffer set for each key
       prepareBatchAggregationBufferSets(batch);
@@ -458,18 +391,10 @@ public class VectorGroupByOperator extends Operator<GroupByDesc> implements
       // to bump its internal version.
       aggregationBatchInfo.startBatch();
 
-      if (batch.size == 0) {
-        return;
-      }
-
       // We now have to probe the global hash and find-or-allocate
       // the aggregation buffers to use for each key present in the batch
       VectorHashKeyWrapper[] keyWrappers = keyWrappersBatch.getVectorHashKeyWrappers();
-
-      final int n = keyExpressions.length == 0 ? 1 : batch.size;
-      // note - the row mapping is not relevant when aggregationBatchInfo::getDistinctBufferSetCount() == 1
-
-      for (int i=0; i < n; ++i) {
+      for (int i=0; i < batch.size; ++i) {
         VectorHashKeyWrapper kw = keyWrappers[i];
         VectorAggregationBufferRow aggregationBuffer = mapKeysAggregationBuffers.get(kw);
         if (null == aggregationBuffer) {
@@ -682,24 +607,10 @@ public class VectorGroupByOperator extends Operator<GroupByDesc> implements
     }
 
     @Override
-    public void doProcessBatch(VectorizedRowBatch batch, boolean isFirstGroupingSet,
-        boolean[] currentGroupingSetsOverrideIsNulls) throws HiveException {
-
-      if (!groupingSetsPresent || isFirstGroupingSet) {
-
-        // Evaluate the key expressions once.
-        for(int i = 0; i < keyExpressions.length; ++i) {
-          keyExpressions[i].evaluate(batch);
-        }
-      }
-
+    public void processBatch(VectorizedRowBatch batch) throws HiveException {
       // First we traverse the batch to evaluate and prepare the KeyWrappers
       // After this the KeyWrappers are properly set and hash code is computed
-      if (!groupingSetsPresent) {
-        keyWrappersBatch.evaluateBatch(batch);
-      } else {
-        keyWrappersBatch.evaluateBatchGroupingSets(batch, currentGroupingSetsOverrideIsNulls);
-      }
+      keyWrappersBatch.evaluateBatch(batch);
 
       VectorHashKeyWrapper[] batchKeys = keyWrappersBatch.getVectorHashKeyWrappers();
 
@@ -791,10 +702,7 @@ public class VectorGroupByOperator extends Operator<GroupByDesc> implements
     @Override
     public void initialize(Configuration hconf) throws HiveException {
       inGroup = false;
-
-      // We do not include the dummy grouping set column in the output.  So we pass outputKeyLength
-      // instead of keyExpressions.length
-      groupKeyHelper = new VectorGroupKeyHelper(outputKeyLength);
+      groupKeyHelper = new VectorGroupKeyHelper(keyExpressions.length);
       groupKeyHelper.init(keyExpressions);
       groupAggregators = allocateAggregationBuffer();
       buffer = new DataOutputBuffer();
@@ -817,18 +725,11 @@ public class VectorGroupByOperator extends Operator<GroupByDesc> implements
     }
 
     @Override
-    public void doProcessBatch(VectorizedRowBatch batch, boolean isFirstGroupingSet,
-        boolean[] currentGroupingSetsOverrideIsNulls) throws HiveException {
+    public void processBatch(VectorizedRowBatch batch) throws HiveException {
       assert(inGroup);
       if (first) {
         // Copy the group key to output batch now.  We'll copy in the aggregates at the end of the group.
         first = false;
-
-        // Evaluate the key expressions of just this first batch to get the correct key.
-        for (int i = 0; i < outputKeyLength; i++) {
-          keyExpressions[i].evaluate(batch);
-        }
-
         groupKeyHelper.copyGroupKey(batch, outputBatch, buffer);
       }
 
@@ -877,49 +778,6 @@ public class VectorGroupByOperator extends Operator<GroupByDesc> implements
     super(ctx);
   }
 
-  private void setupGroupingSets() {
-
-    groupingSetsPresent = conf.isGroupingSetsPresent();
-    if (!groupingSetsPresent) {
-      groupingSets = null;
-      groupingSetsPosition = -1;
-      groupingSetsDummyVectorExpression = null;
-      allGroupingSetsOverrideIsNulls = null;
-      return;
-    }
-
-    groupingSets = ArrayUtils.toPrimitive(conf.getListGroupingSets().toArray(new Integer[0]));
-    groupingSetsPosition = conf.getGroupingSetPosition();
-
-    allGroupingSetsOverrideIsNulls = new boolean[groupingSets.length][];
-
-    int pos = 0;
-    for (int groupingSet: groupingSets) {
-
-      // Create the mapping corresponding to the grouping set
-
-      // Assume all columns are null, except the dummy column is always non-null.
-      boolean[] groupingSetsOverrideIsNull = new boolean[keyExpressions.length];
-      Arrays.fill(groupingSetsOverrideIsNull, true);
-      groupingSetsOverrideIsNull[groupingSetsPosition] = false;
-
-      // Add keys of this grouping set.
-      FastBitSet bitset = GroupByOperator.groupingSet2BitSet(groupingSet, groupingSetsPosition);
-      for (int keyPos = bitset.nextClearBit(0); keyPos < groupingSetsPosition;
-        keyPos = bitset.nextClearBit(keyPos+1)) {
-        groupingSetsOverrideIsNull[keyPos] = false;
-      }
-
-      allGroupingSetsOverrideIsNulls[pos] =  groupingSetsOverrideIsNull;
-      pos++;
-    }
-
-    // The last key column is the dummy grouping set id.
-    //
-    // Figure out which (scratch) column was used so we can overwrite the dummy id.
-
-    groupingSetsDummyVectorExpression = (ConstantVectorExpression) keyExpressions[groupingSetsPosition];
-  }
 
   @Override
   protected void initializeOp(Configuration hconf) throws HiveException {
@@ -973,19 +831,15 @@ public class VectorGroupByOperator extends Operator<GroupByDesc> implements
 
     forwardCache = new Object[outputKeyLength + aggregators.length];
 
-    setupGroupingSets();
-
     switch (vectorDesc.getProcessingMode()) {
     case GLOBAL:
       Preconditions.checkState(outputKeyLength == 0);
-      Preconditions.checkState(!groupingSetsPresent);
       processingMode = this.new ProcessingModeGlobalAggregate();
       break;
     case HASH:
       processingMode = this.new ProcessingModeHashAggregate();
       break;
     case MERGE_PARTIAL:
-      Preconditions.checkState(!groupingSetsPresent);
       processingMode = this.new ProcessingModeReduceMergePartial();
       break;
     case STREAMING:

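The VectorGroupByOperator hunks above revert the vectorized GROUPING SETS support: on the '-' side, setupGroupingSets() precomputed one boolean "override is null" mask per grouping set, and processBatch() re-ran doProcessBatch() over the same batch once per mask after overwriting the constant dummy grouping-set-id vector expression (GLOBAL and MERGE_PARTIAL were guarded with checkState(!groupingSetsPresent)). The self-contained sketch below mirrors that mask construction to make the bit-to-key mapping concrete; it is plain Java with no Hive classes, java.util.BitSet stands in for FastBitSet, and the class name, method name, and example ids are illustrative assumptions rather than Hive's actual encoding.

    import java.util.Arrays;
    import java.util.BitSet;

    /**
     * Illustrative sketch (not Hive code): build the per-grouping-set
     * "override is null" masks the way the reverted VectorGroupByOperator
     * hunks describe. In this sketch a set bit in the grouping-set id marks
     * a key that is nulled out for that set; clear bits mark keys that are
     * kept. Hive's real id encoding lives in GroupByOperator.groupingSet2BitSet.
     */
    public class GroupingSetMaskSketch {

      // keyCount includes the trailing dummy grouping-set-id key, which is
      // always kept (never overridden to null), matching the reverted code.
      static boolean[] overrideIsNullMask(int groupingSetId, int groupingSetsPosition,
          int keyCount) {
        boolean[] overrideIsNull = new boolean[keyCount];
        Arrays.fill(overrideIsNull, true);            // assume every key is null...
        overrideIsNull[groupingSetsPosition] = false; // ...except the dummy id key.

        // Clear bits in the id are the keys that belong to this grouping set.
        BitSet bits = BitSet.valueOf(new long[] { groupingSetId });
        for (int keyPos = bits.nextClearBit(0); keyPos < groupingSetsPosition;
            keyPos = bits.nextClearBit(keyPos + 1)) {
          overrideIsNull[keyPos] = false;
        }
        return overrideIsNull;
      }

      public static void main(String[] args) {
        // Two keys (a, b) plus the dummy id column at position 2.
        // Under this sketch's "bit i nulls key i" convention,
        // GROUPING SETS ((a, b), (a), ()) map to ids 0, 2, 3.
        for (int id : new int[] { 0, 2, 3 }) {
          System.out.println("grouping set id " + id + " -> "
              + Arrays.toString(overrideIsNullMask(id, 2, 3)));
        }
      }
    }

Running it prints one mask per id; for example id 3 keeps only the dummy id column non-null, which is the shape the reverted processBatch() loop fed to doProcessBatch() for each grouping set.
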
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java
index 64706ad..50d0452 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java
@@ -19,12 +19,8 @@
 package org.apache.hadoop.hive.ql.exec.vector;
 
 import java.io.IOException;
-
-import org.apache.hadoop.hive.ql.exec.vector.ColumnVector.Type;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.io.DataOutputBuffer;
 
 /**
@@ -32,25 +28,14 @@ import org.apache.hadoop.io.DataOutputBuffer;
  */
 public class VectorGroupKeyHelper extends VectorColumnSetInfo {
 
-  private int[] outputColumnNums;
-
   public VectorGroupKeyHelper(int keyCount) {
     super(keyCount);
    }
 
   void init(VectorExpression[] keyExpressions) throws HiveException {
-
-    // NOTE: To support pruning the grouping set id dummy key by VectorGroupbyOpeator MERGE_PARTIAL
-    // case, we use the keyCount passed to the constructor and not keyExpressions.length.
-
-    // Inspect the output type of each key expression.  And, remember the output columns.
-    outputColumnNums = new int[keyCount];
-    for(int i=0; i < keyCount; ++i) {
-      String typeName = VectorizationContext.mapTypeNameSynonyms(keyExpressions[i].getOutputType());
-      TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeName);
-      Type columnVectorType = VectorizationContext.getColumnVectorTypeFromTypeInfo(typeInfo);
-      addKey(columnVectorType);
-      outputColumnNums[i] = keyExpressions[i].getOutputColumn();
+    // Inspect the output type of each key expression.
+    for(int i=0; i < keyExpressions.length; ++i) {
+      addKey(keyExpressions[i].getOutputType());
     }
     finishAdding();
   }
@@ -65,9 +50,9 @@ public class VectorGroupKeyHelper extends VectorColumnSetInfo {
   public void copyGroupKey(VectorizedRowBatch inputBatch, VectorizedRowBatch outputBatch,
           DataOutputBuffer buffer) throws HiveException {
     for(int i = 0; i< longIndices.length; ++i) {
-      final int columnIndex = outputColumnNums[longIndices[i]];
-      LongColumnVector inputColumnVector = (LongColumnVector) inputBatch.cols[columnIndex];
-      LongColumnVector outputColumnVector = (LongColumnVector) outputBatch.cols[columnIndex];
+      int keyIndex = longIndices[i];
+      LongColumnVector inputColumnVector = (LongColumnVector) inputBatch.cols[keyIndex];
+      LongColumnVector outputColumnVector = (LongColumnVector) outputBatch.cols[keyIndex];
 
       // This vectorized code pattern says: 
       //    If the input batch has no nulls at all (noNulls is true) OR
@@ -91,9 +76,9 @@ public class VectorGroupKeyHelper extends VectorColumnSetInfo {
       }
     }
     for(int i=0;i<doubleIndices.length; ++i) {
-      final int columnIndex = outputColumnNums[doubleIndices[i]];
-      DoubleColumnVector inputColumnVector = (DoubleColumnVector) inputBatch.cols[columnIndex];
-      DoubleColumnVector outputColumnVector = (DoubleColumnVector) outputBatch.cols[columnIndex];
+      int keyIndex = doubleIndices[i];
+      DoubleColumnVector inputColumnVector = (DoubleColumnVector) inputBatch.cols[keyIndex];
+      DoubleColumnVector outputColumnVector = (DoubleColumnVector) outputBatch.cols[keyIndex];
       if (inputColumnVector.noNulls || !inputColumnVector.isNull[0]) {
         outputColumnVector.vector[outputBatch.size] = inputColumnVector.vector[0];
       } else {
@@ -102,9 +87,9 @@ public class VectorGroupKeyHelper extends VectorColumnSetInfo {
       }
     }
     for(int i=0;i<stringIndices.length; ++i) {
-      final int columnIndex = outputColumnNums[stringIndices[i]];
-      BytesColumnVector inputColumnVector = (BytesColumnVector) inputBatch.cols[columnIndex];
-      BytesColumnVector outputColumnVector = (BytesColumnVector) outputBatch.cols[columnIndex];
+      int keyIndex = stringIndices[i];
+      BytesColumnVector inputColumnVector = (BytesColumnVector) inputBatch.cols[keyIndex];
+      BytesColumnVector outputColumnVector = (BytesColumnVector) outputBatch.cols[keyIndex];
       if (inputColumnVector.noNulls || !inputColumnVector.isNull[0]) {
         // Copy bytes into scratch buffer.
         int start = buffer.getLength();
@@ -121,9 +106,9 @@ public class VectorGroupKeyHelper extends VectorColumnSetInfo {
       }
     }
     for(int i=0;i<decimalIndices.length; ++i) {
-      final int columnIndex = outputColumnNums[decimalIndices[i]];
-      DecimalColumnVector inputColumnVector = (DecimalColumnVector) inputBatch.cols[columnIndex];
-      DecimalColumnVector outputColumnVector = (DecimalColumnVector) outputBatch.cols[columnIndex];
+      int keyIndex = decimalIndices[i];
+      DecimalColumnVector inputColumnVector = (DecimalColumnVector) inputBatch.cols[keyIndex];
+      DecimalColumnVector outputColumnVector = (DecimalColumnVector) outputBatch.cols[keyIndex];
       if (inputColumnVector.noNulls || !inputColumnVector.isNull[0]) {
 
         // Since we store references to HiveDecimalWritable instances, we must use the update method instead
@@ -135,9 +120,9 @@ public class VectorGroupKeyHelper extends VectorColumnSetInfo {
       }
     }
     for(int i=0;i<timestampIndices.length; ++i) {
-      final int columnIndex = outputColumnNums[timestampIndices[i]];
-      TimestampColumnVector inputColumnVector = (TimestampColumnVector) inputBatch.cols[columnIndex];
-      TimestampColumnVector outputColumnVector = (TimestampColumnVector) outputBatch.cols[columnIndex];
+      int keyIndex = timestampIndices[i];
+      TimestampColumnVector inputColumnVector = (TimestampColumnVector) inputBatch.cols[keyIndex];
+      TimestampColumnVector outputColumnVector = (TimestampColumnVector) outputBatch.cols[keyIndex];
       if (inputColumnVector.noNulls || !inputColumnVector.isNull[0]) {
 
         outputColumnVector.setElement(outputBatch.size, 0, inputColumnVector);
@@ -147,9 +132,9 @@ public class VectorGroupKeyHelper extends VectorColumnSetInfo {
       }
     }
     for(int i=0;i<intervalDayTimeIndices.length; ++i) {
-      final int columnIndex = outputColumnNums[intervalDayTimeIndices[i]];
-      IntervalDayTimeColumnVector inputColumnVector = (IntervalDayTimeColumnVector) inputBatch.cols[columnIndex];
-      IntervalDayTimeColumnVector outputColumnVector = (IntervalDayTimeColumnVector) outputBatch.cols[columnIndex];
+      int keyIndex = intervalDayTimeIndices[i];
+      IntervalDayTimeColumnVector inputColumnVector = (IntervalDayTimeColumnVector) inputBatch.cols[keyIndex];
+      IntervalDayTimeColumnVector outputColumnVector = (IntervalDayTimeColumnVector) outputBatch.cols[keyIndex];
       if (inputColumnVector.noNulls || !inputColumnVector.isNull[0]) {
 
         outputColumnVector.setElement(outputBatch.size, 0, inputColumnVector);

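The copyGroupKey() hunks above repeatedly apply the pattern spelled out in the comment inside them: the group key sits in row 0 of the input batch and is copied (or marked null) at position outputBatch.size of the output batch, branching on noNulls / isNull[0] so null values are never read. The sketch below is a minimal, self-contained rendering of that pattern; a tiny stand-in LongColumn class replaces Hive's LongColumnVector so it compiles on its own, and the else branch follows the usual column-vector convention since its exact body falls outside the quoted hunks.

    /**
     * Illustrative sketch of the null-aware copy used by
     * VectorGroupKeyHelper.copyGroupKey: copy the key from input row 0
     * into the next free row of the output batch.
     */
    public class CopyGroupKeySketch {

      static final class LongColumn {
        boolean noNulls = true;                  // true when no row in the column is null
        boolean[] isNull = new boolean[1024];
        long[] vector = new long[1024];
      }

      // Copy the key value (or its null-ness) from input row 0 to output row outRow.
      static void copyKey(LongColumn in, LongColumn out, int outRow) {
        if (in.noNulls || !in.isNull[0]) {
          out.vector[outRow] = in.vector[0];
        } else {
          // Assumed else body: mark the output slot null, standard ColumnVector style.
          out.noNulls = false;
          out.isNull[outRow] = true;
        }
      }

      public static void main(String[] args) {
        LongColumn in = new LongColumn();
        LongColumn out = new LongColumn();

        in.vector[0] = 42L;                      // non-null key
        copyKey(in, out, 0);

        in.noNulls = false;                      // now the key is null
        in.isNull[0] = true;
        copyKey(in, out, 1);

        System.out.println(out.vector[0] + " / null=" + out.isNull[1]);
      }
    }

The same shape repeats for the double, bytes, decimal, timestamp and interval-day-time sections of copyGroupKey(); the difference the revert undoes is only how the batch column is located, with the '+' side indexing by key position and the '-' side by the key expressions' output column numbers.
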
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java
index 3e1fcdd..5de59b1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.vector;
 
-import org.apache.hive.common.util.Murmur3;
-
 import java.sql.Timestamp;
 import java.util.Arrays;
 
@@ -32,8 +30,6 @@ import org.apache.hadoop.hive.ql.util.JavaDataModel;
 import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 
-import com.google.common.base.Preconditions;
-
 /**
  * A hash map key wrapper for vectorized processing.
  * It stores the key values as primitives in arrays for each supported primitive type.
@@ -43,17 +39,6 @@ import com.google.common.base.Preconditions;
  */
 public class VectorHashKeyWrapper extends KeyWrapper {
 
-  public static final class HashContext {
-    private final Murmur3.IncrementalHash32 bytesHash = new Murmur3.IncrementalHash32();
-
-    public static Murmur3.IncrementalHash32 getBytesHash(HashContext ctx) {
-      if (ctx == null) {
-        return new Murmur3.IncrementalHash32();
-      }
-      return ctx.bytesHash;
-    }
-  }
-
   private static final int[] EMPTY_INT_ARRAY = new int[0];
   private static final long[] EMPTY_LONG_ARRAY = new long[0];
   private static final double[] EMPTY_DOUBLE_ARRAY = new double[0];
@@ -74,25 +59,15 @@ public class VectorHashKeyWrapper extends KeyWrapper {
   private HiveDecimalWritable[] decimalValues;
 
   private Timestamp[] timestampValues;
-  private static Timestamp ZERO_TIMESTAMP = new Timestamp(0);
 
   private HiveIntervalDayTime[] intervalDayTimeValues;
-  private static HiveIntervalDayTime ZERO_INTERVALDAYTIME= new HiveIntervalDayTime(0, 0);
 
-  // NOTE: The null array is indexed by keyIndex, which is not available internally.  The mapping
-  //       from a long, double, etc index to key index is kept once in the separate
-  //       VectorColumnSetInfo object.
   private boolean[] isNull;
-
   private int hashcode;
 
-  private HashContext hashCtx;
-
-  private VectorHashKeyWrapper(HashContext ctx, int longValuesCount, int doubleValuesCount,
+  private VectorHashKeyWrapper(int longValuesCount, int doubleValuesCount,
           int byteValuesCount, int decimalValuesCount, int timestampValuesCount,
-          int intervalDayTimeValuesCount,
-          int keyCount) {
-    hashCtx = ctx;
+          int intervalDayTimeValuesCount) {
     longValues = longValuesCount > 0 ? new long[longValuesCount] : EMPTY_LONG_ARRAY;
     doubleValues = doubleValuesCount > 0 ? new double[doubleValuesCount] : EMPTY_DOUBLE_ARRAY;
     decimalValues = decimalValuesCount > 0 ? new HiveDecimalWritable[decimalValuesCount] : EMPTY_DECIMAL_ARRAY;
@@ -116,23 +91,23 @@ public class VectorHashKeyWrapper extends KeyWrapper {
     for(int i = 0; i < intervalDayTimeValuesCount; ++i) {
       intervalDayTimeValues[i] = new HiveIntervalDayTime();
     }
-    isNull = new boolean[keyCount];
+    isNull = new boolean[longValuesCount + doubleValuesCount + byteValuesCount +
+                         decimalValuesCount + timestampValuesCount + intervalDayTimeValuesCount];
     hashcode = 0;
   }
 
   private VectorHashKeyWrapper() {
   }
 
-  public static VectorHashKeyWrapper allocate(HashContext ctx, int longValuesCount, int doubleValuesCount,
+  public static VectorHashKeyWrapper allocate(int longValuesCount, int doubleValuesCount,
       int byteValuesCount, int decimalValuesCount, int timestampValuesCount,
-      int intervalDayTimeValuesCount, int keyCount) {
+      int intervalDayTimeValuesCount) {
     if ((longValuesCount + doubleValuesCount + byteValuesCount + decimalValuesCount
         + timestampValuesCount + intervalDayTimeValuesCount) == 0) {
       return EMPTY_KEY_WRAPPER;
     }
-    return new VectorHashKeyWrapper(ctx, longValuesCount, doubleValuesCount, byteValuesCount,
-        decimalValuesCount, timestampValuesCount, intervalDayTimeValuesCount,
-        keyCount);
+    return new VectorHashKeyWrapper(longValuesCount, doubleValuesCount, byteValuesCount,
+        decimalValuesCount, timestampValuesCount, intervalDayTimeValuesCount);
   }
 
   @Override
@@ -142,44 +117,45 @@ public class VectorHashKeyWrapper extends KeyWrapper {
 
   @Override
   public void setHashKey() {
-    // compute locally and assign
-    int hash = Arrays.hashCode(longValues) ^
+    hashcode = Arrays.hashCode(longValues) ^
         Arrays.hashCode(doubleValues) ^
         Arrays.hashCode(isNull);
 
     for (int i = 0; i < decimalValues.length; i++) {
       // Use the new faster hash code since we are hashing memory objects.
-      hash ^= decimalValues[i].newFasterHashCode();
+      hashcode ^= decimalValues[i].newFasterHashCode();
     }
 
     for (int i = 0; i < timestampValues.length; i++) {
-      hash ^= timestampValues[i].hashCode();
+      hashcode ^= timestampValues[i].hashCode();
     }
 
     for (int i = 0; i < intervalDayTimeValues.length; i++) {
-      hash ^= intervalDayTimeValues[i].hashCode();
+      hashcode ^= intervalDayTimeValues[i].hashCode();
     }
 
     // This code, with branches and all, is not executed if there are no string keys
-    Murmur3.IncrementalHash32 bytesHash = null;
     for (int i = 0; i < byteValues.length; ++i) {
       /*
        *  Hashing the string is potentially expensive so is better to branch.
        *  Additionally not looking at values for nulls allows us not reset the values.
        */
-      if (byteLengths[i] == -1) {
-        continue;
-      }
-      if (bytesHash == null) {
-        bytesHash = HashContext.getBytesHash(hashCtx);
-        bytesHash.start(hash);
+      if (!isNull[longValues.length + doubleValues.length + i]) {
+        byte[] bytes = byteValues[i];
+        int start = byteStarts[i];
+        int length = byteLengths[i];
+        if (length == bytes.length && start == 0) {
+          hashcode ^= Arrays.hashCode(bytes);
+        }
+        else {
+          // Unfortunately there is no Arrays.hashCode(byte[], start, length)
+          for(int j = start; j < start + length; ++j) {
+            // use 461 as is a (sexy!) prime.
+            hashcode ^= 461 * bytes[j];
+          }
+        }
       }
-      bytesHash.add(byteValues[i], byteStarts[i], byteLengths[i]);
     }
-    if (bytesHash != null) {
-      hash = bytesHash.end();
-    }
-    this.hashcode = hash;
   }
 
   @Override
@@ -191,7 +167,6 @@ public class VectorHashKeyWrapper extends KeyWrapper {
   public boolean equals(Object that) {
     if (that instanceof VectorHashKeyWrapper) {
       VectorHashKeyWrapper keyThat = (VectorHashKeyWrapper)that;
-      // not comparing hashCtx - irrelevant
       return hashcode == keyThat.hashcode &&
           Arrays.equals(longValues, keyThat.longValues) &&
           Arrays.equals(doubleValues, keyThat.doubleValues) &&
@@ -209,7 +184,7 @@ public class VectorHashKeyWrapper extends KeyWrapper {
     // By the time we enter here the byteValues.length and isNull must have already been compared
     for (int i = 0; i < byteValues.length; ++i) {
       // the byte comparison is potentially expensive so is better to branch on null
-      if (byteLengths[i] != -1) {
+      if (!isNull[longValues.length + doubleValues.length + i]) {
         if (!StringExpr.equal(
             byteValues[i],
             byteStarts[i],
@@ -232,7 +207,6 @@ public class VectorHashKeyWrapper extends KeyWrapper {
   }
 
   public void duplicateTo(VectorHashKeyWrapper clone) {
-    clone.hashCtx = hashCtx;
     clone.longValues = (longValues.length > 0) ? longValues.clone() : EMPTY_LONG_ARRAY;
     clone.doubleValues = (doubleValues.length > 0) ? doubleValues.clone() : EMPTY_DOUBLE_ARRAY;
     clone.isNull = isNull.clone();
@@ -254,7 +228,7 @@ public class VectorHashKeyWrapper extends KeyWrapper {
       for (int i = 0; i < byteValues.length; ++i) {
         // avoid allocation/copy of nulls, because it potentially expensive.
         // branch instead.
-        if (byteLengths[i] != -1) {
+        if (!isNull[longValues.length + doubleValues.length + i]) {
           clone.byteValues[i] = Arrays.copyOfRange(byteValues[i],
               byteStarts[i], byteStarts[i] + byteLengths[i]);
         }
@@ -300,141 +274,106 @@ public class VectorHashKeyWrapper extends KeyWrapper {
     throw new UnsupportedOperationException();
   }
 
-  public void assignLong(int index, long v) {
-    longValues[index] = v;
+  public void assignDouble(int index, double d) {
+    doubleValues[index] = d;
+    isNull[longValues.length + index] = false;
   }
 
-  public void assignNullLong(int keyIndex, int index) {
-    isNull[keyIndex] = true;
-    longValues[index] = 0; // assign 0 to simplify hashcode
+  public void assignNullDouble(int index) {
+    doubleValues[index] = 0; // assign 0 to simplify hashcode
+    isNull[longValues.length + index] = true;
   }
 
-  public void assignDouble(int index, double d) {
-    doubleValues[index] = d;
+  public void assignLong(int index, long v) {
+    longValues[index] = v;
+    isNull[index] = false;
   }
 
-  public void assignNullDouble(int keyIndex, int index) {
-    isNull[keyIndex] = true;
-    doubleValues[index] = 0; // assign 0 to simplify hashcode
+  public void assignNullLong(int index) {
+    longValues[index] = 0; // assign 0 to simplify hashcode
+    isNull[index] = true;
   }
 
   public void assignString(int index, byte[] bytes, int start, int length) {
-    Preconditions.checkState(bytes != null);
     byteValues[index] = bytes;
     byteStarts[index] = start;
     byteLengths[index] = length;
+    isNull[longValues.length + doubleValues.length + index] = false;
   }
 
-  public void assignNullString(int keyIndex, int index) {
-    isNull[keyIndex] = true;
-    byteValues[index] = null;
-    byteStarts[index] = 0;
-    // We need some value that indicates NULL.
-    byteLengths[index] = -1;
+  public void assignNullString(int index) {
+    // We do not assign the value to byteValues[] because the value is never used on null
+    isNull[longValues.length + doubleValues.length + index] = true;
   }
 
   public void assignDecimal(int index, HiveDecimalWritable value) {
     decimalValues[index].set(value);
+    isNull[longValues.length + doubleValues.length + byteValues.length + index] = false;
   }
 
-  public void assignNullDecimal(int keyIndex, int index) {
-    isNull[keyIndex] = true;
-    decimalValues[index].set(HiveDecimal.ZERO); // assign 0 to simplify hashcode
+  public void assignNullDecimal(int index) {
+      isNull[longValues.length + doubleValues.length + byteValues.length + index] = true;
   }
 
   public void assignTimestamp(int index, Timestamp value) {
     timestampValues[index] = value;
+    isNull[longValues.length + doubleValues.length + byteValues.length +
+           decimalValues.length + index] = false;
   }
 
   public void assignTimestamp(int index, TimestampColumnVector colVector, int elementNum) {
     colVector.timestampUpdate(timestampValues[index], elementNum);
+    isNull[longValues.length + doubleValues.length + byteValues.length +
+           decimalValues.length + index] = false;
   }
 
-  public void assignNullTimestamp(int keyIndex, int index) {
-    isNull[keyIndex] = true;
-    timestampValues[index] = ZERO_TIMESTAMP; // assign 0 to simplify hashcode
+  public void assignNullTimestamp(int index) {
+      isNull[longValues.length + doubleValues.length + byteValues.length +
+             decimalValues.length + index] = true;
   }
 
   public void assignIntervalDayTime(int index, HiveIntervalDayTime value) {
     intervalDayTimeValues[index].set(value);
+    isNull[longValues.length + doubleValues.length + byteValues.length +
+           decimalValues.length + timestampValues.length + index] = false;
   }
 
   public void assignIntervalDayTime(int index, IntervalDayTimeColumnVector colVector, int elementNum) {
     intervalDayTimeValues[index].set(colVector.asScratchIntervalDayTime(elementNum));
+    isNull[longValues.length + doubleValues.length + byteValues.length +
+           decimalValues.length + timestampValues.length + index] = false;
   }
 
-  public void assignNullIntervalDayTime(int keyIndex, int index) {
-    isNull[keyIndex] = true;
-    intervalDayTimeValues[index] = ZERO_INTERVALDAYTIME; // assign 0 to simplify hashcode
+  public void assignNullIntervalDayTime(int index) {
+      isNull[longValues.length + doubleValues.length + byteValues.length +
+             decimalValues.length + timestampValues.length + index] = true;
   }
 
   @Override
   public String toString()
   {
-    StringBuilder sb = new StringBuilder();
-    boolean isFirst = true;
-    if (longValues.length > 0) {
-      isFirst = false;
-      sb.append("longs ");
-      sb.append(Arrays.toString(longValues));
-    }
-    if (doubleValues.length > 0) {
-      if (isFirst) {
-        isFirst = false;
-      } else {
-        sb.append(", ");
-      }
-      sb.append("doubles ");
-      sb.append(Arrays.toString(doubleValues));
-    }
-    if (byteValues.length > 0) {
-      if (isFirst) {
-        isFirst = false;
-      } else {
-        sb.append(", ");
-      }
-      sb.append("byte lengths ");
-      sb.append(Arrays.toString(byteLengths));
-    }
-    if (decimalValues.length > 0) {
-      if (isFirst) {
-        isFirst = false;
-      } else {
-        sb.append(", ");
-      }
-      sb.append("decimals ");
-      sb.append(Arrays.toString(decimalValues));
-    }
-    if (timestampValues.length > 0) {
-      if (isFirst) {
-        isFirst = false;
-      } else {
-        sb.append(", ");
-      }
-      sb.append("timestamps ");
-      sb.append(Arrays.toString(timestampValues));
-    }
-    if (intervalDayTimeValues.length > 0) {
-      if (isFirst) {
-        isFirst = false;
-      } else {
-        sb.append(", ");
-      }
-      sb.append("interval day times ");
-      sb.append(Arrays.toString(intervalDayTimeValues));
-    }
+    return String.format("%d[%s] %d[%s] %d[%s] %d[%s] %d[%s] %d[%s]",
+        longValues.length, Arrays.toString(longValues),
+        doubleValues.length, Arrays.toString(doubleValues),
+        byteValues.length, Arrays.toString(byteValues),
+        decimalValues.length, Arrays.toString(decimalValues),
+        timestampValues.length, Arrays.toString(timestampValues),
+        intervalDayTimeValues.length, Arrays.toString(intervalDayTimeValues));
+  }
 
-    if (isFirst) {
-      isFirst = false;
-    } else {
-      sb.append(", ");
-    }
-    sb.append("nulls ");
-    sb.append(Arrays.toString(isNull));
+  public boolean getIsLongNull(int i) {
+    return isNull[i];
+  }
 
-    return sb.toString();
+  public boolean getIsDoubleNull(int i) {
+    return isNull[longValues.length + i];
   }
 
+  public boolean getIsBytesNull(int i) {
+    return isNull[longValues.length + doubleValues.length + i];
+  }
+
+
   public long getLongValue(int i) {
     return longValues[i];
   }
@@ -464,29 +403,35 @@ public class VectorHashKeyWrapper extends KeyWrapper {
     return variableSize;
   }
 
+  public boolean getIsDecimalNull(int i) {
+    return isNull[longValues.length + doubleValues.length + byteValues.length + i];
+  }
+
   public HiveDecimalWritable getDecimal(int i) {
     return decimalValues[i];
   }
 
-  public Timestamp getTimestamp(int i) {
-    return timestampValues[i];
+  public boolean getIsTimestampNull(int i) {
+    return isNull[longValues.length + doubleValues.length + byteValues.length +
+                  decimalValues.length + i];
   }
 
-  public HiveIntervalDayTime getIntervalDayTime(int i) {
-    return intervalDayTimeValues[i];
+  public Timestamp getTimestamp(int i) {
+    return timestampValues[i];
   }
 
-  public void clearIsNull() {
-    Arrays.fill(isNull, false);
+  public boolean getIsIntervalDayTimeNull(int i) {
+    return isNull[longValues.length + doubleValues.length + byteValues.length +
+                  decimalValues.length + timestampValues.length + i];
   }
 
-  public boolean isNull(int keyIndex) {
-    return isNull[keyIndex];
+  public HiveIntervalDayTime getIntervalDayTime(int i) {
+    return intervalDayTimeValues[i];
   }
 
   public static final class EmptyVectorHashKeyWrapper extends VectorHashKeyWrapper {
     private EmptyVectorHashKeyWrapper() {
-      super(null, 0, 0, 0, 0, 0, 0, /* keyCount */ 0);
+      super(0, 0, 0, 0, 0, 0);
       // no need to override assigns - all assign ops will fail due to 0 size
     }
 
@@ -506,3 +451,4 @@ public class VectorHashKeyWrapper extends KeyWrapper {
     }
   }
 }
+

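A large share of the VectorHashKeyWrapper churn above is about where nullness lives. On the '+' side (the code this revert restores), a single isNull array is laid out flat as [longs][doubles][bytes][decimals][timestamps][intervalDayTimes], and every assign*/getIs*Null method recomputes the offset by summing the preceding array lengths; on the '-' side, isNull was indexed by the original key position and string nulls were flagged with byteLengths[i] == -1, which is also why the '-' side hashed strings through a Murmur3 incremental hash instead of the XOR loop. The self-contained sketch below (illustrative names, not Hive's) just makes the flat offset arithmetic explicit.

    import java.util.Arrays;

    /**
     * Illustrative sketch of the flat isNull layout used on the '+' side of
     * the VectorHashKeyWrapper hunks: one boolean per key value, sections
     * ordered longs, doubles, bytes, decimals, timestamps, intervalDayTimes.
     */
    public class FlatNullLayoutSketch {

      final int longCount, doubleCount, byteCount, decimalCount, timestampCount, intervalCount;
      final boolean[] isNull;

      FlatNullLayoutSketch(int longCount, int doubleCount, int byteCount,
          int decimalCount, int timestampCount, int intervalCount) {
        this.longCount = longCount;
        this.doubleCount = doubleCount;
        this.byteCount = byteCount;
        this.decimalCount = decimalCount;
        this.timestampCount = timestampCount;
        this.intervalCount = intervalCount;
        this.isNull = new boolean[longCount + doubleCount + byteCount
            + decimalCount + timestampCount + intervalCount];
      }

      // Offsets mirror the expressions repeated in the assign*/getIs*Null methods.
      int doubleNullIndex(int i)    { return longCount + i; }
      int bytesNullIndex(int i)     { return longCount + doubleCount + i; }
      int decimalNullIndex(int i)   { return longCount + doubleCount + byteCount + i; }
      int timestampNullIndex(int i) { return longCount + doubleCount + byteCount + decimalCount + i; }
      int intervalNullIndex(int i)  { return longCount + doubleCount + byteCount + decimalCount
                                             + timestampCount + i; }

      public static void main(String[] args) {
        // Two long keys, one string key, one decimal key.
        FlatNullLayoutSketch w = new FlatNullLayoutSketch(2, 0, 1, 1, 0, 0);
        w.isNull[w.bytesNullIndex(0)] = true;          // the string key is null
        System.out.println(Arrays.toString(w.isNull)); // [false, false, true, false]
      }
    }

Running it prints [false, false, true, false]: with two long keys, one string key and one decimal key, the null flag for string key 0 lands at flat index 2, exactly the kind of index the restored equals()/setHashKey() code computes inline.
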

[23/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index 4e3b2af..df49615 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size839;
-            ::apache::thrift::protocol::TType _etype842;
-            xfer += iprot->readListBegin(_etype842, _size839);
-            this->success.resize(_size839);
-            uint32_t _i843;
-            for (_i843 = 0; _i843 < _size839; ++_i843)
+            uint32_t _size840;
+            ::apache::thrift::protocol::TType _etype843;
+            xfer += iprot->readListBegin(_etype843, _size840);
+            this->success.resize(_size840);
+            uint32_t _i844;
+            for (_i844 = 0; _i844 < _size840; ++_i844)
             {
-              xfer += iprot->readString(this->success[_i843]);
+              xfer += iprot->readString(this->success[_i844]);
             }
             xfer += iprot->readListEnd();
           }
@@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter844;
-      for (_iter844 = this->success.begin(); _iter844 != this->success.end(); ++_iter844)
+      std::vector<std::string> ::const_iterator _iter845;
+      for (_iter845 = this->success.begin(); _iter845 != this->success.end(); ++_iter845)
       {
-        xfer += oprot->writeString((*_iter844));
+        xfer += oprot->writeString((*_iter845));
       }
       xfer += oprot->writeListEnd();
     }
@@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size845;
-            ::apache::thrift::protocol::TType _etype848;
-            xfer += iprot->readListBegin(_etype848, _size845);
-            (*(this->success)).resize(_size845);
-            uint32_t _i849;
-            for (_i849 = 0; _i849 < _size845; ++_i849)
+            uint32_t _size846;
+            ::apache::thrift::protocol::TType _etype849;
+            xfer += iprot->readListBegin(_etype849, _size846);
+            (*(this->success)).resize(_size846);
+            uint32_t _i850;
+            for (_i850 = 0; _i850 < _size846; ++_i850)
             {
-              xfer += iprot->readString((*(this->success))[_i849]);
+              xfer += iprot->readString((*(this->success))[_i850]);
             }
             xfer += iprot->readListEnd();
           }
@@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size850;
-            ::apache::thrift::protocol::TType _etype853;
-            xfer += iprot->readListBegin(_etype853, _size850);
-            this->success.resize(_size850);
-            uint32_t _i854;
-            for (_i854 = 0; _i854 < _size850; ++_i854)
+            uint32_t _size851;
+            ::apache::thrift::protocol::TType _etype854;
+            xfer += iprot->readListBegin(_etype854, _size851);
+            this->success.resize(_size851);
+            uint32_t _i855;
+            for (_i855 = 0; _i855 < _size851; ++_i855)
             {
-              xfer += iprot->readString(this->success[_i854]);
+              xfer += iprot->readString(this->success[_i855]);
             }
             xfer += iprot->readListEnd();
           }
@@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter855;
-      for (_iter855 = this->success.begin(); _iter855 != this->success.end(); ++_iter855)
+      std::vector<std::string> ::const_iterator _iter856;
+      for (_iter856 = this->success.begin(); _iter856 != this->success.end(); ++_iter856)
       {
-        xfer += oprot->writeString((*_iter855));
+        xfer += oprot->writeString((*_iter856));
       }
       xfer += oprot->writeListEnd();
     }
@@ -1552,14 +1552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size856;
-            ::apache::thrift::protocol::TType _etype859;
-            xfer += iprot->readListBegin(_etype859, _size856);
-            (*(this->success)).resize(_size856);
-            uint32_t _i860;
-            for (_i860 = 0; _i860 < _size856; ++_i860)
+            uint32_t _size857;
+            ::apache::thrift::protocol::TType _etype860;
+            xfer += iprot->readListBegin(_etype860, _size857);
+            (*(this->success)).resize(_size857);
+            uint32_t _i861;
+            for (_i861 = 0; _i861 < _size857; ++_i861)
             {
-              xfer += iprot->readString((*(this->success))[_i860]);
+              xfer += iprot->readString((*(this->success))[_i861]);
             }
             xfer += iprot->readListEnd();
           }
@@ -2621,17 +2621,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->success.clear();
-            uint32_t _size861;
-            ::apache::thrift::protocol::TType _ktype862;
-            ::apache::thrift::protocol::TType _vtype863;
-            xfer += iprot->readMapBegin(_ktype862, _vtype863, _size861);
-            uint32_t _i865;
-            for (_i865 = 0; _i865 < _size861; ++_i865)
+            uint32_t _size862;
+            ::apache::thrift::protocol::TType _ktype863;
+            ::apache::thrift::protocol::TType _vtype864;
+            xfer += iprot->readMapBegin(_ktype863, _vtype864, _size862);
+            uint32_t _i866;
+            for (_i866 = 0; _i866 < _size862; ++_i866)
             {
-              std::string _key866;
-              xfer += iprot->readString(_key866);
-              Type& _val867 = this->success[_key866];
-              xfer += _val867.read(iprot);
+              std::string _key867;
+              xfer += iprot->readString(_key867);
+              Type& _val868 = this->success[_key867];
+              xfer += _val868.read(iprot);
             }
             xfer += iprot->readMapEnd();
           }
@@ -2670,11 +2670,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
     {
       xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::map<std::string, Type> ::const_iterator _iter868;
-      for (_iter868 = this->success.begin(); _iter868 != this->success.end(); ++_iter868)
+      std::map<std::string, Type> ::const_iterator _iter869;
+      for (_iter869 = this->success.begin(); _iter869 != this->success.end(); ++_iter869)
       {
-        xfer += oprot->writeString(_iter868->first);
-        xfer += _iter868->second.write(oprot);
+        xfer += oprot->writeString(_iter869->first);
+        xfer += _iter869->second.write(oprot);
       }
       xfer += oprot->writeMapEnd();
     }
@@ -2719,17 +2719,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             (*(this->success)).clear();
-            uint32_t _size869;
-            ::apache::thrift::protocol::TType _ktype870;
-            ::apache::thrift::protocol::TType _vtype871;
-            xfer += iprot->readMapBegin(_ktype870, _vtype871, _size869);
-            uint32_t _i873;
-            for (_i873 = 0; _i873 < _size869; ++_i873)
+            uint32_t _size870;
+            ::apache::thrift::protocol::TType _ktype871;
+            ::apache::thrift::protocol::TType _vtype872;
+            xfer += iprot->readMapBegin(_ktype871, _vtype872, _size870);
+            uint32_t _i874;
+            for (_i874 = 0; _i874 < _size870; ++_i874)
             {
-              std::string _key874;
-              xfer += iprot->readString(_key874);
-              Type& _val875 = (*(this->success))[_key874];
-              xfer += _val875.read(iprot);
+              std::string _key875;
+              xfer += iprot->readString(_key875);
+              Type& _val876 = (*(this->success))[_key875];
+              xfer += _val876.read(iprot);
             }
             xfer += iprot->readMapEnd();
           }
@@ -2883,14 +2883,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size876;
-            ::apache::thrift::protocol::TType _etype879;
-            xfer += iprot->readListBegin(_etype879, _size876);
-            this->success.resize(_size876);
-            uint32_t _i880;
-            for (_i880 = 0; _i880 < _size876; ++_i880)
+            uint32_t _size877;
+            ::apache::thrift::protocol::TType _etype880;
+            xfer += iprot->readListBegin(_etype880, _size877);
+            this->success.resize(_size877);
+            uint32_t _i881;
+            for (_i881 = 0; _i881 < _size877; ++_i881)
             {
-              xfer += this->success[_i880].read(iprot);
+              xfer += this->success[_i881].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -2945,10 +2945,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter881;
-      for (_iter881 = this->success.begin(); _iter881 != this->success.end(); ++_iter881)
+      std::vector<FieldSchema> ::const_iterator _iter882;
+      for (_iter882 = this->success.begin(); _iter882 != this->success.end(); ++_iter882)
       {
-        xfer += (*_iter881).write(oprot);
+        xfer += (*_iter882).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -3001,14 +3001,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size882;
-            ::apache::thrift::protocol::TType _etype885;
-            xfer += iprot->readListBegin(_etype885, _size882);
-            (*(this->success)).resize(_size882);
-            uint32_t _i886;
-            for (_i886 = 0; _i886 < _size882; ++_i886)
+            uint32_t _size883;
+            ::apache::thrift::protocol::TType _etype886;
+            xfer += iprot->readListBegin(_etype886, _size883);
+            (*(this->success)).resize(_size883);
+            uint32_t _i887;
+            for (_i887 = 0; _i887 < _size883; ++_i887)
             {
-              xfer += (*(this->success))[_i886].read(iprot);
+              xfer += (*(this->success))[_i887].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3194,14 +3194,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size887;
-            ::apache::thrift::protocol::TType _etype890;
-            xfer += iprot->readListBegin(_etype890, _size887);
-            this->success.resize(_size887);
-            uint32_t _i891;
-            for (_i891 = 0; _i891 < _size887; ++_i891)
+            uint32_t _size888;
+            ::apache::thrift::protocol::TType _etype891;
+            xfer += iprot->readListBegin(_etype891, _size888);
+            this->success.resize(_size888);
+            uint32_t _i892;
+            for (_i892 = 0; _i892 < _size888; ++_i892)
             {
-              xfer += this->success[_i891].read(iprot);
+              xfer += this->success[_i892].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3256,10 +3256,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter892;
-      for (_iter892 = this->success.begin(); _iter892 != this->success.end(); ++_iter892)
+      std::vector<FieldSchema> ::const_iterator _iter893;
+      for (_iter893 = this->success.begin(); _iter893 != this->success.end(); ++_iter893)
       {
-        xfer += (*_iter892).write(oprot);
+        xfer += (*_iter893).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -3312,14 +3312,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size893;
-            ::apache::thrift::protocol::TType _etype896;
-            xfer += iprot->readListBegin(_etype896, _size893);
-            (*(this->success)).resize(_size893);
-            uint32_t _i897;
-            for (_i897 = 0; _i897 < _size893; ++_i897)
+            uint32_t _size894;
+            ::apache::thrift::protocol::TType _etype897;
+            xfer += iprot->readListBegin(_etype897, _size894);
+            (*(this->success)).resize(_size894);
+            uint32_t _i898;
+            for (_i898 = 0; _i898 < _size894; ++_i898)
             {
-              xfer += (*(this->success))[_i897].read(iprot);
+              xfer += (*(this->success))[_i898].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3489,14 +3489,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size898;
-            ::apache::thrift::protocol::TType _etype901;
-            xfer += iprot->readListBegin(_etype901, _size898);
-            this->success.resize(_size898);
-            uint32_t _i902;
-            for (_i902 = 0; _i902 < _size898; ++_i902)
+            uint32_t _size899;
+            ::apache::thrift::protocol::TType _etype902;
+            xfer += iprot->readListBegin(_etype902, _size899);
+            this->success.resize(_size899);
+            uint32_t _i903;
+            for (_i903 = 0; _i903 < _size899; ++_i903)
             {
-              xfer += this->success[_i902].read(iprot);
+              xfer += this->success[_i903].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3551,10 +3551,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter903;
-      for (_iter903 = this->success.begin(); _iter903 != this->success.end(); ++_iter903)
+      std::vector<FieldSchema> ::const_iterator _iter904;
+      for (_iter904 = this->success.begin(); _iter904 != this->success.end(); ++_iter904)
       {
-        xfer += (*_iter903).write(oprot);
+        xfer += (*_iter904).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -3607,14 +3607,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size904;
-            ::apache::thrift::protocol::TType _etype907;
-            xfer += iprot->readListBegin(_etype907, _size904);
-            (*(this->success)).resize(_size904);
-            uint32_t _i908;
-            for (_i908 = 0; _i908 < _size904; ++_i908)
+            uint32_t _size905;
+            ::apache::thrift::protocol::TType _etype908;
+            xfer += iprot->readListBegin(_etype908, _size905);
+            (*(this->success)).resize(_size905);
+            uint32_t _i909;
+            for (_i909 = 0; _i909 < _size905; ++_i909)
             {
-              xfer += (*(this->success))[_i908].read(iprot);
+              xfer += (*(this->success))[_i909].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3800,14 +3800,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size909;
-            ::apache::thrift::protocol::TType _etype912;
-            xfer += iprot->readListBegin(_etype912, _size909);
-            this->success.resize(_size909);
-            uint32_t _i913;
-            for (_i913 = 0; _i913 < _size909; ++_i913)
+            uint32_t _size910;
+            ::apache::thrift::protocol::TType _etype913;
+            xfer += iprot->readListBegin(_etype913, _size910);
+            this->success.resize(_size910);
+            uint32_t _i914;
+            for (_i914 = 0; _i914 < _size910; ++_i914)
             {
-              xfer += this->success[_i913].read(iprot);
+              xfer += this->success[_i914].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3862,10 +3862,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter914;
-      for (_iter914 = this->success.begin(); _iter914 != this->success.end(); ++_iter914)
+      std::vector<FieldSchema> ::const_iterator _iter915;
+      for (_iter915 = this->success.begin(); _iter915 != this->success.end(); ++_iter915)
       {
-        xfer += (*_iter914).write(oprot);
+        xfer += (*_iter915).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -3918,14 +3918,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size915;
-            ::apache::thrift::protocol::TType _etype918;
-            xfer += iprot->readListBegin(_etype918, _size915);
-            (*(this->success)).resize(_size915);
-            uint32_t _i919;
-            for (_i919 = 0; _i919 < _size915; ++_i919)
+            uint32_t _size916;
+            ::apache::thrift::protocol::TType _etype919;
+            xfer += iprot->readListBegin(_etype919, _size916);
+            (*(this->success)).resize(_size916);
+            uint32_t _i920;
+            for (_i920 = 0; _i920 < _size916; ++_i920)
             {
-              xfer += (*(this->success))[_i919].read(iprot);
+              xfer += (*(this->success))[_i920].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4518,14 +4518,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->primaryKeys.clear();
-            uint32_t _size920;
-            ::apache::thrift::protocol::TType _etype923;
-            xfer += iprot->readListBegin(_etype923, _size920);
-            this->primaryKeys.resize(_size920);
-            uint32_t _i924;
-            for (_i924 = 0; _i924 < _size920; ++_i924)
+            uint32_t _size921;
+            ::apache::thrift::protocol::TType _etype924;
+            xfer += iprot->readListBegin(_etype924, _size921);
+            this->primaryKeys.resize(_size921);
+            uint32_t _i925;
+            for (_i925 = 0; _i925 < _size921; ++_i925)
             {
-              xfer += this->primaryKeys[_i924].read(iprot);
+              xfer += this->primaryKeys[_i925].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4538,14 +4538,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->foreignKeys.clear();
-            uint32_t _size925;
-            ::apache::thrift::protocol::TType _etype928;
-            xfer += iprot->readListBegin(_etype928, _size925);
-            this->foreignKeys.resize(_size925);
-            uint32_t _i929;
-            for (_i929 = 0; _i929 < _size925; ++_i929)
+            uint32_t _size926;
+            ::apache::thrift::protocol::TType _etype929;
+            xfer += iprot->readListBegin(_etype929, _size926);
+            this->foreignKeys.resize(_size926);
+            uint32_t _i930;
+            for (_i930 = 0; _i930 < _size926; ++_i930)
             {
-              xfer += this->foreignKeys[_i929].read(iprot);
+              xfer += this->foreignKeys[_i930].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4578,10 +4578,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
   xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeys.size()));
-    std::vector<SQLPrimaryKey> ::const_iterator _iter930;
-    for (_iter930 = this->primaryKeys.begin(); _iter930 != this->primaryKeys.end(); ++_iter930)
+    std::vector<SQLPrimaryKey> ::const_iterator _iter931;
+    for (_iter931 = this->primaryKeys.begin(); _iter931 != this->primaryKeys.end(); ++_iter931)
     {
-      xfer += (*_iter930).write(oprot);
+      xfer += (*_iter931).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -4590,10 +4590,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
   xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->foreignKeys.size()));
-    std::vector<SQLForeignKey> ::const_iterator _iter931;
-    for (_iter931 = this->foreignKeys.begin(); _iter931 != this->foreignKeys.end(); ++_iter931)
+    std::vector<SQLForeignKey> ::const_iterator _iter932;
+    for (_iter932 = this->foreignKeys.begin(); _iter932 != this->foreignKeys.end(); ++_iter932)
     {
-      xfer += (*_iter931).write(oprot);
+      xfer += (*_iter932).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -4621,10 +4621,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
   xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->primaryKeys)).size()));
-    std::vector<SQLPrimaryKey> ::const_iterator _iter932;
-    for (_iter932 = (*(this->primaryKeys)).begin(); _iter932 != (*(this->primaryKeys)).end(); ++_iter932)
+    std::vector<SQLPrimaryKey> ::const_iterator _iter933;
+    for (_iter933 = (*(this->primaryKeys)).begin(); _iter933 != (*(this->primaryKeys)).end(); ++_iter933)
     {
-      xfer += (*_iter932).write(oprot);
+      xfer += (*_iter933).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -4633,10 +4633,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
   xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->foreignKeys)).size()));
-    std::vector<SQLForeignKey> ::const_iterator _iter933;
-    for (_iter933 = (*(this->foreignKeys)).begin(); _iter933 != (*(this->foreignKeys)).end(); ++_iter933)
+    std::vector<SQLForeignKey> ::const_iterator _iter934;
+    for (_iter934 = (*(this->foreignKeys)).begin(); _iter934 != (*(this->foreignKeys)).end(); ++_iter934)
     {
-      xfer += (*_iter933).write(oprot);
+      xfer += (*_iter934).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -5931,253 +5931,6 @@ uint32_t ThriftHiveMetastore_drop_table_with_environment_context_presult::read(:
 }
 
 
-ThriftHiveMetastore_truncate_table_args::~ThriftHiveMetastore_truncate_table_args() throw() {
-}
-
-
-uint32_t ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protocol::TProtocol* iprot) {
-
-  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
-  uint32_t xfer = 0;
-  std::string fname;
-  ::apache::thrift::protocol::TType ftype;
-  int16_t fid;
-
-  xfer += iprot->readStructBegin(fname);
-
-  using ::apache::thrift::protocol::TProtocolException;
-
-
-  while (true)
-  {
-    xfer += iprot->readFieldBegin(fname, ftype, fid);
-    if (ftype == ::apache::thrift::protocol::T_STOP) {
-      break;
-    }
-    switch (fid)
-    {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readString(this->dbName);
-          this->__isset.dbName = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
-        if (ftype == ::apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readString(this->tableName);
-          this->__isset.tableName = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 3:
-        if (ftype == ::apache::thrift::protocol::T_LIST) {
-          {
-            this->partNames.clear();
-            uint32_t _size934;
-            ::apache::thrift::protocol::TType _etype937;
-            xfer += iprot->readListBegin(_etype937, _size934);
-            this->partNames.resize(_size934);
-            uint32_t _i938;
-            for (_i938 = 0; _i938 < _size934; ++_i938)
-            {
-              xfer += iprot->readString(this->partNames[_i938]);
-            }
-            xfer += iprot->readListEnd();
-          }
-          this->__isset.partNames = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      default:
-        xfer += iprot->skip(ftype);
-        break;
-    }
-    xfer += iprot->readFieldEnd();
-  }
-
-  xfer += iprot->readStructEnd();
-
-  return xfer;
-}
-
-uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
-  uint32_t xfer = 0;
-  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_truncate_table_args");
-
-  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
-  xfer += oprot->writeString(this->dbName);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 2);
-  xfer += oprot->writeString(this->tableName);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
-  {
-    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partNames.size()));
-    std::vector<std::string> ::const_iterator _iter939;
-    for (_iter939 = this->partNames.begin(); _iter939 != this->partNames.end(); ++_iter939)
-    {
-      xfer += oprot->writeString((*_iter939));
-    }
-    xfer += oprot->writeListEnd();
-  }
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldStop();
-  xfer += oprot->writeStructEnd();
-  return xfer;
-}
-
-
-ThriftHiveMetastore_truncate_table_pargs::~ThriftHiveMetastore_truncate_table_pargs() throw() {
-}
-
-
-uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
-  uint32_t xfer = 0;
-  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_truncate_table_pargs");
-
-  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
-  xfer += oprot->writeString((*(this->dbName)));
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 2);
-  xfer += oprot->writeString((*(this->tableName)));
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
-  {
-    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partNames)).size()));
-    std::vector<std::string> ::const_iterator _iter940;
-    for (_iter940 = (*(this->partNames)).begin(); _iter940 != (*(this->partNames)).end(); ++_iter940)
-    {
-      xfer += oprot->writeString((*_iter940));
-    }
-    xfer += oprot->writeListEnd();
-  }
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldStop();
-  xfer += oprot->writeStructEnd();
-  return xfer;
-}
-
-
-ThriftHiveMetastore_truncate_table_result::~ThriftHiveMetastore_truncate_table_result() throw() {
-}
-
-
-uint32_t ThriftHiveMetastore_truncate_table_result::read(::apache::thrift::protocol::TProtocol* iprot) {
-
-  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
-  uint32_t xfer = 0;
-  std::string fname;
-  ::apache::thrift::protocol::TType ftype;
-  int16_t fid;
-
-  xfer += iprot->readStructBegin(fname);
-
-  using ::apache::thrift::protocol::TProtocolException;
-
-
-  while (true)
-  {
-    xfer += iprot->readFieldBegin(fname, ftype, fid);
-    if (ftype == ::apache::thrift::protocol::T_STOP) {
-      break;
-    }
-    switch (fid)
-    {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o1.read(iprot);
-          this->__isset.o1 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      default:
-        xfer += iprot->skip(ftype);
-        break;
-    }
-    xfer += iprot->readFieldEnd();
-  }
-
-  xfer += iprot->readStructEnd();
-
-  return xfer;
-}
-
-uint32_t ThriftHiveMetastore_truncate_table_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
-
-  uint32_t xfer = 0;
-
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_truncate_table_result");
-
-  if (this->__isset.o1) {
-    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
-    xfer += this->o1.write(oprot);
-    xfer += oprot->writeFieldEnd();
-  }
-  xfer += oprot->writeFieldStop();
-  xfer += oprot->writeStructEnd();
-  return xfer;
-}
-
-
-ThriftHiveMetastore_truncate_table_presult::~ThriftHiveMetastore_truncate_table_presult() throw() {
-}
-
-
-uint32_t ThriftHiveMetastore_truncate_table_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
-
-  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
-  uint32_t xfer = 0;
-  std::string fname;
-  ::apache::thrift::protocol::TType ftype;
-  int16_t fid;
-
-  xfer += iprot->readStructBegin(fname);
-
-  using ::apache::thrift::protocol::TProtocolException;
-
-
-  while (true)
-  {
-    xfer += iprot->readFieldBegin(fname, ftype, fid);
-    if (ftype == ::apache::thrift::protocol::T_STOP) {
-      break;
-    }
-    switch (fid)
-    {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o1.read(iprot);
-          this->__isset.o1 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      default:
-        xfer += iprot->skip(ftype);
-        break;
-    }
-    xfer += iprot->readFieldEnd();
-  }
-
-  xfer += iprot->readStructEnd();
-
-  return xfer;
-}
-
-
 ThriftHiveMetastore_get_tables_args::~ThriftHiveMetastore_get_tables_args() throw() {
 }
 
@@ -6302,14 +6055,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size941;
-            ::apache::thrift::protocol::TType _etype944;
-            xfer += iprot->readListBegin(_etype944, _size941);
-            this->success.resize(_size941);
-            uint32_t _i945;
-            for (_i945 = 0; _i945 < _size941; ++_i945)
+            uint32_t _size935;
+            ::apache::thrift::protocol::TType _etype938;
+            xfer += iprot->readListBegin(_etype938, _size935);
+            this->success.resize(_size935);
+            uint32_t _i939;
+            for (_i939 = 0; _i939 < _size935; ++_i939)
             {
-              xfer += iprot->readString(this->success[_i945]);
+              xfer += iprot->readString(this->success[_i939]);
             }
             xfer += iprot->readListEnd();
           }
@@ -6348,10 +6101,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter946;
-      for (_iter946 = this->success.begin(); _iter946 != this->success.end(); ++_iter946)
+      std::vector<std::string> ::const_iterator _iter940;
+      for (_iter940 = this->success.begin(); _iter940 != this->success.end(); ++_iter940)
       {
-        xfer += oprot->writeString((*_iter946));
+        xfer += oprot->writeString((*_iter940));
       }
       xfer += oprot->writeListEnd();
     }
@@ -6396,14 +6149,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size947;
-            ::apache::thrift::protocol::TType _etype950;
-            xfer += iprot->readListBegin(_etype950, _size947);
-            (*(this->success)).resize(_size947);
-            uint32_t _i951;
-            for (_i951 = 0; _i951 < _size947; ++_i951)
+            uint32_t _size941;
+            ::apache::thrift::protocol::TType _etype944;
+            xfer += iprot->readListBegin(_etype944, _size941);
+            (*(this->success)).resize(_size941);
+            uint32_t _i945;
+            for (_i945 = 0; _i945 < _size941; ++_i945)
             {
-              xfer += iprot->readString((*(this->success))[_i951]);
+              xfer += iprot->readString((*(this->success))[_i945]);
             }
             xfer += iprot->readListEnd();
           }
@@ -6573,14 +6326,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size952;
-            ::apache::thrift::protocol::TType _etype955;
-            xfer += iprot->readListBegin(_etype955, _size952);
-            this->success.resize(_size952);
-            uint32_t _i956;
-            for (_i956 = 0; _i956 < _size952; ++_i956)
+            uint32_t _size946;
+            ::apache::thrift::protocol::TType _etype949;
+            xfer += iprot->readListBegin(_etype949, _size946);
+            this->success.resize(_size946);
+            uint32_t _i950;
+            for (_i950 = 0; _i950 < _size946; ++_i950)
             {
-              xfer += iprot->readString(this->success[_i956]);
+              xfer += iprot->readString(this->success[_i950]);
             }
             xfer += iprot->readListEnd();
           }
@@ -6619,10 +6372,10 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift::
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter957;
-      for (_iter957 = this->success.begin(); _iter957 != this->success.end(); ++_iter957)
+      std::vector<std::string> ::const_iterator _iter951;
+      for (_iter951 = this->success.begin(); _iter951 != this->success.end(); ++_iter951)
       {
-        xfer += oprot->writeString((*_iter957));
+        xfer += oprot->writeString((*_iter951));
       }
       xfer += oprot->writeListEnd();
     }
@@ -6667,14 +6420,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size958;
-            ::apache::thrift::protocol::TType _etype961;
-            xfer += iprot->readListBegin(_etype961, _size958);
-            (*(this->success)).resize(_size958);
-            uint32_t _i962;
-            for (_i962 = 0; _i962 < _size958; ++_i962)
+            uint32_t _size952;
+            ::apache::thrift::protocol::TType _etype955;
+            xfer += iprot->readListBegin(_etype955, _size952);
+            (*(this->success)).resize(_size952);
+            uint32_t _i956;
+            for (_i956 = 0; _i956 < _size952; ++_i956)
             {
-              xfer += iprot->readString((*(this->success))[_i962]);
+              xfer += iprot->readString((*(this->success))[_i956]);
             }
             xfer += iprot->readListEnd();
           }
@@ -6749,14 +6502,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->tbl_types.clear();
-            uint32_t _size963;
-            ::apache::thrift::protocol::TType _etype966;
-            xfer += iprot->readListBegin(_etype966, _size963);
-            this->tbl_types.resize(_size963);
-            uint32_t _i967;
-            for (_i967 = 0; _i967 < _size963; ++_i967)
+            uint32_t _size957;
+            ::apache::thrift::protocol::TType _etype960;
+            xfer += iprot->readListBegin(_etype960, _size957);
+            this->tbl_types.resize(_size957);
+            uint32_t _i961;
+            for (_i961 = 0; _i961 < _size957; ++_i961)
             {
-              xfer += iprot->readString(this->tbl_types[_i967]);
+              xfer += iprot->readString(this->tbl_types[_i961]);
             }
             xfer += iprot->readListEnd();
           }
@@ -6793,10 +6546,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_types.size()));
-    std::vector<std::string> ::const_iterator _iter968;
-    for (_iter968 = this->tbl_types.begin(); _iter968 != this->tbl_types.end(); ++_iter968)
+    std::vector<std::string> ::const_iterator _iter962;
+    for (_iter962 = this->tbl_types.begin(); _iter962 != this->tbl_types.end(); ++_iter962)
     {
-      xfer += oprot->writeString((*_iter968));
+      xfer += oprot->writeString((*_iter962));
     }
     xfer += oprot->writeListEnd();
   }
@@ -6828,10 +6581,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_types)).size()));
-    std::vector<std::string> ::const_iterator _iter969;
-    for (_iter969 = (*(this->tbl_types)).begin(); _iter969 != (*(this->tbl_types)).end(); ++_iter969)
+    std::vector<std::string> ::const_iterator _iter963;
+    for (_iter963 = (*(this->tbl_types)).begin(); _iter963 != (*(this->tbl_types)).end(); ++_iter963)
     {
-      xfer += oprot->writeString((*_iter969));
+      xfer += oprot->writeString((*_iter963));
     }
     xfer += oprot->writeListEnd();
   }
@@ -6872,14 +6625,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size970;
-            ::apache::thrift::protocol::TType _etype973;
-            xfer += iprot->readListBegin(_etype973, _size970);
-            this->success.resize(_size970);
-            uint32_t _i974;
-            for (_i974 = 0; _i974 < _size970; ++_i974)
+            uint32_t _size964;
+            ::apache::thrift::protocol::TType _etype967;
+            xfer += iprot->readListBegin(_etype967, _size964);
+            this->success.resize(_size964);
+            uint32_t _i968;
+            for (_i968 = 0; _i968 < _size964; ++_i968)
             {
-              xfer += this->success[_i974].read(iprot);
+              xfer += this->success[_i968].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -6918,10 +6671,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<TableMeta> ::const_iterator _iter975;
-      for (_iter975 = this->success.begin(); _iter975 != this->success.end(); ++_iter975)
+      std::vector<TableMeta> ::const_iterator _iter969;
+      for (_iter969 = this->success.begin(); _iter969 != this->success.end(); ++_iter969)
       {
-        xfer += (*_iter975).write(oprot);
+        xfer += (*_iter969).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -6966,14 +6719,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size976;
-            ::apache::thrift::protocol::TType _etype979;
-            xfer += iprot->readListBegin(_etype979, _size976);
-            (*(this->success)).resize(_size976);
-            uint32_t _i980;
-            for (_i980 = 0; _i980 < _size976; ++_i980)
+            uint32_t _size970;
+            ::apache::thrift::protocol::TType _etype973;
+            xfer += iprot->readListBegin(_etype973, _size970);
+            (*(this->success)).resize(_size970);
+            uint32_t _i974;
+            for (_i974 = 0; _i974 < _size970; ++_i974)
             {
-              xfer += (*(this->success))[_i980].read(iprot);
+              xfer += (*(this->success))[_i974].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -7111,14 +6864,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size981;
-            ::apache::thrift::protocol::TType _etype984;
-            xfer += iprot->readListBegin(_etype984, _size981);
-            this->success.resize(_size981);
-            uint32_t _i985;
-            for (_i985 = 0; _i985 < _size981; ++_i985)
+            uint32_t _size975;
+            ::apache::thrift::protocol::TType _etype978;
+            xfer += iprot->readListBegin(_etype978, _size975);
+            this->success.resize(_size975);
+            uint32_t _i979;
+            for (_i979 = 0; _i979 < _size975; ++_i979)
             {
-              xfer += iprot->readString(this->success[_i985]);
+              xfer += iprot->readString(this->success[_i979]);
             }
             xfer += iprot->readListEnd();
           }
@@ -7157,10 +6910,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter986;
-      for (_iter986 = this->success.begin(); _iter986 != this->success.end(); ++_iter986)
+      std::vector<std::string> ::const_iterator _iter980;
+      for (_iter980 = this->success.begin(); _iter980 != this->success.end(); ++_iter980)
       {
-        xfer += oprot->writeString((*_iter986));
+        xfer += oprot->writeString((*_iter980));
       }
       xfer += oprot->writeListEnd();
     }
@@ -7205,14 +6958,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size987;
-            ::apache::thrift::protocol::TType _etype990;
-            xfer += iprot->readListBegin(_etype990, _size987);
-            (*(this->success)).resize(_size987);
-            uint32_t _i991;
-            for (_i991 = 0; _i991 < _size987; ++_i991)
+            uint32_t _size981;
+            ::apache::thrift::protocol::TType _etype984;
+            xfer += iprot->readListBegin(_etype984, _size981);
+            (*(this->success)).resize(_size981);
+            uint32_t _i985;
+            for (_i985 = 0; _i985 < _size981; ++_i985)
             {
-              xfer += iprot->readString((*(this->success))[_i991]);
+              xfer += iprot->readString((*(this->success))[_i985]);
             }
             xfer += iprot->readListEnd();
           }
@@ -7522,14 +7275,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->tbl_names.clear();
-            uint32_t _size992;
-            ::apache::thrift::protocol::TType _etype995;
-            xfer += iprot->readListBegin(_etype995, _size992);
-            this->tbl_names.resize(_size992);
-            uint32_t _i996;
-            for (_i996 = 0; _i996 < _size992; ++_i996)
+            uint32_t _size986;
+            ::apache::thrift::protocol::TType _etype989;
+            xfer += iprot->readListBegin(_etype989, _size986);
+            this->tbl_names.resize(_size986);
+            uint32_t _i990;
+            for (_i990 = 0; _i990 < _size986; ++_i990)
             {
-              xfer += iprot->readString(this->tbl_names[_i996]);
+              xfer += iprot->readString(this->tbl_names[_i990]);
             }
             xfer += iprot->readListEnd();
           }
@@ -7562,10 +7315,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr
   xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size()));
-    std::vector<std::string> ::const_iterator _iter997;
-    for (_iter997 = this->tbl_names.begin(); _iter997 != this->tbl_names.end(); ++_iter997)
+    std::vector<std::string> ::const_iterator _iter991;
+    for (_iter991 = this->tbl_names.begin(); _iter991 != this->tbl_names.end(); ++_iter991)
     {
-      xfer += oprot->writeString((*_iter997));
+      xfer += oprot->writeString((*_iter991));
     }
     xfer += oprot->writeListEnd();
   }
@@ -7593,10 +7346,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th
   xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size()));
-    std::vector<std::string> ::const_iterator _iter998;
-    for (_iter998 = (*(this->tbl_names)).begin(); _iter998 != (*(this->tbl_names)).end(); ++_iter998)
+    std::vector<std::string> ::const_iterator _iter992;
+    for (_iter992 = (*(this->tbl_names)).begin(); _iter992 != (*(this->tbl_names)).end(); ++_iter992)
     {
-      xfer += oprot->writeString((*_iter998));
+      xfer += oprot->writeString((*_iter992));
     }
     xfer += oprot->writeListEnd();
   }
@@ -7637,14 +7390,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size999;
-            ::apache::thrift::protocol::TType _etype1002;
-            xfer += iprot->readListBegin(_etype1002, _size999);
-            this->success.resize(_size999);
-            uint32_t _i1003;
-            for (_i1003 = 0; _i1003 < _size999; ++_i1003)
+            uint32_t _size993;
+            ::apache::thrift::protocol::TType _etype996;
+            xfer += iprot->readListBegin(_etype996, _size993);
+            this->success.resize(_size993);
+            uint32_t _i997;
+            for (_i997 = 0; _i997 < _size993; ++_i997)
             {
-              xfer += this->success[_i1003].read(iprot);
+              xfer += this->success[_i997].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -7675,10 +7428,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Table> ::const_iterator _iter1004;
-      for (_iter1004 = this->success.begin(); _iter1004 != this->success.end(); ++_iter1004)
+      std::vector<Table> ::const_iterator _iter998;
+      for (_iter998 = this->success.begin(); _iter998 != this->success.end(); ++_iter998)
       {
-        xfer += (*_iter1004).write(oprot);
+        xfer += (*_iter998).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -7719,14 +7472,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1005;
-            ::apache::thrift::protocol::TType _etype1008;
-            xfer += iprot->readListBegin(_etype1008, _size1005);
-            (*(this->success)).resize(_size1005);
-            uint32_t _i1009;
-            for (_i1009 = 0; _i1009 < _size1005; ++_i1009)
+            uint32_t _size999;
+            ::apache::thrift::protocol::TType _etype1002;
+            xfer += iprot->readListBegin(_etype1002, _size999);
+            (*(this->success)).resize(_size999);
+            uint32_t _i1003;
+            for (_i1003 = 0; _i1003 < _size999; ++_i1003)
             {
-              xfer += (*(this->success))[_i1009].read(iprot);
+              xfer += (*(this->success))[_i1003].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -8362,14 +8115,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1010;
-            ::apache::thrift::protocol::TType _etype1013;
-            xfer += iprot->readListBegin(_etype1013, _size1010);
-            this->success.resize(_size1010);
-            uint32_t _i1014;
-            for (_i1014 = 0; _i1014 < _size1010; ++_i1014)
+            uint32_t _size1004;
+            ::apache::thrift::protocol::TType _etype1007;
+            xfer += iprot->readListBegin(_etype1007, _size1004);
+            this->success.resize(_size1004);
+            uint32_t _i1008;
+            for (_i1008 = 0; _i1008 < _size1004; ++_i1008)
             {
-              xfer += iprot->readString(this->success[_i1014]);
+              xfer += iprot->readString(this->success[_i1008]);
             }
             xfer += iprot->readListEnd();
           }
@@ -8424,10 +8177,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1015;
-      for (_iter1015 = this->success.begin(); _iter1015 != this->success.end(); ++_iter1015)
+      std::vector<std::string> ::const_iterator _iter1009;
+      for (_iter1009 = this->success.begin(); _iter1009 != this->success.end(); ++_iter1009)
       {
-        xfer += oprot->writeString((*_iter1015));
+        xfer += oprot->writeString((*_iter1009));
       }
       xfer += oprot->writeListEnd();
     }
@@ -8480,14 +8233,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1016;
-            ::apache::thrift::protocol::TType _etype1019;
-            xfer += iprot->readListBegin(_etype1019, _size1016);
-            (*(this->success)).resize(_size1016);
-            uint32_t _i1020;
-            for (_i1020 = 0; _i1020 < _size1016; ++_i1020)
+            uint32_t _size1010;
+            ::apache::thrift::protocol::TType _etype1013;
+            xfer += iprot->readListBegin(_etype1013, _size1010);
+            (*(this->success)).resize(_size1010);
+            uint32_t _i1014;
+            for (_i1014 = 0; _i1014 < _size1010; ++_i1014)
             {
-              xfer += iprot->readString((*(this->success))[_i1020]);
+              xfer += iprot->readString((*(this->success))[_i1014]);
             }
             xfer += iprot->readListEnd();
           }
@@ -9821,14 +9574,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->new_parts.clear();
-            uint32_t _size1021;
-            ::apache::thrift::protocol::TType _etype1024;
-            xfer += iprot->readListBegin(_etype1024, _size1021);
-            this->new_parts.resize(_size1021);
-            uint32_t _i1025;
-            for (_i1025 = 0; _i1025 < _size1021; ++_i1025)
+            uint32_t _size1015;
+            ::apache::thrift::protocol::TType _etype1018;
+            xfer += iprot->readListBegin(_etype1018, _size1015);
+            this->new_parts.resize(_size1015);
+            uint32_t _i1019;
+            for (_i1019 = 0; _i1019 < _size1015; ++_i1019)
             {
-              xfer += this->new_parts[_i1025].read(iprot);
+              xfer += this->new_parts[_i1019].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -9857,10 +9610,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-    std::vector<Partition> ::const_iterator _iter1026;
-    for (_iter1026 = this->new_parts.begin(); _iter1026 != this->new_parts.end(); ++_iter1026)
+    std::vector<Partition> ::const_iterator _iter1020;
+    for (_iter1020 = this->new_parts.begin(); _iter1020 != this->new_parts.end(); ++_iter1020)
     {
-      xfer += (*_iter1026).write(oprot);
+      xfer += (*_iter1020).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -9884,10 +9637,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-    std::vector<Partition> ::const_iterator _iter1027;
-    for (_iter1027 = (*(this->new_parts)).begin(); _iter1027 != (*(this->new_parts)).end(); ++_iter1027)
+    std::vector<Partition> ::const_iterator _iter1021;
+    for (_iter1021 = (*(this->new_parts)).begin(); _iter1021 != (*(this->new_parts)).end(); ++_iter1021)
     {
-      xfer += (*_iter1027).write(oprot);
+      xfer += (*_iter1021).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -10096,14 +9849,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->new_parts.clear();
-            uint32_t _size1028;
-            ::apache::thrift::protocol::TType _etype1031;
-            xfer += iprot->readListBegin(_etype1031, _size1028);
-            this->new_parts.resize(_size1028);
-            uint32_t _i1032;
-            for (_i1032 = 0; _i1032 < _size1028; ++_i1032)
+            uint32_t _size1022;
+            ::apache::thrift::protocol::TType _etype1025;
+            xfer += iprot->readListBegin(_etype1025, _size1022);
+            this->new_parts.resize(_size1022);
+            uint32_t _i1026;
+            for (_i1026 = 0; _i1026 < _size1022; ++_i1026)
             {
-              xfer += this->new_parts[_i1032].read(iprot);
+              xfer += this->new_parts[_i1026].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -10132,10 +9885,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift::
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-    std::vector<PartitionSpec> ::const_iterator _iter1033;
-    for (_iter1033 = this->new_parts.begin(); _iter1033 != this->new_parts.end(); ++_iter1033)
+    std::vector<PartitionSpec> ::const_iterator _iter1027;
+    for (_iter1027 = this->new_parts.begin(); _iter1027 != this->new_parts.end(); ++_iter1027)
     {
-      xfer += (*_iter1033).write(oprot);
+      xfer += (*_iter1027).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -10159,10 +9912,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift:
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-    std::vector<PartitionSpec> ::const_iterator _iter1034;
-    for (_iter1034 = (*(this->new_parts)).begin(); _iter1034 != (*(this->new_parts)).end(); ++_iter1034)
+    std::vector<PartitionSpec> ::const_iterator _iter1028;
+    for (_iter1028 = (*(this->new_parts)).begin(); _iter1028 != (*(this->new_parts)).end(); ++_iter1028)
     {
-      xfer += (*_iter1034).write(oprot);
+      xfer += (*_iter1028).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -10387,14 +10140,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1035;
-            ::apache::thrift::protocol::TType _etype1038;
-            xfer += iprot->readListBegin(_etype1038, _size1035);
-            this->part_vals.resize(_size1035);
-            uint32_t _i1039;
-            for (_i1039 = 0; _i1039 < _size1035; ++_i1039)
+            uint32_t _size1029;
+            ::apache::thrift::protocol::TType _etype1032;
+            xfer += iprot->readListBegin(_etype1032, _size1029);
+            this->part_vals.resize(_size1029);
+            uint32_t _i1033;
+            for (_i1033 = 0; _i1033 < _size1029; ++_i1033)
             {
-              xfer += iprot->readString(this->part_vals[_i1039]);
+              xfer += iprot->readString(this->part_vals[_i1033]);
             }
             xfer += iprot->readListEnd();
           }
@@ -10431,10 +10184,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1040;
-    for (_iter1040 = this->part_vals.begin(); _iter1040 != this->part_vals.end(); ++_iter1040)
+    std::vector<std::string> ::const_iterator _iter1034;
+    for (_iter1034 = this->part_vals.begin(); _iter1034 != this->part_vals.end(); ++_iter1034)
     {
-      xfer += oprot->writeString((*_iter1040));
+      xfer += oprot->writeString((*_iter1034));
     }
     xfer += oprot->writeListEnd();
   }
@@ -10466,10 +10219,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1041;
-    for (_iter1041 = (*(this->part_vals)).begin(); _iter1041 != (*(this->part_vals)).end(); ++_iter1041)
+    std::vector<std::string> ::const_iterator _iter1035;
+    for (_iter1035 = (*(this->part_vals)).begin(); _iter1035 != (*(this->part_vals)).end(); ++_iter1035)
     {
-      xfer += oprot->writeString((*_iter1041));
+      xfer += oprot->writeString((*_iter1035));
     }
     xfer += oprot->writeListEnd();
   }
@@ -10941,14 +10694,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1042;
-            ::apache::thrift::protocol::TType _etype1045;
-            xfer += iprot->readListBegin(_etype1045, _size1042);
-            this->part_vals.resize(_size1042);
-            uint32_t _i1046;
-            for (_i1046 = 0; _i1046 < _size1042; ++_i1046)
+            uint32_t _size1036;
+            ::apache::thrift::protocol::TType _etype1039;
+            xfer += iprot->readListBegin(_etype1039, _size1036);
+            this->part_vals.resize(_size1036);
+            uint32_t _i1040;
+            for (_i1040 = 0; _i1040 < _size1036; ++_i1040)
             {
-              xfer += iprot->readString(this->part_vals[_i1046]);
+              xfer += iprot->readString(this->part_vals[_i1040]);
             }
             xfer += iprot->readListEnd();
           }
@@ -10993,10 +10746,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1047;
-    for (_iter1047 = this->part_vals.begin(); _iter1047 != this->part_vals.end(); ++_iter1047)
+    std::vector<std::string> ::const_iterator _iter1041;
+    for (_iter1041 = this->part_vals.begin(); _iter1041 != this->part_vals.end(); ++_iter1041)
     {
-      xfer += oprot->writeString((*_iter1047));
+      xfer += oprot->writeString((*_iter1041));
     }
     xfer += oprot->writeListEnd();
   }
@@ -11032,10 +10785,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1048;
-    for (_iter1048 = (*(this->part_vals)).begin(); _iter1048 != (*(this->part_vals)).end(); ++_iter1048)
+    std::vector<std::string> ::const_iterator _iter1042;
+    for (_iter1042 = (*(this->part_vals)).begin(); _iter1042 != (*(this->part_vals)).end(); ++_iter1042)
     {
-      xfer += oprot->writeString((*_iter1048));
+      xfer += oprot->writeString((*_iter1042));
     }
     xfer += oprot->writeListEnd();
   }
@@ -11838,14 +11591,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1049;
-            ::apache::thrift::protocol::TType _etype1052;
-            xfer += iprot->readListBegin(_etype1052, _size1049);
-            this->part_vals.resize(_size1049);
-            uint32_t _i1053;
-            for (_i1053 = 0; _i1053 < _size1049; ++_i1053)
+            uint32_t _size1043;
+            ::apache::thrift::protocol::TType _etype1046;
+            xfer += iprot->readListBegin(_etype1046, _size1043);
+            this->part_vals.resize(_size1043);
+            uint32_t _i1047;
+            for (_i1047 = 0; _i1047 < _size1043; ++_i1047)
             {
-              xfer += iprot->readString(this->part_vals[_i1053]);
+              xfer += iprot->readString(this->part_vals[_i1047]);
             }
             xfer += iprot->readListEnd();
           }
@@ -11890,10 +11643,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1054;
-    for (_iter1054 = this->part_vals.begin(); _iter1054 != this->part_vals.end(); ++_iter1054)
+    std::vector<std::string> ::const_iterator _iter1048;
+    for (_iter1048 = this->part_vals.begin(); _iter1048 != this->part_vals.end(); ++_iter1048)
     {
-      xfer += oprot->writeString((*_iter1054));
+      xfer += oprot->writeString((*_iter1048));
     }
     xfer += oprot->writeListEnd();
   }
@@ -11929,10 +11682,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1055;
-    for (_iter1055 = (*(this->part_vals)).begin(); _iter1055 != (*(this->part_vals)).end(); ++_iter1055)
+    std::vector<std::string> ::const_iterator _iter1049;
+    for (_iter1049 = (*(this->part_vals)).begin(); _iter1049 != (*(this->part_vals)).end(); ++_iter1049)
     {
-      xfer += oprot->writeString((*_iter1055));
+      xfer += oprot->writeString((*_iter1049));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12141,14 +11894,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read(
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1056;
-            ::apache::thrift::protocol::TType _etype1059;
-            xfer += iprot->readListBegin(_etype1059, _size1056);
-            this->part_vals.resize(_size1056);
-            uint32_t _i1060;
-            for (_i1060 = 0; _i1060 < _size1056; ++_i1060)
+            uint32_t _size1050;
+            ::apache::thrift::protocol::TType _etype1053;
+            xfer += iprot->readListBegin(_etype1053, _size1050);
+            this->part_vals.resize(_size1050);
+            uint32_t _i1054;
+            for (_i1054 = 0; _i1054 < _size1050; ++_i1054)
             {
-              xfer += iprot->readString(this->part_vals[_i1060]);
+              xfer += iprot->readString(this->part_vals[_i1054]);
             }
             xfer += iprot->readListEnd();
           }
@@ -12201,10 +11954,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1061;
-    for (_iter1061 = this->part_vals.begin(); _iter1061 != this->part_vals.end(); ++_iter1061)
+    std::vector<std::string> ::const_iterator _iter1055;
+    for (_iter1055 = this->part_vals.begin(); _iter1055 != this->part_vals.end(); ++_iter1055)
     {
-      xfer += oprot->writeString((*_iter1061));
+      xfer += oprot->writeString((*_iter1055));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12244,10 +11997,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1062;
-    for (_iter1062 = (*(this->part_vals)).begin(); _iter1062 != (*(this->part_vals)).end(); ++_iter1062)
+    std::vector<std::string> ::const_iterator _iter1056;
+    for (_iter1056 = (*(this->part_vals)).begin(); _iter1056 != (*(this->part_vals)).end(); ++_iter1056)
     {
-      xfer += oprot->writeString((*_iter1062));
+      xfer += oprot->writeString((*_iter1056));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13253,14 +13006,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1063;
-            ::apache::thrift::protocol::TType _etype1066;
-            xfer += iprot->readListBegin(_etype1066, _size1063);
-            this->part_vals.resize(_size1063);
-            uint32_t _i1067;
-            for (_i1067 = 0; _i1067 < _size1063; ++_i1067)
+            uint32_t _size1057;
+            ::apache::thrift::protocol::TType _etype1060;
+            xfer += iprot->readListBegin(_etype1060, _size1057);
+            this->part_vals.resize(_size1057);
+            uint32_t _i1061;
+            for (_i1061 = 0; _i1061 < _size1057; ++_i1061)
             {
-              xfer += iprot->readString(this->part_vals[_i1067]);
+              xfer += iprot->readString(this->part_vals[_i1061]);
             }
             xfer += iprot->readListEnd();
           }
@@ -13297,10 +13050,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1068;
-    for (_iter1068 = this->part_vals.begin(); _iter1068 != this->part_vals.end(); ++_iter1068)
+    std::vector<std::string> ::const_iterator _iter1062;
+    for (_iter1062 = this->part_vals.begin(); _iter1062 != this->part_vals.end(); ++_iter1062)
     {
-      xfer += oprot->writeString((*_iter1068));
+      xfer += oprot->writeString((*_iter1062));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13332,10 +13085,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1069;
-    for (_iter1069 = (*(this->part_vals)).begin(); _iter1069 != (*(this->part_vals)).end(); ++_iter1069)
+    std::vector<std::string> ::const_iterator _iter1063;
+    for (_iter1063 = (*(this->part_vals)).begin(); _iter1063 != (*(this->part_vals)).end(); ++_iter1063)
     {
-      xfer += oprot->writeString((*_iter1069));
+      xfer += oprot->writeString((*_iter1063));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13524,17 +13277,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->partitionSpecs.clear();
-            uint32_t _size1070;
-            ::apache::thrift::protocol::TType _ktype1071;
-            ::apache::thrift::protocol::TType _vtype1072;
-            xfer += iprot->readMapBegin(_ktype1071, _vtype1072, _size1070);
-            uint32_t _i1074;
-            for (_i1074 = 0; _i1074 < _size1070; ++_i1074)
+            uint32_t _size1064;
+            ::apache::thrift::protocol::TType _ktype1065;
+            ::apache::thrift::protocol::TType _vtype1066;
+            xfer += iprot->readMapBegin(_ktype1065, _vtype1066, _size1064);
+            uint32_t _i1068;
+            for (_i1068 = 0; _i1068 < _size1064; ++_i1068)
             {
-              std::string _key1075;
-              xfer += iprot->readString(_key1075);
-              std::string& _val1076 = this->partitionSpecs[_key1075];
-              xfer += iprot->readString(_val1076);
+              std::string _key1069;
+              xfer += iprot->readString(_key1069);
+              std::string& _val1070 = this->partitionSpecs[_key1069];
+              xfer += iprot->readString(_val1070);
             }
             xfer += iprot->readMapEnd();
           }
@@ -13595,11 +13348,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
-    std::map<std::string, std::string> ::const_iterator _iter1077;
-    for (_iter1077 = this->partitionSpecs.begin(); _iter1077 != this->partitionSpecs.end(); ++_iter1077)
+    std::map<std::string, std::string> ::const_iterator _iter1071;
+    for (_iter1071 = this->partitionSpecs.begin(); _iter1071 != this->partitionSpecs.end(); ++_iter1071)
     {
-      xfer += oprot->writeString(_iter1077->first);
-      xfer += oprot->writeString(_iter1077->second);
+      xfer += oprot->writeString(_iter1071->first);
+      xfer += oprot->writeString(_iter1071->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -13639,11 +13392,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
-    std::map<std::string, std::string> ::const_iterator _iter1078;
-    for (_iter1078 = (*(this->partitionSpecs)).begin(); _iter1078 != (*(this->partitionSpecs)).end(); ++_iter1078)
+    std::map<std::string, std::string> ::const_iterator _iter1072;
+    for (_iter1072 = (*(this->partitionSpecs)).begin(); _iter1072 != (*(this->partitionSpecs)).end(); ++_iter1072)
     {
-      xfer += oprot->writeString(_iter1078->first);
-      xfer += oprot->writeString(_iter1078->second);
+      xfer += oprot->writeString(_iter1072->first);
+      xfer += oprot->writeString(_iter1072->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -13888,17 +13641,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->partitionSpecs.clear();
-            uint32_t _size1079;
-            ::apache::thrift::protocol::TType _ktype1080;
-            ::apache::thrift::protocol::TType _vtype1081;
-            xfer += iprot->readMapBegin(_ktype1080, _vtype1081, _size1079);
-            uint32_t _i1083;
-            for (_i1083 = 0; _i1083 < _size1079; ++_i1083)
+            uint32_t _size1073;
+            ::apache::thrift::protocol::TType _ktype1074;
+            ::apache::thrift::protocol::TType _vtype1075;
+            xfer += iprot->readMapBegin(_ktype1074, _vtype1075, _size1073);
+            uint32_t _i1077;
+            for (_i1077 = 0; _i1077 < _size1073; ++_i1077)
             {
-              std::string _key1084;
-              xfer += iprot->readString(_key1084);
-              std::string& _val1085 = this->partitionSpecs[_key1084];
-              xfer += iprot->readString(_val1085);
+              std::string _key1078;
+              xfer += iprot->readString(_key1078);
+              std::string& _val1079 = this->partitionSpecs[_key1078];
+              xfer += iprot->readString(_val1079);
             }
             xfer += iprot->readMapEnd();
           }
@@ -13959,11 +13712,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
-    std::map<std::string, std::string> ::const_iterator _iter1086;
-    for (_iter1086 = this->partitionSpecs.begin(); _iter1086 != this->partitionSpecs.end(); ++_iter1086)
+    std::map<std::string, std::string> ::const_iterator _iter1080;
+    for (_iter1080 = this->partitionSpecs.begin(); _iter1080 != this->partitionSpecs.end(); ++_iter1080)
     {
-      xfer += oprot->writeString(_iter1086->first);
-      xfer += oprot->writeString(_iter1086->second);
+      xfer += oprot->writeString(_iter1080->first);
+      xfer += oprot->writeString(_iter1080->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -14003,11 +13756,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift::
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
-    std::map<std::string, std::string> ::const_iterator _iter1087;
-    for (_iter1087 = (*(this->partitionSpecs)).begin(); _iter1087 != (*(this->partitionSpecs)).end(); ++_iter1087)
+    std::map<std::string, std::string> ::const_iterator _iter1081;
+    for (_iter1081 = (*(this->partitionSpecs)).begin(); _iter1081 != (*(this->partitionSpecs)).end(); ++_iter1081)
     {
-      xfer += oprot->writeString(_iter1087->first);
-      xfer += oprot->writeString(_iter1087->second);
+      xfer += oprot->writeString(_iter1081->first);
+      xfer += oprot->writeString(_iter1081->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -14064,14 +13817,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1088;
-            ::apache::thrift::protocol::TType _etype1091;
-            xfer += iprot->readListBegin(_etype1091, _size1088);
-            this->success.resize(_size1088);
-            uint32_t _i1092;
-            for (_i1092 = 0; _i1092 < _size1088; ++_i1092)
+            uint32_t _size1082;
+            ::apache::thrift::protocol::TType _etype1085;
+            xfer += iprot->readListBegin(_etype1085, _size1082);
+            this->success.resize(_size1082);
+            uint32_t _i1086;
+            for (_i1086 = 0; _i1086 < _size1082; ++_i1086)
             {
-              xfer += this->success[_i1092].read(iprot);
+              xfer += this->success[_i1086].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -14134,10 +13887,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter1093;
-      for (_iter1093 = this->success.begin(); _iter1093 != this->success.end(); ++_iter1093)
+      std::vector<Partition> ::const_iterator _iter1087;
+      for (_iter1087 = this->success.begin(); _iter1087 != this->success.end(); ++_iter1087)
       {
-        xfer += (*_iter1093).write(oprot);
+        xfer += (*_iter1087).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -14194,14 +13947,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1094;
-            ::apache::thrift::protocol::TType _etype1097;
-            xfer += iprot->readListBegin(_etype1097, _size1094);
-            (*(this->success)).resize(_size1094);
-            uint32_t _i1098;
-            for (_i1098 = 0; _i1098 < _size1094; ++_i1098)
+            uint32_t _size1088;
+            ::apache::thrift::protocol::TType _etype1091;
+            xfer += iprot->readListBegin(_etype1091, _size1088);
+            (*(this->success)).resize(_size1088);
+            uint32_t _i1092;
+            for (_i1092 = 0; _i1092 < _size1088; ++_i1092)
             {
-              xfer += (*(this->success))[_i1098].read(iprot);
+              xfer += (*(this->success))[_i1092].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -14300,14 +14053,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1099;
-            ::apache::thrift::protocol::TType _etype1102;
-            xfer += iprot->readListBegin(_etype1102, _size1099);
-            this->part_vals.resize(_size1099);
-            uint32_t _i1103;
-            for (_i1103 = 0; _i1103 < _size1099; ++_i1103)
+            uint32_t _size1093;
+            ::apache::thrift::protocol::TType _etype1096;
+            xfer += iprot->readListBegin(_etype1096, _size1093);
+            this->part_vals.resize(_size1093);
+            uint32_t _i1097;
+            for (_i1097 = 0; _i1097 < _size1093; ++_i1097)
             {
-              xfer += iprot->readString(this->part_vals[_i1103]);
+              xfer += iprot->readString(this->part_vals[_i1097]);
             }
             xfer += iprot->readListEnd();
           }
@@ -14328,14 +14081,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->group_names.clear();
-            uint32_t _size1104;
-            ::apache::thrift::protocol::TType _etype1107;
-            xfer += iprot->readListBegin(_etype1107, _size1104);
-            this->group_names.resize(_size1104);
-            uint32_t _i1108;
-            for (_i1108 = 0; _i1108 < _size1104; ++_i1108)
+            uint32_t _size1098;
+            ::apache::thrift::protocol::TType _etype1101;
+            xfer += iprot->readListBegin(_etype1101, _size1098);
+            this->group_names.resize(_size1098);
+            uint32_t _i1102;
+            for (_i1102 = 0; _i1102 < _size1098; ++_i1102)
             {
-              xfer += iprot->readString(this->group_names[_i1108]);
+              xfer += iprot->readString(this->group_names[_i1102]);
             }
             xfer += iprot->readListEnd();
           }
@@ -14372,10 +14125,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1109;
-    for (_iter1109 = this->part_vals.begin(); _iter1109 != this->part_vals.end(); ++_iter1109)
+    std::vector<std::string> ::const_iterator _iter1103;
+    for (_iter1103 = this->part_vals.begin(); _iter1103 != this->part_vals.end(); ++_iter1103)
     {
-      xfer += oprot->writeString((*_iter1109));
+      xfer += oprot->writeString((*_iter1103));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14388,10 +14141,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-    std::vector<std::string> ::const_iterator _iter1110;
-    for (_iter1110 = this->group_names.begin(); _iter1110 != this->group_names.end(); ++_iter1110)
+    std::vector<std::string> ::const_iterator _iter1104;
+    for (_iter1104 = this->group_names.begin(); _iter1104 != this->group_names.end(); ++_iter1104)
     {
-      xfer += oprot->writeString((*_iter1110));
+      xfer += oprot->writeString((*_iter1104));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14423,10 +14176,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1111;
-    for (_iter1111 = (*(this->part_vals)).begin(); _iter1111 != (*(this->part_vals)).end(); ++_iter1111)
+    std::vector<std::string> ::const_iterator _iter1105;
+    for (_iter1105 = (*(this->part_vals)).begin(); _iter1105 != (*(this->part_vals)).end(); ++_iter1105)
     {
-      xfer += oprot->writeString((*_iter1111));
+      xfer += oprot->writeString((*_iter1105));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14439,10 +14192,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-    std::vector<std::string> ::const_iterator _iter1112;
-    for (_iter1112 = (*(this->group_names)).begin(); _iter1112 != (*(this->group_names)).end(); ++_iter1112)
+    std::vector<std::string> ::const_iterator _iter1106;
+    for (_iter1106 = (*(this->group_names)).begin(); _iter1106 != (*(this->group_names)).end(); ++_iter1106)
     {
-      xfer += oprot->writeString((*_iter1112));
+      xfer += oprot->writeString((*_iter1106));
     }
     xfer += oprot->writeListEnd();
   }
@@ -15001,14 +14754,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1113;
-            ::apache::thrift::protocol::TType _etype1116;
-            xfer += iprot->readListBegin(_etype1116, _size1113);
-            this->success.resize(_size1113);
-            uint32_t _i1117;
-            for (_i1117 = 0; _i1117 < _size1113; ++_i1117)
+            uint32_t _size1107;
+            ::apache::thrift::protocol::TType _etype1110;
+            xfer += iprot->readListBegin(_etype1110, _size1107);
+            this->success.resize(_size1107);
+            uint32_t _i1111;
+            for (_i1111 = 0; _i1111 < _size1107; ++_i1111)
             {
-              xfer += this->success[_i1117].read(iprot);
+              xfer += this->success[_i1111].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
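The hunks in this generated file only renumber Thrift's temporaries (_size1094 -> _size1088, _iter1093 -> _iter1087, and so on); the list read/write protocol itself is unchanged. For reference, the same readListBegin / sized loop / readListEnd pattern in Thrift's Java API, as a minimal hypothetical helper (the class and method names below are mine, not generated code):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TList;
    import org.apache.thrift.protocol.TProtocol;

    public final class ThriftListReadSketch {
      // Same shape as the generated C++ above: begin, sized loop, end.
      static List<String> readStringList(TProtocol iprot) throws TException {
        TList meta = iprot.readListBegin();   // carries element type and size
        List<String> values = new ArrayList<>(meta.size);
        for (int i = 0; i < meta.size; ++i) {
          values.add(iprot.readString());     // one element per iteration
        }
        iprot.readListEnd();
        return values;
      }
    }
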
@@ -15055,10 +14808,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT

<TRUNCATED>

[45/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index d32f1e5..30eefa6 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -272,7 +272,6 @@ public class HiveConf extends Configuration {
       HiveConf.ConfVars.HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE,
       HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH,
       HiveConf.ConfVars.HIVE_TXN_RETRYABLE_SQLEX_REGEX,
-      HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_TUNER,
       HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION,
       HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_ENABLED,
       HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_SIZE,
@@ -551,13 +550,6 @@ public class HiveConf extends Configuration {
         "If not set, defaults to the codec extension for text files (e.g. \".gz\"), or no extension otherwise."),
 
     HIVE_IN_TEST("hive.in.test", false, "internal usage only, true in test mode", true),
-    HIVE_IN_TEST_SHORT_LOGS("hive.in.test.short.logs", false,
-        "internal usage only, used only in test mode. If set true, when requesting the " +
-        "operation logs the short version (generated by LogDivertAppenderForTest) will be " +
-        "returned"),
-    HIVE_IN_TEST_REMOVE_LOGS("hive.in.test.remove.logs", true,
-        "internal usage only, used only in test mode. If set false, the operation logs, and the " +
-        "operation log directory will not be removed, so they can be found after the test runs."),
 
     HIVE_IN_TEZ_TEST("hive.in.tez.test", false, "internal use only, true when in testing tez",
         true),
@@ -744,16 +736,9 @@ public class HiveConf extends Configuration {
         "Defaults to all permissions for the hiveserver2/metastore process user."),
     METASTORE_CACHE_PINOBJTYPES("hive.metastore.cache.pinobjtypes", "Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order",
         "List of comma separated metastore object types that should be pinned in the cache"),
-    METASTORE_CONNECTION_POOLING_TYPE("datanucleus.connectionPoolingType", "HikariCP", new StringSet("BONECP", "DBCP",
+    METASTORE_CONNECTION_POOLING_TYPE("datanucleus.connectionPoolingType", "BONECP", new StringSet("BONECP", "DBCP",
       "HikariCP", "NONE"),
         "Specify connection pool library for datanucleus"),
-    METASTORE_CONNECTION_POOLING_MAX_CONNECTIONS("datanucleus.connectionPool.maxPoolSize", 10,
-      "Specify the maximum number of connections in the connection pool. Note: The configured size will be used by\n" +
-        "2 connection pools (TxnHandler and ObjectStore). When configuring the max connection pool size, it is\n" +
-        "recommended to take into account the number of metastore instances and the number of HiveServer2 instances\n" +
-        "configured with embedded metastore. To get optimal performance, set config to meet the following condition\n"+
-        "(2 * pool_size * metastore_instances + 2 * pool_size * HS2_instances_with_embedded_metastore) = \n" +
-        "(2 * physical_core_count + hard_disk_count)."),
     // Workaround for DN bug on Postgres:
     // http://www.datanucleus.org/servlet/forum/viewthread_thread,7985_offset
     METASTORE_DATANUCLEUS_INIT_COL_INFO("datanucleus.rdbms.initializeColumnInfo", "NONE",
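The datanucleus.connectionPool.maxPoolSize description removed just above encodes a sizing rule, not only a default. A worked example that solves that rule for the pool size (the cluster numbers and the class name are invented for illustration):

    // Illustrative only: solves the removed sizing rule
    // (2 * pool * metastores + 2 * pool * embedded HS2) = (2 * cores + disks)
    public final class PoolSizeSketch {
      public static void main(String[] args) {
        int physicalCores = 16;
        int hardDisks = 8;
        int metastoreInstances = 2;
        int hs2WithEmbeddedMetastore = 1;

        int budget = 2 * physicalCores + hardDisks;                                 // 40
        int pool = budget / (2 * (metastoreInstances + hs2WithEmbeddedMetastore));  // 40 / 6 = 6
        System.out.println("datanucleus.connectionPool.maxPoolSize ~= " + pool);
      }
    }

With those numbers the rule suggests roughly 6 connections for each of the two pools (TxnHandler and ObjectStore) mentioned in the removed text.
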
@@ -902,12 +887,6 @@ public class HiveConf extends Configuration {
     METASTORE_RAW_STORE_IMPL("hive.metastore.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore",
         "Name of the class that implements org.apache.hadoop.hive.metastore.rawstore interface. \n" +
         "This class is used to store and retrieval of raw metadata objects such as table, database"),
-    METASTORE_CACHED_RAW_STORE_IMPL("hive.metastore.cached.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore",
-        "Name of the wrapped RawStore class"),
-    METASTORE_CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY(
-        "hive.metastore.cached.rawstore.cache.update.frequency", "60", new TimeValidator(
-            TimeUnit.SECONDS),
-        "The time after which metastore cache is updated from metastore DB."),
     METASTORE_TXN_STORE_IMPL("hive.metastore.txn.store.impl",
         "org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler",
         "Name of class that implements org.apache.hadoop.hive.metastore.txn.TxnStore.  This " +
@@ -1709,10 +1688,6 @@ public class HiveConf extends Configuration {
     HIVE_STATS_NDV_ERROR("hive.stats.ndv.error", (float)20.0,
         "Standard error expressed in percentage. Provides a tradeoff between accuracy and compute cost. \n" +
         "A lower value for error indicates higher accuracy and a higher compute cost."),
-    HIVE_METASTORE_STATS_NDV_TUNER("hive.metastore.stats.ndv.tuner", (float)0.0,
-         "Provides a tunable parameter between the lower bound and the higher bound of ndv for aggregate ndv across all the partitions. \n" +
-         "The lower bound is equal to the maximum of ndv of all the partitions. The higher bound is equal to the sum of ndv of all the partitions.\n" +
-         "Its value should be between 0.0 (i.e., choose lower bound) and 1.0 (i.e., choose higher bound)"),
     HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION("hive.metastore.stats.ndv.densityfunction", false,
         "Whether to use density function to estimate the NDV for the whole table based on the NDV of partitions"),
     HIVE_STATS_KEY_PREFIX("hive.stats.key.prefix", "", "", true), // internal usage only
@@ -1761,10 +1736,6 @@ public class HiveConf extends Configuration {
         "uses column statistics to estimate the number of rows flowing out of it and hence the data size.\n" +
         "In the absence of column statistics, this factor determines the amount of rows that flows out\n" +
         "of JOIN operator."),
-    HIVE_STATS_CORRELATED_MULTI_KEY_JOINS("hive.stats.correlated.multi.key.joins", false,
-        "When estimating output rows for a join involving multiple columns, the default behavior assumes" +
-        "the columns are independent. Setting this flag to true will cause the estimator to assume" +
-        "the columns are correlated."),
     // in the absence of uncompressed/raw data size, total file size will be used for statistics
     // annotation. But the file may be compressed, encoded and serialized which may be lesser in size
     // than the actual uncompressed/raw data size. This factor will be multiplied to file size to estimate
@@ -1796,9 +1767,6 @@ public class HiveConf extends Configuration {
     HIVE_LOCK_MAPRED_ONLY("hive.lock.mapred.only.operation", false,
         "This param is to control whether or not only do lock on queries\n" +
         "that need to execute at least one mapred job."),
-    HIVE_LOCK_QUERY_STRING_MAX_LENGTH("hive.lock.query.string.max.length", 1000000,
-        "The maximum length of the query string to store in the lock.\n" +
-        "The default value is 1000000, since the data limit of a znode is 1MB"),
 
      // Zookeeper related configs
     HIVE_ZOOKEEPER_QUORUM("hive.zookeeper.quorum", "",
@@ -2031,7 +1999,6 @@ public class HiveConf extends Configuration {
     HIVE_DRUID_PASSIVE_WAIT_TIME("hive.druid.passiveWaitTimeMs", 30000,
             "Wait time in ms default to 30 seconds."
     ),
-    HIVE_DRUID_BITMAP_FACTORY_TYPE("hive.druid.bitmap.type", "roaring", new PatternSet("roaring", "concise"), "Coding algorithm use to encode the bitmaps"),
     // For HBase storage handler
     HIVE_HBASE_WAL_ENABLED("hive.hbase.wal.enabled", true,
         "Whether writes to HBase should be forced to the write-ahead log. \n" +
@@ -2173,7 +2140,7 @@ public class HiveConf extends Configuration {
         "When true the HDFS location stored in the index file will be ignored at runtime.\n" +
         "If the data got moved or the name of the cluster got changed, the index data should still be usable."),
 
-    HIVE_EXIM_URI_SCHEME_WL("hive.exim.uri.scheme.whitelist", "hdfs,pfile,file,s3,s3a",
+    HIVE_EXIM_URI_SCHEME_WL("hive.exim.uri.scheme.whitelist", "hdfs,pfile,file",
         "A comma separated list of acceptable URI schemes for import and export."),
     // temporary variable for testing. This is added just to turn off this feature in case of a bug in
     // deployment. It has not been documented in hive-default.xml intentionally, this should be removed
@@ -2225,10 +2192,7 @@ public class HiveConf extends Configuration {
         "When enabled, will log EXPLAIN EXTENDED output for the query at INFO log4j log level."),
     HIVE_EXPLAIN_USER("hive.explain.user", true,
         "Whether to show explain result at user level.\n" +
-        "When enabled, will log EXPLAIN output for the query at user level. Tez only."),
-    HIVE_SPARK_EXPLAIN_USER("hive.spark.explain.user", false,
-        "Whether to show explain result at user level.\n" +
-        "When enabled, will log EXPLAIN output for the query at user level. Spark only."),
+        "When enabled, will log EXPLAIN output for the query at user level."),
 
     // prefix used to auto generated column aliases (this should be started with '_')
     HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL("hive.autogen.columnalias.prefix.label", "_c",
@@ -2244,29 +2208,19 @@ public class HiveConf extends Configuration {
             "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics",
             "org.apache.hadoop.hive.common.metrics.LegacyMetrics"),
         "Hive metrics subsystem implementation class."),
-    HIVE_CODAHALE_METRICS_REPORTER_CLASSES("hive.service.metrics.codahale.reporter.classes",
-        "org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " +
-            "org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter",
-            "Comma separated list of reporter implementation classes for metric class "
-                + "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics. Overrides "
-                + "HIVE_METRICS_REPORTER conf if present"),
-    @Deprecated
-    HIVE_METRICS_REPORTER("hive.service.metrics.reporter", "",
-        "Reporter implementations for metric class "
-            + "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics;" +
-        "Deprecated, use HIVE_CODAHALE_METRICS_REPORTER_CLASSES instead. This configuraiton will be"
-            + " overridden by HIVE_CODAHALE_METRICS_REPORTER_CLASSES if present. " +
-            "Comma separated list of JMX, CONSOLE, JSON_FILE, HADOOP2"),
+    HIVE_METRICS_REPORTER("hive.service.metrics.reporter", "JSON_FILE, JMX",
+        "Reporter type for metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics, " +
+        "comma separated list of JMX, CONSOLE, JSON_FILE, HADOOP2"),
     HIVE_METRICS_JSON_FILE_LOCATION("hive.service.metrics.file.location", "/tmp/report.json",
         "For metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics JSON_FILE reporter, the location of local JSON metrics file.  " +
         "This file will get overwritten at every interval."),
-    HIVE_METRICS_JSON_FILE_INTERVAL("hive.service.metrics.file.frequency", "5000ms",
+    HIVE_METRICS_JSON_FILE_INTERVAL("hive.service.metrics.file.frequency", "5s",
         new TimeValidator(TimeUnit.MILLISECONDS),
-        "For metric class org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " +
+        "For metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics JSON_FILE reporter, " +
         "the frequency of updating JSON metrics file."),
     HIVE_METRICS_HADOOP2_INTERVAL("hive.service.metrics.hadoop2.frequency", "30s",
         new TimeValidator(TimeUnit.SECONDS),
-        "For metric class org.apache.hadoop.hive.common.metrics.metrics2.Metrics2Reporter, " +
+        "For metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics HADOOP2 reporter, " +
         "the frequency of updating the HADOOP2 metrics system."),
     HIVE_METRICS_HADOOP2_COMPONENT_NAME("hive.service.metrics.hadoop2.component",
         "hive",
@@ -2283,6 +2237,10 @@ public class HiveConf extends Configuration {
     HIVE_INSERT_INTO_MULTILEVEL_DIRS("hive.insert.into.multilevel.dirs", false,
         "Where to insert into multilevel directories like\n" +
         "\"insert directory '/HIVEFT25686/chinna/' from table\""),
+    HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS("hive.warehouse.subdir.inherit.perms", true,
+        "Set this to false if the table directories should be created\n" +
+        "with the permissions derived from dfs umask instead of\n" +
+        "inheriting the permission of the warehouse or database directory."),
     HIVE_INSERT_INTO_EXTERNAL_TABLES("hive.insert.into.external.tables", true,
         "whether insert into external tables is allowed"),
     HIVE_TEMPORARY_TABLE_STORAGE(
@@ -2291,10 +2249,7 @@ public class HiveConf extends Configuration {
          "Choices between memory, ssd and default"),
     HIVE_QUERY_LIFETIME_HOOKS("hive.query.lifetime.hooks", "",
         "A comma separated list of hooks which implement QueryLifeTimeHook. These will be triggered" +
-            " before/after query compilation and before/after query execution, in the order specified." +
-        "Implementations of QueryLifeTimeHookWithParseHooks can also be specified in this list. If they are" +
-        "specified then they will be invoked in the same places as QueryLifeTimeHooks and will be invoked during pre " +
-         "and post query parsing"),
+            " before/after query compilation and before/after query execution, in the order specified"),
     HIVE_DRIVER_RUN_HOOKS("hive.exec.driver.run.hooks", "",
         "A comma separated list of hooks which implement HiveDriverRunHook. Will be run at the beginning " +
         "and end of Driver.run, these will be run in the order specified."),
@@ -2803,9 +2758,9 @@ public class HiveConf extends Configuration {
     HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT("hive.vectorized.use.vectorized.input.format", true,
         "This flag should be set to true to enable vectorizing with vectorized input file format capable SerDe.\n" +
         "The default value is true."),
-    HIVE_VECTORIZATION_USE_VECTOR_DESERIALIZE("hive.vectorized.use.vector.serde.deserialize", true,
+    HIVE_VECTORIZATION_USE_VECTOR_DESERIALIZE("hive.vectorized.use.vector.serde.deserialize", false,
         "This flag should be set to true to enable vectorizing rows using vector deserialize.\n" +
-        "The default value is true."),
+        "The default value is false."),
     HIVE_VECTORIZATION_USE_ROW_DESERIALIZE("hive.vectorized.use.row.serde.deserialize", false,
         "This flag should be set to true to enable vectorizing using row deserialize.\n" +
         "The default value is false."),
@@ -2874,9 +2829,6 @@ public class HiveConf extends Configuration {
         "Turn on Tez' auto reducer parallelism feature. When enabled, Hive will still estimate data sizes\n" +
         "and set parallelism estimates. Tez will sample source vertices' output sizes and adjust the estimates at runtime as\n" +
         "necessary."),
-    TEZ_LLAP_MIN_REDUCER_PER_EXECUTOR("hive.tez.llap.min.reducer.per.executor", 0.95f,
-        "If above 0, the min number of reducers for auto-parallelism for LLAP scheduling will\n" +
-        "be set to this fraction of the number of executors."),
     TEZ_MAX_PARTITION_FACTOR("hive.tez.max.partition.factor", 2f,
         "When auto reducer parallelism is enabled this factor will be used to over-partition data in shuffle edges."),
     TEZ_MIN_PARTITION_FACTOR("hive.tez.min.partition.factor", 0.25f,
@@ -2904,18 +2856,8 @@ public class HiveConf extends Configuration {
     TEZ_DYNAMIC_SEMIJOIN_REDUCTION("hive.tez.dynamic.semijoin.reduction", true,
         "When dynamic semijoin is enabled, shuffle joins will perform a leaky semijoin before shuffle. This " +
         "requires hive.tez.dynamic.partition.pruning to be enabled."),
-    TEZ_MIN_BLOOM_FILTER_ENTRIES("hive.tez.min.bloom.filter.entries", 1000000L,
-            "Bloom filter should be of at min certain size to be effective"),
     TEZ_MAX_BLOOM_FILTER_ENTRIES("hive.tez.max.bloom.filter.entries", 100000000L,
             "Bloom filter should be of at max certain size to be effective"),
-    TEZ_BLOOM_FILTER_FACTOR("hive.tez.bloom.filter.factor", (float) 2.0,
-            "Bloom filter should be a multiple of this factor with nDV"),
-    TEZ_BIGTABLE_MIN_SIZE_SEMIJOIN_REDUCTION("hive.tez.bigtable.minsize.semijoin.reduction", 100000000L,
-            "Big table for runtime filteting should be of atleast this size"),
-    TEZ_DYNAMIC_SEMIJOIN_REDUCTION_THRESHOLD("hive.tez.dynamic.semijoin.reduction.threshold", (float) 0.50,
-            "Only perform semijoin optimization if the estimated benefit at or above this fraction of the target table"),
-    TEZ_DYNAMIC_SEMIJOIN_REDUCTION_HINT_ONLY("hive.tez.dynamic.semijoin.reduction.hint.only", false,
-            "When true, only enforce semijoin when a hint is provided"),
     TEZ_SMB_NUMBER_WAVES(
         "hive.tez.smb.number.waves",
         (float) 0.5,
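The semijoin bloom-filter knobs removed above pair an nDV multiplier with minimum and maximum entry counts. Purely as an illustration of how such a factor and bounds read together (this is not Hive's actual sizing code; the class name and sample nDV values are invented), using the defaults shown in the hunk:

    public final class BloomFilterSizeSketch {
      // Clamp an NDV-derived size to the configured bounds.
      static long expectedEntries(long ndv, float factor, long minEntries, long maxEntries) {
        long scaled = (long) (ndv * factor);              // "a multiple of this factor with nDV"
        return Math.max(minEntries, Math.min(maxEntries, scaled));
      }

      public static void main(String[] args) {
        // Defaults from the hunk: min 1000000, max 100000000, factor 2.0.
        System.out.println(expectedEntries(250000L, 2.0f, 1000000L, 100000000L));    // 1000000, the minimum wins
        System.out.println(expectedEntries(30000000L, 2.0f, 1000000L, 100000000L));  // 60000000
      }
    }
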
@@ -2953,19 +2895,13 @@ public class HiveConf extends Configuration {
         "LLAP IO memory usage; 'cache' (the default) uses data and metadata cache with a\n" +
         "custom off-heap allocator, 'none' doesn't use either (this mode may result in\n" +
         "significant performance degradation)"),
-    LLAP_ALLOCATOR_MIN_ALLOC("hive.llap.io.allocator.alloc.min", "256Kb", new SizeValidator(),
+    LLAP_ALLOCATOR_MIN_ALLOC("hive.llap.io.allocator.alloc.min", "16Kb", new SizeValidator(),
         "Minimum allocation possible from LLAP buddy allocator. Allocations below that are\n" +
         "padded to minimum allocation. For ORC, should generally be the same as the expected\n" +
         "compression buffer size, or next lowest power of 2. Must be a power of 2."),
     LLAP_ALLOCATOR_MAX_ALLOC("hive.llap.io.allocator.alloc.max", "16Mb", new SizeValidator(),
         "Maximum allocation possible from LLAP buddy allocator. For ORC, should be as large as\n" +
         "the largest expected ORC compression buffer size. Must be a power of 2."),
-    @Deprecated
-    LLAP_IO_METADATA_FRACTION("hive.llap.io.metadata.fraction", 0.1f,
-        "Temporary setting for on-heap metadata cache fraction of xmx, set to avoid potential\n" +
-        "heap problems on very large datasets when on-heap metadata cache takes over\n" +
-        "everything. -1 managed metadata and data together (which is more flexible). This\n" +
-        "setting will be removed (in effect become -1) once ORC metadata cache is moved off-heap."),
     LLAP_ALLOCATOR_ARENA_COUNT("hive.llap.io.allocator.arena.count", 8,
         "Arena count for LLAP low-level cache; cache will be allocated in the steps of\n" +
         "(size/arena_count) bytes. This size must be <= 1Gb and >= max allocation; if it is\n" +
@@ -3159,19 +3095,6 @@ public class HiveConf extends Configuration {
     LLAP_DAEMON_NUM_EXECUTORS("hive.llap.daemon.num.executors", 4,
       "Number of executors to use in LLAP daemon; essentially, the number of tasks that can be\n" +
       "executed in parallel.", "llap.daemon.num.executors"),
-    LLAP_MAPJOIN_MEMORY_OVERSUBSCRIBE_FACTOR("hive.llap.mapjoin.memory.oversubscribe.factor", 0.2f,
-      "Fraction of memory from hive.auto.convert.join.noconditionaltask.size that can be over subscribed\n" +
-        "by queries running in LLAP mode. This factor has to be from 0.0 to 1.0. Default is 20% over subscription.\n"),
-    LLAP_MEMORY_OVERSUBSCRIPTION_MAX_EXECUTORS_PER_QUERY("hive.llap.memory.oversubscription.max.executors.per.query", 3,
-      "Used along with hive.llap.mapjoin.memory.oversubscribe.factor to limit the number of executors from\n" +
-        "which memory for mapjoin can be borrowed. Default 3 (from 3 other executors\n" +
-        "hive.llap.mapjoin.memory.oversubscribe.factor amount of memory can be borrowed based on which mapjoin\n" +
-        "conversion decision will be made). This is only an upper bound. Lower bound is determined by number of\n" +
-        "executors and configured max concurrency."),
-    LLAP_MAPJOIN_MEMORY_MONITOR_CHECK_INTERVAL("hive.llap.mapjoin.memory.monitor.check.interval", 100000L,
-      "Check memory usage of mapjoin hash tables after every interval of this many rows. If map join hash table\n" +
-        "memory usage exceeds (hive.auto.convert.join.noconditionaltask.size * hive.hash.table.inflation.factor)\n" +
-        "when running in LLAP, tasks will get killed and not retried. Set the value to 0 to disable this feature."),
     LLAP_DAEMON_AM_REPORTER_MAX_THREADS("hive.llap.daemon.am-reporter.max.threads", 4,
         "Maximum number of threads to be used for AM reporter. If this is lower than number of\n" +
         "executors in llap daemon, it would be set to number of executors at runtime.",
@@ -3307,14 +3230,13 @@ public class HiveConf extends Configuration {
         "logger used for llap-daemons."),
 
     SPARK_USE_OP_STATS("hive.spark.use.op.stats", true,
-        "Whether to use operator stats to determine reducer parallelism for Hive on Spark.\n" +
-        "If this is false, Hive will use source table stats to determine reducer\n" +
-        "parallelism for all first level reduce tasks, and the maximum reducer parallelism\n" +
-        "from all parents for all the rest (second level and onward) reducer tasks."),
-    SPARK_USE_TS_STATS_FOR_MAPJOIN("hive.spark.use.ts.stats.for.mapjoin", false,
-        "If this is set to true, mapjoin optimization in Hive/Spark will use statistics from\n" +
-        "TableScan operators at the root of operator tree, instead of parent ReduceSink\n" +
-        "operators of the Join operator."),
+        "Whether to use operator stats to determine reducer parallelism for Hive on Spark. "
+            + "If this is false, Hive will use source table stats to determine reducer "
+            + "parallelism for all first level reduce tasks, and the maximum reducer parallelism "
+            + "from all parents for all the rest (second level and onward) reducer tasks."),
+    SPARK_USE_FILE_SIZE_FOR_MAPJOIN("hive.spark.use.file.size.for.mapjoin", false,
+        "If this is set to true, mapjoin optimization in Hive/Spark will use source file sizes associated "
+            + "with TableScan operator on the root of operator tree, instead of using operator statistics."),
     SPARK_CLIENT_FUTURE_TIMEOUT("hive.spark.client.future.timeout",
       "60s", new TimeValidator(TimeUnit.SECONDS),
       "Timeout for requests from Hive client to remote Spark driver."),
@@ -3357,8 +3279,6 @@ public class HiveConf extends Configuration {
         "hive.spark.use.groupby.shuffle", true,
         "Spark groupByKey transformation has better performance but uses unbounded memory." +
             "Turn this off when there is a memory issue."),
-    SPARK_JOB_MAX_TASKS("hive.spark.job.max.tasks", -1, "The maximum number of tasks a Spark job may have.\n" +
-            "If a Spark job contains more tasks than the maximum, it will be cancelled. A value of -1 means no limit."),
     NWAYJOINREORDER("hive.reorder.nway.joins", true,
       "Runs reordering of tables within single n-way join (i.e.: picks streamtable)"),
     HIVE_MERGE_NWAY_JOINS("hive.merge.nway.joins", true,
@@ -3761,9 +3681,6 @@ public class HiveConf extends Configuration {
       }
     } else if (name.startsWith("hive.spark")) { // Remote Spark Context property.
       result = true;
-    } else if (name.equals("mapreduce.job.queuename")) {
-      // a special property starting with mapreduce that we would also like to effect if it changes
-      result = true;
     }
 
     return result;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
index dc02803..9ba08e5 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
@@ -94,22 +94,11 @@ public class HiveConfUtil {
   public static void stripConfigurations(Configuration conf, Set<String> hiddenSet) {
     for (String name : hiddenSet) {
       if (conf.get(name) != null) {
-        conf.set(name, StringUtils.EMPTY);
+        conf.set(name, "");
       }
     }
   }
 
-  /**
-   * Searches the given configuration object and replaces all the configuration values for keys
-   * defined hive.conf.hidden.list by empty String
-   *
-   * @param conf - Configuration object which needs to be modified to remove sensitive keys
-   */
-  public static void stripConfigurations(Configuration conf) {
-    Set<String> hiddenSet = getHiddenSet(conf);
-    stripConfigurations(conf, hiddenSet);
-  }
-
   public static void dumpConfig(Configuration originalConf, StringBuilder sb) {
     Set<String> hiddenSet = getHiddenSet(originalConf);
     sb.append("Values omitted for security reason if present: ").append(hiddenSet).append("\n");

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java b/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java
index 008b3b0..7f3c8b3 100644
--- a/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java
+++ b/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java
@@ -39,7 +39,6 @@ import java.util.Map;
 public class PerfLogger {
   public static final String ACQUIRE_READ_WRITE_LOCKS = "acquireReadWriteLocks";
   public static final String COMPILE = "compile";
-  public static final String WAIT_COMPILE = "waitCompile";
   public static final String PARSE = "parse";
   public static final String ANALYZE = "semanticAnalyze";
   public static final String OPTIMIZER = "optimizer";

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hive/http/ConfServlet.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hive/http/ConfServlet.java b/common/src/java/org/apache/hive/http/ConfServlet.java
index 856a5d2..253df4f 100644
--- a/common/src/java/org/apache/hive/http/ConfServlet.java
+++ b/common/src/java/org/apache/hive/http/ConfServlet.java
@@ -26,7 +26,6 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConfUtil;
 
 /**
  * A servlet to print out the running configuration data.
@@ -82,14 +81,11 @@ public class ConfServlet extends HttpServlet {
    * Guts of the servlet - extracted for easy testing.
    */
   static void writeResponse(Configuration conf, Writer out, String format)
-      throws IOException, BadFormatException {
-    //redact the sensitive information from the configuration values
-    Configuration hconf = new Configuration(conf);
-    HiveConfUtil.stripConfigurations(hconf);
+    throws IOException, BadFormatException {
     if (FORMAT_JSON.equals(format)) {
-      Configuration.dumpConfiguration(hconf, out);
+      Configuration.dumpConfiguration(conf, out);
     } else if (FORMAT_XML.equals(format)) {
-      hconf.writeXml(out);
+      conf.writeXml(out);
     } else {
       throw new BadFormatException("Bad format: " + format);
     }
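The ConfServlet hunk above backs out a redaction step: copy the configuration, blank out every key on hive.conf.hidden.list, then dump the copy instead of the live object. A standalone sketch of that pattern built only from calls visible in these diffs (the sketch's own class and method names are mine):

    import java.io.IOException;
    import java.io.Writer;
    import java.util.Set;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.conf.HiveConfUtil;

    public final class RedactedDumpSketch {
      static void dumpRedacted(Configuration conf, Writer out) throws IOException {
        Configuration copy = new Configuration(conf);           // never mutate the live conf
        Set<String> hidden = HiveConfUtil.getHiddenSet(copy);   // keys from hive.conf.hidden.list
        HiveConfUtil.stripConfigurations(copy, hidden);         // hidden values become empty strings
        Configuration.dumpConfiguration(copy, out);             // JSON dump of the redacted copy
      }
    }
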

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hive/http/HttpServer.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hive/http/HttpServer.java b/common/src/java/org/apache/hive/http/HttpServer.java
index fd3d457..db5650d 100644
--- a/common/src/java/org/apache/hive/http/HttpServer.java
+++ b/common/src/java/org/apache/hive/http/HttpServer.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.core.Appender;
@@ -53,13 +54,11 @@ import org.apache.logging.log4j.core.appender.OutputStreamManager;
 import org.eclipse.jetty.rewrite.handler.RewriteHandler;
 import org.eclipse.jetty.rewrite.handler.RewriteRegexRule;
 import org.eclipse.jetty.server.Connector;
-import org.eclipse.jetty.server.HttpConfiguration;
-import org.eclipse.jetty.server.HttpConnectionFactory;
-import org.eclipse.jetty.server.LowResourceMonitor;
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.server.handler.ContextHandler.Context;
 import org.eclipse.jetty.server.handler.ContextHandlerCollection;
-import org.eclipse.jetty.server.ServerConnector;
+import org.eclipse.jetty.server.nio.SelectChannelConnector;
+import org.eclipse.jetty.server.ssl.SslSelectChannelConnector;
 import org.eclipse.jetty.servlet.DefaultServlet;
 import org.eclipse.jetty.servlet.FilterHolder;
 import org.eclipse.jetty.servlet.FilterMapping;
@@ -86,9 +85,9 @@ public class HttpServer {
   public static final String ADMINS_ACL = "admins.acl";
 
   private final String name;
-  private String appDir;
-  private WebAppContext webAppContext;
-  private Server webServer;
+  private final String appDir;
+  private final WebAppContext webAppContext;
+  private final Server webServer;
 
   /**
    * Create a status server on the given port.
@@ -96,7 +95,16 @@ public class HttpServer {
   private HttpServer(final Builder b) throws IOException {
     this.name = b.name;
 
-    createWebServer(b);
+    webServer = new Server();
+    appDir = getWebAppsPath(b.name);
+    webAppContext = createWebAppContext(b);
+
+    if (b.useSPNEGO) {
+      // Secure the web server with kerberos
+      setupSpnegoFilter(b);
+    }
+
+    initializeWebServer(b);
   }
 
   public static class Builder {
@@ -211,7 +219,7 @@ public class HttpServer {
   }
 
   public int getPort() {
-    return ((ServerConnector)(webServer.getConnectors()[0])).getLocalPort();
+    return webServer.getConnectors()[0].getLocalPort();
   }
 
   /**
@@ -337,14 +345,9 @@ public class HttpServer {
    * Create a channel connector for "http/https" requests
    */
   Connector createChannelConnector(int queueSize, Builder b) {
-    ServerConnector connector;
-
-    final HttpConfiguration conf = new HttpConfiguration();
-    conf.setRequestHeaderSize(1024*64);
-    final HttpConnectionFactory http = new HttpConnectionFactory(conf);
-
+    SelectChannelConnector connector;
     if (!b.useSSL) {
-      connector = new ServerConnector(webServer, http);
+      connector = new SelectChannelConnector();
     } else {
       SslContextFactory sslContextFactory = new SslContextFactory();
       sslContextFactory.setKeyStorePath(b.keyStorePath);
@@ -354,13 +357,15 @@ public class HttpServer {
       sslContextFactory.addExcludeProtocols(excludedSSLProtocols.toArray(
           new String[excludedSSLProtocols.size()]));
       sslContextFactory.setKeyStorePassword(b.keyStorePassword);
-      connector = new ServerConnector(webServer, sslContextFactory, http);
+      connector = new SslSelectChannelConnector(sslContextFactory);
     }
 
+    connector.setLowResourcesMaxIdleTime(10000);
     connector.setAcceptQueueSize(queueSize);
+    connector.setResolveNames(false);
+    connector.setUseDirectBuffers(false);
+    connector.setRequestHeaderSize(1024*64);
     connector.setReuseAddress(true);
-    connector.setHost(b.host);
-    connector.setPort(b.port);
     return connector;
   }
 
@@ -373,7 +378,7 @@ public class HttpServer {
     }
   }
 
-  private void createWebServer(final Builder b) throws IOException {
+  void initializeWebServer(Builder b) {
     // Create the thread pool for the web server to handle HTTP requests
     QueuedThreadPool threadPool = new QueuedThreadPool();
     if (b.maxThreads > 0) {
@@ -381,26 +386,12 @@ public class HttpServer {
     }
     threadPool.setDaemon(true);
     threadPool.setName(b.name + "-web");
+    webServer.setThreadPool(threadPool);
 
-    this.webServer = new Server(threadPool);
-    this.appDir = getWebAppsPath(b.name);
-    this.webAppContext = createWebAppContext(b);
-
-    if (b.useSPNEGO) {
-      // Secure the web server with kerberos
-      setupSpnegoFilter(b);
-    }
-
-    initializeWebServer(b, threadPool.getMaxThreads());
-  }
-
-  private void initializeWebServer(final Builder b, int queueSize) {
-    // Set handling for low resource conditions.
-    final LowResourceMonitor low = new LowResourceMonitor(webServer);
-    low.setLowResourcesIdleTimeout(10000);
-    webServer.addBean(low);
-
-    Connector connector = createChannelConnector(queueSize, b);
+    // Create the channel connector for the web server
+    Connector connector = createChannelConnector(threadPool.getMaxThreads(), b);
+    connector.setHost(b.host);
+    connector.setPort(b.port);
     webServer.addConnector(connector);
 
     RewriteHandler rwHandler = new RewriteHandler();
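The HttpServer.java hunks revert the embedded web server from Jetty 9 (ServerConnector, HttpConfiguration, HttpConnectionFactory, LowResourceMonitor) back to Jetty 8 (SelectChannelConnector, SslSelectChannelConnector). For comparison, a minimal sketch of the Jetty 9 style being backed out, plain HTTP only (host, port and thread count are placeholders, and the class name is mine):

    import org.eclipse.jetty.server.HttpConfiguration;
    import org.eclipse.jetty.server.HttpConnectionFactory;
    import org.eclipse.jetty.server.Server;
    import org.eclipse.jetty.server.ServerConnector;
    import org.eclipse.jetty.util.thread.QueuedThreadPool;

    public final class Jetty9ConnectorSketch {
      public static void main(String[] args) throws Exception {
        QueuedThreadPool pool = new QueuedThreadPool(16);
        Server server = new Server(pool);                 // thread pool is passed to the constructor

        HttpConfiguration httpConf = new HttpConfiguration();
        httpConf.setRequestHeaderSize(1024 * 64);         // same header limit as the removed code

        ServerConnector connector =
            new ServerConnector(server, new HttpConnectionFactory(httpConf));
        connector.setHost("localhost");
        connector.setPort(10002);
        connector.setReuseAddress(true);
        server.addConnector(connector);

        server.start();
        server.join();
      }
    }
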

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/test/org/apache/hadoop/hive/common/TestFileUtils.java
----------------------------------------------------------------------
diff --git a/common/src/test/org/apache/hadoop/hive/common/TestFileUtils.java b/common/src/test/org/apache/hadoop/hive/common/TestFileUtils.java
index adc9b0c..03fcaeb 100644
--- a/common/src/test/org/apache/hadoop/hive/common/TestFileUtils.java
+++ b/common/src/test/org/apache/hadoop/hive/common/TestFileUtils.java
@@ -213,6 +213,7 @@ public class TestFileUtils {
     Path copySrc = new Path("copySrc");
     Path copyDst = new Path("copyDst");
     HiveConf conf = new HiveConf(TestFileUtils.class);
+    conf.set(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS.varname, "false");
 
     FileSystem mockFs = mock(FileSystem.class);
     when(mockFs.getUri()).thenReturn(URI.create("hdfs:///"));

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/test/org/apache/hadoop/hive/common/TestValidReadTxnList.java
----------------------------------------------------------------------
diff --git a/common/src/test/org/apache/hadoop/hive/common/TestValidReadTxnList.java b/common/src/test/org/apache/hadoop/hive/common/TestValidReadTxnList.java
index 00ee820..6661158 100644
--- a/common/src/test/org/apache/hadoop/hive/common/TestValidReadTxnList.java
+++ b/common/src/test/org/apache/hadoop/hive/common/TestValidReadTxnList.java
@@ -26,7 +26,6 @@ import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
-import java.util.BitSet;
 
 /**
  * Tests for {@link ValidReadTxnList}
@@ -35,9 +34,9 @@ public class TestValidReadTxnList {
 
   @Test
   public void noExceptions() throws Exception {
-    ValidTxnList txnList = new ValidReadTxnList(new long[0], new BitSet(), 1, Long.MAX_VALUE);
+    ValidTxnList txnList = new ValidReadTxnList(new long[0], 1, Long.MAX_VALUE);
     String str = txnList.writeToString();
-    Assert.assertEquals("1:" + Long.MAX_VALUE + "::", str);
+    Assert.assertEquals("1:" + Long.MAX_VALUE + ":", str);
     ValidTxnList newList = new ValidReadTxnList();
     newList.readFromString(str);
     Assert.assertTrue(newList.isTxnValid(1));
@@ -46,9 +45,9 @@ public class TestValidReadTxnList {
 
   @Test
   public void exceptions() throws Exception {
-    ValidTxnList txnList = new ValidReadTxnList(new long[]{2L,4L}, new BitSet(), 5, 4L);
+    ValidTxnList txnList = new ValidReadTxnList(new long[]{2L,4L}, 5, 4L);
     String str = txnList.writeToString();
-    Assert.assertEquals("5:4:2,4:", str);
+    Assert.assertEquals("5:4:2:4", str);
     ValidTxnList newList = new ValidReadTxnList();
     newList.readFromString(str);
     Assert.assertTrue(newList.isTxnValid(1));
@@ -63,7 +62,7 @@ public class TestValidReadTxnList {
   public void longEnoughToCompress() throws Exception {
     long[] exceptions = new long[1000];
     for (int i = 0; i < 1000; i++) exceptions[i] = i + 100;
-    ValidTxnList txnList = new ValidReadTxnList(exceptions, new BitSet(), 2000, 900);
+    ValidTxnList txnList = new ValidReadTxnList(exceptions, 2000, 900);
     String str = txnList.writeToString();
     ValidTxnList newList = new ValidReadTxnList();
     newList.readFromString(str);
@@ -77,7 +76,7 @@ public class TestValidReadTxnList {
   public void readWriteConfig() throws Exception {
     long[] exceptions = new long[1000];
     for (int i = 0; i < 1000; i++) exceptions[i] = i + 100;
-    ValidTxnList txnList = new ValidReadTxnList(exceptions, new BitSet(), 2000, 900);
+    ValidTxnList txnList = new ValidReadTxnList(exceptions, 2000, 900);
     String str = txnList.writeToString();
     Configuration conf = new Configuration();
     conf.set(ValidTxnList.VALID_TXNS_KEY, str);
@@ -90,20 +89,4 @@ public class TestValidReadTxnList {
     newConf.readFields(in);
     Assert.assertEquals(str, newConf.get(ValidTxnList.VALID_TXNS_KEY));
   }
-
-  @Test
-  public void testAbortedTxn() throws Exception {
-    long[] exceptions = {2L, 4L, 6L, 8L, 10L};
-    BitSet bitSet = new BitSet(exceptions.length);
-    bitSet.set(0);  // mark txn "2L" aborted
-    bitSet.set(3);  // mark txn "8L" aborted
-    ValidTxnList txnList = new ValidReadTxnList(exceptions, bitSet, 11, 4L);
-    String str = txnList.writeToString();
-    Assert.assertEquals("11:4:4,6,10:2,8", str);
-    Assert.assertTrue(txnList.isTxnAborted(2L));
-    Assert.assertFalse(txnList.isTxnAborted(4L));
-    Assert.assertFalse(txnList.isTxnAborted(6L));
-    Assert.assertTrue(txnList.isTxnAborted(8L));
-    Assert.assertFalse(txnList.isTxnAborted(10L));
-  }
 }
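The assertions in the hunks above spell out the serialized ValidTxnList on both sides of the change; the removed testAbortedTxn expects "11:4:4,6,10:2,8", i.e. four colon-separated fields ending in comma-separated open and aborted exception lists. A toy parser that goes only by those expected strings (field labels are mine, read off the tests rather than the Hive sources):

    import java.util.Arrays;

    public final class TxnListStringSketch {
      public static void main(String[] args) {
        String str = "11:4:4,6,10:2,8";
        String[] fields = str.split(":", -1);             // keep trailing empty fields, e.g. "1:...::"

        long highWatermark = Long.parseLong(fields[0]);   // 11, the third constructor argument in the tests
        long secondField   = Long.parseLong(fields[1]);   // 4, the fourth constructor argument
        long[] open    = parseCsv(fields[2]);             // [4, 6, 10]
        long[] aborted = parseCsv(fields[3]);             // [2, 8]

        System.out.println(highWatermark + " " + secondField + " "
            + Arrays.toString(open) + " " + Arrays.toString(aborted));
      }

      private static long[] parseCsv(String csv) {
        if (csv.isEmpty()) {
          return new long[0];
        }
        return Arrays.stream(csv.split(",")).mapToLong(Long::parseLong).toArray();
      }
    }
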

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleMetrics.java
----------------------------------------------------------------------
diff --git a/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleMetrics.java b/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleMetrics.java
index 67f81d6..aa4e75f 100644
--- a/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleMetrics.java
+++ b/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleMetrics.java
@@ -33,6 +33,8 @@ import org.junit.Before;
 import org.junit.Test;
 
 import java.io.File;
+import java.nio.file.Files;
+import java.nio.file.Paths;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -55,12 +57,9 @@ public class TestCodahaleMetrics {
 
     jsonReportFile = new File(workDir, "json_reporting");
     jsonReportFile.delete();
-
     conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "local");
     conf.setVar(HiveConf.ConfVars.HIVE_METRICS_CLASS, CodahaleMetrics.class.getCanonicalName());
-    conf.setVar(HiveConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES,
-        "org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, "
-            + "org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter");
+    conf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER, MetricsReporting.JSON_FILE.name() + "," + MetricsReporting.JMX.name());
     conf.setVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_LOCATION, jsonReportFile.toString());
     conf.setVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_INTERVAL, "100ms");
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleReportersConf.java
----------------------------------------------------------------------
diff --git a/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleReportersConf.java b/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleReportersConf.java
deleted file mode 100644
index d694e26..0000000
--- a/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleReportersConf.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.common.metrics.metrics2;
-
-import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import java.lang.reflect.InvocationTargetException;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.hive.common.metrics.MetricsTestUtils;
-import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.io.File;
-
-/**
- * Unit tests for Codahale reporter config backward compatibility
- */
-public class TestCodahaleReportersConf {
-
-  private static File workDir = new File(System.getProperty("test.tmp.dir"));
-  private static File jsonReportFile;
-
-  @After
-  public void after() throws Exception {
-    MetricsFactory.close();
-  }
-
-  /**
-   * Tests that the deprecated HIVE_METRICS_REPORTER config is used if the HIVE_CODAHALE_METRICS_REPORTER_CLASSES is missing.
-   */
-  @Test
-  public void testFallbackToDeprecatedConfig() throws Exception {
-
-    HiveConf conf = new HiveConf();
-
-    jsonReportFile = new File(workDir, "json_reporting");
-    jsonReportFile.delete();
-
-    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "local");
-    conf.setVar(HiveConf.ConfVars.HIVE_METRICS_CLASS, CodahaleMetrics.class.getCanonicalName());
-    conf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER, "JMX, JSON");
-    conf.setVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_LOCATION, jsonReportFile.toString());
-    conf.setVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_INTERVAL, "100ms");
-
-    MetricsFactory.init(conf);
-
-    int runs = 5;
-    for (int i = 0; i < runs; i++) {
-      MetricsFactory.getInstance().incrementCounter("count2");
-    }
-
-    // we expect json file to be updated
-    byte[] jsonData = MetricsTestUtils.getFileData(jsonReportFile.getAbsolutePath(), 2000, 3);
-    ObjectMapper objectMapper = new ObjectMapper();
-
-    JsonNode rootNode = objectMapper.readTree(jsonData);
-    JsonNode countersNode = rootNode.path("counters");
-    JsonNode methodCounterNode = countersNode.path("count2");
-    JsonNode countNode = methodCounterNode.path("count");
-    Assert.assertEquals(countNode.asInt(), 5);
-  }
-
-  /**
-   * Tests that the deprecated HIVE_METRICS_REPORTER config is not used if
-   * HIVE_CODAHALE_METRICS_REPORTER_CLASSES is present.
-   *
-   * The deprecated config specifies json reporters whereas the newer one doesn't. Validates that
-   * the JSON file is not created.
-   */
-  @Test
-  public void testNoFallback() throws Exception {
-
-    HiveConf conf = new HiveConf();
-
-    jsonReportFile = new File(workDir, "json_reporting");
-    jsonReportFile.delete();
-
-    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "local");
-    conf.setVar(HiveConf.ConfVars.HIVE_METRICS_CLASS, CodahaleMetrics.class.getCanonicalName());
-    conf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER, "JMX, JSON");
-    conf.setVar(HiveConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES,
-             "org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter");
-    conf.setVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_LOCATION, jsonReportFile.toString());
-    conf.setVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_INTERVAL, "100ms");
-
-    MetricsFactory.init(conf);
-
-    int runs = 5;
-    for (int i = 0; i < runs; i++) {
-      MetricsFactory.getInstance().incrementCounter("count2");
-    }
-
-    Assert.assertFalse(jsonReportFile.exists());
-  }
-
-  /**
-   * Tests that the deprecated HIVE_METRICS_REPORTER config is not used if
-   * HIVE_CODAHALE_METRICS_REPORTER_CLASSES is present but incorrect.
-   *
-   * The deprecated config specifies json reporters whereas the newer one doesn't. Validates that
-   * the JSON file is not created.
-   */
-  @Test
-  public void testNoFallbackOnIncorrectConf() throws Exception {
-
-    HiveConf conf = new HiveConf();
-
-    jsonReportFile = new File(workDir, "json_reporting");
-    jsonReportFile.delete();
-
-    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "local");
-    conf.setVar(HiveConf.ConfVars.HIVE_METRICS_CLASS, CodahaleMetrics.class.getCanonicalName());
-    conf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER, "JMX, JSON");
-    conf.setVar(HiveConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES,
-        "org.apache.hadoop.hive.common.metrics.NonExistentReporter");
-    conf.setVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_LOCATION, jsonReportFile.toString());
-    conf.setVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_INTERVAL, "100ms");
-
-    try {
-      MetricsFactory.init(conf);
-    } catch (InvocationTargetException expectedException) {
-
-    }
-
-    Assert.assertFalse(jsonReportFile.exists());
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/contrib/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/pom.xml b/contrib/pom.xml
index 7423e31..6cc4931 100644
--- a/contrib/pom.xml
+++ b/contrib/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/contrib/src/test/results/clientnegative/case_with_row_sequence.q.out
----------------------------------------------------------------------
diff --git a/contrib/src/test/results/clientnegative/case_with_row_sequence.q.out b/contrib/src/test/results/clientnegative/case_with_row_sequence.q.out
index cf92da6..9ee319f 100644
--- a/contrib/src/test/results/clientnegative/case_with_row_sequence.q.out
+++ b/contrib/src/test/results/clientnegative/case_with_row_sequence.q.out
@@ -10,4 +10,18 @@ POSTHOOK: query: create temporary function row_sequence as
 'org.apache.hadoop.hive.contrib.udf.UDFRowSequence'
 POSTHOOK: type: CREATEFUNCTION
 POSTHOOK: Output: row_sequence
-FAILED: SemanticException Stateful expressions cannot be used inside of CASE
+PREHOOK: query: SELECT CASE WHEN 3 > 2 THEN 10 WHEN row_sequence() > 5 THEN 20 ELSE 30 END
+FROM src LIMIT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+Execution failed with exit status: 2
+Obtaining error information
+
+Task failed!
+Task ID:
+  Stage-1
+
+Logs:
+
+FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.mr.MapRedTask

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/data/files/e011_01.txt
----------------------------------------------------------------------
diff --git a/data/files/e011_01.txt b/data/files/e011_01.txt
deleted file mode 100644
index 92df12a..0000000
--- a/data/files/e011_01.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-12
-34
-56
-78

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/data/files/events.txt
----------------------------------------------------------------------
diff --git a/data/files/events.txt b/data/files/events.txt
deleted file mode 100644
index e0bd0be..0000000
--- a/data/files/events.txt
+++ /dev/null
@@ -1,200 +0,0 @@
-1111,20121121,1111,1,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,2,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,3,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,4,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,5,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,6,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,7,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,8,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,9,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,10,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,11,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,12,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,13,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,14,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,15,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,16,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,17,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,18,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,19,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,20,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,21,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,22,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,23,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,24,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,25,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,26,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,27,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,28,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,29,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,30,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,31,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,32,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,33,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,34,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,35,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,36,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,37,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,38,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,39,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,40,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,41,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,42,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,43,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,44,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,45,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,46,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,47,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,48,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,49,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121121,1111,50,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121021,1111,1,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,2,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,3,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,4,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,5,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,6,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,7,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,8,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,9,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,10,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,11,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,12,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,13,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,14,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,15,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,16,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,17,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,18,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,19,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,20,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,21,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,22,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,23,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,24,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,25,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,26,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,27,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,28,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,29,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,30,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,31,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,32,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,33,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,34,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,35,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,36,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,37,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,38,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,39,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,40,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,41,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,42,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,43,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,44,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,45,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,46,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,47,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,48,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,49,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121021,1111,50,type1,eventData,session,full_uid,20121021,39,hq_change
-1111,20121221,1111,1,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,2,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,3,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,4,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,5,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,6,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,7,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,8,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,9,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,10,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,11,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,12,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,13,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,14,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,15,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,16,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,17,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,18,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,19,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,20,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,21,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,22,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,23,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,24,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,25,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,26,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,27,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,28,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,29,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,30,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,31,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,32,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,33,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,34,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,35,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,36,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,37,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,38,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,39,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,40,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,41,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,42,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,43,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,44,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,45,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,46,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,47,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,48,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,49,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20121221,1111,50,type1,eventData,session,full_uid,20121121,39,hq_change
-1111,20120921,1111,1,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,2,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,3,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,4,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,5,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,6,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,7,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,8,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,9,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,10,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,11,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,12,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,13,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,14,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,15,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,16,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,17,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,18,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,19,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,20,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,21,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,22,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,23,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,24,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,25,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,26,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,27,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,28,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,29,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,30,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,31,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,32,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,33,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,34,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,35,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,36,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,37,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,38,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,39,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,40,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,41,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,42,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,43,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,44,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,45,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,46,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,47,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,48,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,49,type1,eventData,session,full_uid,20120921,39,hq_change
-1111,20120921,1111,50,type1,eventData,session,full_uid,20120921,39,hq_change

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/data/files/tpcds-perf/metastore_export/csv/TABLE_PARAMS.txt
----------------------------------------------------------------------
diff --git a/data/files/tpcds-perf/metastore_export/csv/TABLE_PARAMS.txt b/data/files/tpcds-perf/metastore_export/csv/TABLE_PARAMS.txt
new file mode 100644
index 0000000..78020f1
--- /dev/null
+++ b/data/files/tpcds-perf/metastore_export/csv/TABLE_PARAMS.txt
@@ -0,0 +1,143 @@
+_store_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"s_store_sk":"true","s_store_id":"true","s_rec_start_date":"true","s_rec_end_date":"true","s_closed_date_sk":"true","s_store_name":"true","s_number_employees":"true","s_floor_space":"true","s_hours":"true","s_manager":"true","s_market_id":"true","s_geography_class":"true","s_market_desc":"true","s_market_manager":"true","s_division_id":"true","s_division_name":"true","s_company_id":"true","s_company_name":"true","s_street_number":"true","s_street_name":"true","s_street_type":"true","s_suite_number":"true","s_city":"true","s_county":"true","s_state":"true","s_zip":"true","s_country":"true","s_gmt_offset":"true","s_tax_precentage":"true"},"BASIC_STATS":"true"}
+_store_@numFiles@1
+_store_@numRows@1704
+_store_@rawDataSize@3256276
+_store_@totalSize@101707
+_store_@transient_lastDdlTime@1434562098
+_call_center_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"cc_call_center_sk":"true","cc_call_center_id":"true","cc_rec_start_date":"true","cc_rec_end_date":"true","cc_closed_date_sk":"true","cc_open_date_sk":"true","cc_name":"true","cc_class":"true","cc_employees":"true","cc_sq_ft":"true","cc_hours":"true","cc_manager":"true","cc_mkt_id":"true","cc_mkt_class":"true","cc_mkt_desc":"true","cc_market_manager":"true","cc_division":"true","cc_division_name":"true","cc_company":"true","cc_company_name":"true","cc_street_number":"true","cc_street_name":"true","cc_street_type":"true","cc_suite_number":"true","cc_city":"true","cc_county":"true","cc_state":"true","cc_zip":"true","cc_country":"true","cc_gmt_offset":"true","cc_tax_percentage":"true"},"BASIC_STATS":"true"}
+_call_center_@numFiles@1
+_call_center_@numRows@60
+_call_center_@rawDataSize@122700
+_call_center_@totalSize@10347
+_call_center_@transient_lastDdlTime@1434561922
+_catalog_page_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"cp_catalog_page_sk":"true","cp_catalog_page_id":"true","cp_start_date_sk":"true","cp_end_date_sk":"true","cp_department":"true","cp_catalog_number":"true","cp_catalog_page_number":"true","cp_description":"true","cp_type":"true"},"BASIC_STATS":"true"}
+_catalog_page_@numFiles@1
+_catalog_page_@numRows@46000
+_catalog_page_@rawDataSize@21198808
+_catalog_page_@totalSize@1576662
+_catalog_page_@transient_lastDdlTime@1434561925
+_customer_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"c_customer_sk":"true","c_customer_id":"true","c_current_cdemo_sk":"true","c_current_hdemo_sk":"true","c_current_addr_sk":"true","c_first_shipto_date_sk":"true","c_first_sales_date_sk":"true","c_salutation":"true","c_first_name":"true","c_last_name":"true","c_preferred_cust_flag":"true","c_birth_day":"true","c_birth_month":"true","c_birth_year":"true","c_birth_country":"true","c_login":"true","c_email_address":"true","c_last_review_date":"true"},"BASIC_STATS":"true"}
+_customer_@numFiles@538
+_customer_@numRows@80000000
+_customer_@rawDataSize@68801615852
+_customer_@totalSize@3143935054
+_customer_@transient_lastDdlTime@1434561966
+_customer_address_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"ca_address_sk":"true","ca_address_id":"true","ca_street_number":"true","ca_street_name":"true","ca_street_type":"true","ca_suite_number":"true","ca_city":"true","ca_county":"true","ca_state":"true","ca_zip":"true","ca_country":"true","ca_gmt_offset":"true","ca_location_type":"true"},"BASIC_STATS":"true"}
+_customer_address_@numFiles@274
+_customer_address_@numRows@40000000
+_customer_address_@rawDataSize@40595195284
+_customer_address_@totalSize@530195843
+_customer_address_@transient_lastDdlTime@1434561994
+_customer_demographics_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"cd_demo_sk":"true","cd_gender":"true","cd_marital_status":"true","cd_education_status":"true","cd_purchase_estimate":"true","cd_credit_rating":"true","cd_dep_count":"true","cd_dep_employed_count":"true","cd_dep_college_count":"true"},"BASIC_STATS":"true"}
+_customer_demographics_@numFiles@8
+_customer_demographics_@numRows@1861800
+_customer_demographics_@rawDataSize@717186159
+_customer_demographics_@totalSize@323062
+_customer_demographics_@transient_lastDdlTime@1434562071
+_date_dim_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"d_date_sk":"true","d_date_id":"true","d_date":"true","d_month_seq":"true","d_week_seq":"true","d_quarter_seq":"true","d_year":"true","d_dow":"true","d_moy":"true","d_dom":"true","d_qoy":"true","d_fy_year":"true","d_fy_quarter_seq":"true","d_fy_week_seq":"true","d_day_name":"true","d_quarter_name":"true","d_holiday":"true","d_weekend":"true","d_following_holiday":"true","d_first_dom":"true","d_last_dom":"true","d_same_day_ly":"true","d_same_day_lq":"true","d_current_day":"true","d_current_week":"true","d_current_month":"true","d_current_quarter":"true","d_current_year":"true"},"BASIC_STATS":"true"}
+_date_dim_@numFiles@1
+_date_dim_@numRows@73049
+_date_dim_@rawDataSize@81741831
+_date_dim_@totalSize@362925
+_date_dim_@transient_lastDdlTime@1434562075
+_household_demographics_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"hd_demo_sk":"true","hd_income_band_sk":"true","hd_buy_potential":"true","hd_dep_count":"true","hd_vehicle_count":"true"},"BASIC_STATS":"true"}
+_household_demographics_@numFiles@1
+_household_demographics_@numRows@7200
+_household_demographics_@rawDataSize@770400
+_household_demographics_@totalSize@901
+_household_demographics_@transient_lastDdlTime@1434562078
+_income_band_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"ib_income_band_sk":"true","ib_lower_bound":"true","ib_upper_bound":"true"},"BASIC_STATS":"true"}
+_income_band_@numFiles@1
+_income_band_@numRows@20
+_income_band_@rawDataSize@240
+_income_band_@totalSize@399
+_income_band_@transient_lastDdlTime@1434562081
+_item_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"i_item_sk":"true","i_item_id":"true","i_rec_start_date":"true","i_rec_end_date":"true","i_item_desc":"true","i_current_price":"true","i_wholesale_cost":"true","i_brand_id":"true","i_brand":"true","i_class_id":"true","i_class":"true","i_category_id":"true","i_category":"true","i_manufact_id":"true","i_manufact":"true","i_size":"true","i_formulation":"true","i_color":"true","i_units":"true","i_container":"true","i_manager_id":"true","i_product_name":"true"},"BASIC_STATS":"true"}
+_item_@numFiles@9
+_item_@numRows@462000
+_item_@rawDataSize@663560457
+_item_@totalSize@29760748
+_item_@transient_lastDdlTime@1434562091
+_promotion_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"p_promo_sk":"true","p_promo_id":"true","p_start_date_sk":"true","p_end_date_sk":"true","p_item_sk":"true","p_cost":"true","p_response_target":"true","p_promo_name":"true","p_channel_dmail":"true","p_channel_email":"true","p_channel_catalog":"true","p_channel_tv":"true","p_channel_radio":"true","p_channel_press":"true","p_channel_event":"true","p_channel_demo":"true","p_channel_details":"true","p_purpose":"true","p_discount_active":"true"},"BASIC_STATS":"true"}
+_promotion_@numFiles@1
+_promotion_@numRows@2300
+_promotion_@rawDataSize@2713420
+_promotion_@totalSize@63964
+_promotion_@transient_lastDdlTime@1434562093
+_reason_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"r_reason_sk":"true","r_reason_id":"true","r_reason_desc":"true"},"BASIC_STATS":"true"}
+_reason_@numFiles@1
+_reason_@numRows@72
+_reason_@rawDataSize@14400
+_reason_@totalSize@1024
+_reason_@transient_lastDdlTime@1434562095
+_ship_mode_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"sm_ship_mode_sk":"true","sm_ship_mode_id":"true","sm_type":"true","sm_code":"true","sm_carrier":"true","sm_contract":"true"},"BASIC_STATS":"true"}
+_ship_mode_@numFiles@0
+_ship_mode_@numRows@0
+_ship_mode_@rawDataSize@0
+_ship_mode_@totalSize@0
+_ship_mode_@transient_lastDdlTime@1434562097
+_time_dim_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"t_time_sk":"true","t_time_id":"true","t_time":"true","t_hour":"true","t_minute":"true","t_second":"true","t_am_pm":"true","t_shift":"true","t_sub_shift":"true","t_meal_time":"true"},"BASIC_STATS":"true"}
+_time_dim_@numFiles@1
+_time_dim_@numRows@86400
+_time_dim_@rawDataSize@40694400
+_time_dim_@totalSize@133902
+_time_dim_@transient_lastDdlTime@1434562099
+_warehouse_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"w_warehouse_sk":"true","w_warehouse_id":"true","w_warehouse_name":"true","w_warehouse_sq_ft":"true","w_street_number":"true","w_street_name":"true","w_street_type":"true","w_suite_number":"true","w_city":"true","w_county":"true","w_state":"true","w_zip":"true","w_country":"true","w_gmt_offset":"true"},"BASIC_STATS":"true"}
+_warehouse_@numFiles@1
+_warehouse_@numRows@27
+_warehouse_@rawDataSize@27802
+_warehouse_@totalSize@2971
+_warehouse_@transient_lastDdlTime@1434562102
+_web_page_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"wp_web_page_sk":"true","wp_web_page_id":"true","wp_rec_start_date":"true","wp_rec_end_date":"true","wp_creation_date_sk":"true","wp_access_date_sk":"true","wp_autogen_flag":"true","wp_customer_sk":"true","wp_url":"true","wp_type":"true","wp_char_count":"true","wp_link_count":"true","wp_image_count":"true","wp_max_ad_count":"true"},"BASIC_STATS":"true"}
+_web_page_@numFiles@1
+_web_page_@numRows@4602
+_web_page_@rawDataSize@2696178
+_web_page_@totalSize@50572
+_web_page_@transient_lastDdlTime@1434562104
+_web_site_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"web_site_sk":"true","web_site_id":"true","web_rec_start_date":"true","web_rec_end_date":"true","web_name":"true","web_open_date_sk":"true","web_close_date_sk":"true","web_class":"true","web_manager":"true","web_mkt_id":"true","web_mkt_class":"true","web_mkt_desc":"true","web_market_manager":"true","web_company_id":"true","web_company_name":"true","web_street_number":"true","web_street_name":"true","web_street_type":"true","web_suite_number":"true","web_city":"true","web_county":"true","web_state":"true","web_zip":"true","web_country":"true","web_gmt_offset":"true","web_tax_percentage":"true"},"BASIC_STATS":"true"}
+_web_site_@numFiles@1
+_web_site_@numRows@84
+_web_site_@rawDataSize@155408
+_web_site_@totalSize@11271
+_web_site_@transient_lastDdlTime@1434562107
+_catalog_returns_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"cr_returned_date_sk":"true","cr_returned_time_sk":"true","cr_item_sk":"true","cr_refunded_customer_sk":"true","cr_refunded_cdemo_sk":"true","cr_refunded_hdemo_sk":"true","cr_refunded_addr_sk":"true","cr_returning_customer_sk":"true","cr_returning_cdemo_sk":"true","cr_returning_hdemo_sk":"true","cr_returning_addr_sk":"true","cr_call_center_sk":"true","cr_catalog_page_sk":"true","cr_ship_mode_sk":"true","cr_warehouse_sk":"true","cr_reason_sk":"true","cr_order_number":"true","cr_return_quantity":"true","cr_return_amount":"true","cr_return_tax":"true","cr_return_amt_inc_tax":"true","cr_fee":"true","cr_return_ship_cost":"true","cr_refunded_cash":"true","cr_reversed_charge":"true","cr_store_credit":"true","cr_net_loss":"true"},"BASIC_STATS":"true"}
+_catalog_returns_@numFiles@400
+_catalog_returns_@numRows@28798881
+_catalog_returns_@rawDataSize@3057234680
+_catalog_returns_@totalSize@1651022096
+_catalog_sales_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"cs_sold_date_sk":"true","cs_sold_time_sk":"true","cs_ship_date_sk":"true","cs_bill_customer_sk":"true","cs_bill_cdemo_sk":"true","cs_bill_hdemo_sk":"true","cs_bill_addr_sk":"true","cs_ship_customer_sk":"true","cs_ship_cdemo_sk":"true","cs_ship_hdemo_sk":"true","cs_ship_addr_sk":"true","cs_call_center_sk":"true","cs_catalog_page_sk":"true","cs_ship_mode_sk":"true","cs_warehouse_sk":"true","cs_item_sk":"true","cs_promo_sk":"true","cs_order_number":"true","cs_quantity":"true","cs_wholesale_cost":"true","cs_list_price":"true","cs_sales_price":"true","cs_ext_discount_amt":"true","cs_ext_sales_price":"true","cs_ext_wholesale_cost":"true","cs_ext_list_price":"true","cs_ext_tax":"true","cs_coupon_amt":"true","cs_ext_ship_cost":"true","cs_net_paid":"true","cs_net_paid_inc_tax":"true","cs_net_paid_inc_ship":"true","cs_net_paid_inc_ship_tax":"true","cs_net_profit":"true"},"BASIC_STATS":"true"}
+_catalog_sales_@numFiles@552
+_catalog_sales_@numRows@287989836
+_catalog_sales_@rawDataSize@38999608952
+_catalog_sales_@totalSize@16430853294
+_catalog_sales_@transient_lastDdlTime@1434700893
+_inventory_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"inv_date_sk":"true","inv_item_sk":"true","inv_warehouse_sk":"true","inv_quantity_on_hand":"true"},"BASIC_STATS":"true"}
+_inventory_@numFiles@51
+_inventory_@numRows@37584000
+_inventory_@rawDataSize@593821104
+_inventory_@totalSize@58323594
+_inventory_@transient_lastDdlTime@1434681166
+_store_sales_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"ss_sold_date_sk":"true","ss_sold_time_sk":"true","ss_item_sk":"true","ss_customer_sk":"true","ss_cdemo_sk":"true","ss_hdemo_sk":"true","ss_addr_sk":"true","ss_store_sk":"true","ss_promo_sk":"true","ss_ticket_number":"true","ss_quantity":"true","ss_wholesale_cost":"true","ss_list_price":"true","ss_sales_price":"true","ss_ext_discount_amt":"true","ss_ext_sales_price":"true","ss_ext_wholesale_cost":"true","ss_ext_list_price":"true","ss_ext_tax":"true","ss_coupon_amt":"true","ss_net_paid":"true","ss_net_paid_inc_tax":"true","ss_net_profit":"true"},"BASIC_STATS":"true"}
+_store_sales_@numFiles@600
+_store_sales_@numRows@575995635
+_store_sales_@rawDataSize@50814502088
+_store_sales_@totalSize@22288384284
+_store_sales_@transient_lastDdlTime@1434700760
+_web_sales_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"ws_sold_date_sk":"true","ws_sold_time_sk":"true","ws_ship_date_sk":"true","ws_item_sk":"true","ws_bill_customer_sk":"true","ws_bill_cdemo_sk":"true","ws_bill_hdemo_sk":"true","ws_bill_addr_sk":"true","ws_ship_customer_sk":"true","ws_ship_cdemo_sk":"true","ws_ship_hdemo_sk":"true","ws_ship_addr_sk":"true","ws_web_page_sk":"true","ws_web_site_sk":"true","ws_ship_mode_sk":"true","ws_warehouse_sk":"true","ws_promo_sk":"true","ws_order_number":"true","ws_quantity":"true","ws_wholesale_cost":"true","ws_list_price":"true","ws_sales_price":"true","ws_ext_discount_amt":"true","ws_ext_sales_price":"true","ws_ext_wholesale_cost":"true","ws_ext_list_price":"true","ws_ext_tax":"true","ws_coupon_amt":"true","ws_ext_ship_cost":"true","ws_net_paid":"true","ws_net_paid_inc_tax":"true","ws_net_paid_inc_ship":"true","ws_net_paid_inc_ship_tax":"true","ws_net_profit":"true"},"BASIC_STATS":"true"}
+_web_sales_@numFiles@529
+_web_sales_@numRows@144002668
+_web_sales_@rawDataSize@19580198212
+_web_sales_@totalSize@8304889016
+_web_sales_@transient_lastDdlTime@1434700829
+_web_returns_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"wr_returned_date_sk":"true","wr_returned_time_sk":"true","wr_item_sk":"true","wr_refunded_customer_sk":"true","wr_refunded_cdemo_sk":"true","wr_refunded_hdemo_sk":"true","wr_refunded_addr_sk":"true","wr_returning_customer_sk":"true","wr_returning_cdemo_sk":"true","wr_returning_hdemo_sk":"true","wr_returning_addr_sk":"true","wr_web_page_sk":"true","wr_reason_sk":"true","wr_order_number":"true","wr_return_quantity":"true","wr_return_amt":"true","wr_return_tax":"true","wr_return_amt_inc_tax":"true","wr_fee":"true","wr_return_ship_cost":"true","wr_refunded_cash":"true","wr_reversed_charge":"true","wr_account_credit":"true","wr_net_loss":"true"},"BASIC_STATS":"true"}
+_web_returns_@numFiles@135
+_web_returns_@numRows@14398467
+_web_returns_@rawDataSize@1325194184
+_web_returns_@totalSize@827734274
+_web_returns_@transient_lastDdlTime@1434700842
+_store_returns_@COLUMN_STATS_ACCURATE@{"COLUMN_STATS":{"sr_returned_date_sk":"true","sr_return_time_sk":"true","sr_item_sk":"true","sr_customer_sk":"true","sr_cdemo_sk":"true","sr_hdemo_sk":"true","sr_addr_sk":"true","sr_store_sk":"true","sr_reason_sk":"true","sr_ticket_number":"true","sr_return_quantity":"true","sr_return_amt":"true","sr_return_tax":"true","sr_return_amt_inc_tax":"true","sr_fee":"true","sr_return_ship_cost":"true","sr_refunded_cash":"true","sr_reversed_charge":"true","sr_store_credit":"true","sr_net_loss":"true"},"BASIC_STATS":"true"}
+_store_returns_@numFiles@573
+_store_returns_@numRows@57591150
+_store_returns_@rawDataSize@4462194832
+_store_returns_@totalSize@2683203627
+_store_returns_@transient_lastDdlTime@1434700790
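
The new TABLE_PARAMS.txt above stores the TPC-DS perf metastore table parameters one per line in the form <table>@<parameter>@<value>. A minimal hypothetical sketch of grouping such a file per table (class and method names are invented, not part of the patch):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class TableParamsSketch {
  // Groups lines of the form "<table>@<parameter>@<value>" by table name.
  public static Map<String, Map<String, String>> read(String path) throws IOException {
    Map<String, Map<String, String>> byTable = new LinkedHashMap<>();
    for (String line : Files.readAllLines(Paths.get(path))) {
      if (line.trim().isEmpty()) {
        continue;
      }
      String[] parts = line.split("@", 3); // split into table, key, value only
      if (parts.length < 3) {
        continue; // skip malformed lines
      }
      byTable.computeIfAbsent(parts[0], t -> new HashMap<>()).put(parts[1], parts[2]);
    }
    return byTable;
  }
}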


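The remainder of this message reverts llap-server's LlapStatusServiceDriver.java, folding AppStatusBuilder, AmInfo, and LlapInstance back in as nested classes with fluent setters and removing the Slider diagnostics path. A hypothetical usage snippet of those fluent classes (assumed to sit inside LlapStatusServiceDriver, where the package-private nested classes are visible; every value is invented for illustration):

// Hypothetical snippet, not part of the patch.
AppStatusBuilder status = new AppStatusBuilder()
    .setState(State.LAUNCHING)       // State enum as used elsewhere in this driver
    .setDesiredInstances(4)
    .setLiveInstances(1);
status.setAmInfo(new AmInfo()
    .setAppName("llap0")             // invented Slider application name
    .setAppType("org-apache-slider"));
status.addNewLlapInstance(new LlapInstance("host-1.example.com", "container_0001_01_000002")
    .setRpcPort(15001)
    .setMgmtPort(15004)
    .setShufflePort(15551));
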
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusServiceDriver.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusServiceDriver.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusServiceDriver.java
index 1b57e38..b36d4ff 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusServiceDriver.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusServiceDriver.java
@@ -24,11 +24,11 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintWriter;
-import java.net.URISyntaxException;
 import java.text.DecimalFormat;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
+import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -41,28 +41,20 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.llap.cli.LlapStatusOptionsProcessor.LlapStatusOptions;
-import org.apache.hadoop.hive.llap.cli.status.LlapStatusHelpers;
-import org.apache.hadoop.hive.llap.cli.status.LlapStatusHelpers.AppStatusBuilder;
-import org.apache.hadoop.hive.llap.cli.status.LlapStatusHelpers.LlapInstance;
 import org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration;
 import org.apache.hadoop.hive.llap.registry.ServiceInstance;
 import org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.slider.api.ClusterDescription;
 import org.apache.slider.api.ClusterDescriptionKeys;
-import org.apache.slider.api.StateValues;
 import org.apache.slider.api.StatusKeys;
-import org.apache.slider.api.types.ApplicationDiagnostics;
-import org.apache.slider.api.types.ContainerInformation;
 import org.apache.slider.client.SliderClient;
-import org.apache.slider.common.params.ActionDiagnosticArgs;
 import org.apache.slider.core.exceptions.SliderException;
+import org.codehaus.jackson.annotate.JsonIgnore;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.map.SerializationConfig;
 import org.codehaus.jackson.map.annotate.JsonSerialize;
@@ -72,7 +64,6 @@ import org.slf4j.LoggerFactory;
 public class LlapStatusServiceDriver {
 
   private static final Logger LOG = LoggerFactory.getLogger(LlapStatusServiceDriver.class);
-  private static final Logger CONSOLE_LOGGER = LoggerFactory.getLogger("LlapStatusServiceDriverConsole");
 
   // Defining a bunch of configs here instead of in HiveConf. These are experimental, and mainly
   // for use when retry handling is fixed in Yarn/Hadoop
@@ -113,8 +104,6 @@ public class LlapStatusServiceDriver {
       CONF_PREFIX + "zk-registry.timeout-ms";
   private static final long CONFIG_LLAP_ZK_REGISTRY_TIMEOUT_MS_DEFAULT = 20000l;
 
-  private static final long LOG_SUMMARY_INTERVAL = 15000L; // Log summary every ~15 seconds.
-
   private static final String LLAP_KEY = "LLAP";
   private final Configuration conf;
   private final Clock clock = new SystemClock();
@@ -172,8 +161,7 @@ public class LlapStatusServiceDriver {
    * @param args
    * @return command line options.
    */
-  public LlapStatusOptions parseOptions(String[] args) throws
-      LlapStatusCliException {
+  public LlapStatusOptions parseOptions(String[] args) throws LlapStatusCliException {
 
     LlapStatusOptionsProcessor optionsProcessor = new LlapStatusOptionsProcessor();
     LlapStatusOptions options;
@@ -221,21 +209,16 @@ public class LlapStatusServiceDriver {
       }
 
       try {
-        if (sliderClient == null) {
-          sliderClient = LlapSliderUtils.createSliderClient(conf);
-        }
-      } catch (Exception e) {
-        LlapStatusCliException le = new LlapStatusCliException(
-            LlapStatusServiceDriver.ExitCode.SLIDER_CLIENT_ERROR_CREATE_FAILED,
-            "Failed to create slider client", e);
-        logError(le);
-        return le.getExitCode().getInt();
+        sliderClient = createSliderClient();
+      } catch (LlapStatusCliException e) {
+        logError(e);
+        return e.getExitCode().getInt();
       }
 
       // Get the App report from YARN
       ApplicationReport appReport;
       try {
-        appReport = LlapSliderUtils.getAppReport(appName, sliderClient, options.getFindAppTimeoutMs());
+        appReport = getAppReport(appName, sliderClient, options.getFindAppTimeoutMs());
       } catch (LlapStatusCliException e) {
         logError(e);
         return e.getExitCode().getInt();
@@ -252,13 +235,13 @@ public class LlapStatusServiceDriver {
 
       if (ret != ExitCode.SUCCESS) {
         return ret.getInt();
-      } else if (EnumSet.of(LlapStatusHelpers.State.APP_NOT_FOUND, LlapStatusHelpers.State.COMPLETE, LlapStatusHelpers.State.LAUNCHING)
+      } else if (EnumSet.of(State.APP_NOT_FOUND, State.COMPLETE, State.LAUNCHING)
         .contains(appStatusBuilder.getState())) {
         return ExitCode.SUCCESS.getInt();
       } else {
         // Get information from slider.
         try {
-          ret = populateAppStatusFromSliderStatus(appName, sliderClient, appStatusBuilder);
+          ret = populateAppStatusFromSlider(appName, sliderClient, appStatusBuilder);
         } catch (LlapStatusCliException e) {
          // In case of failure, send back whatever is constructed so far - which would be from the AppReport
           logError(e);
@@ -266,18 +249,6 @@ public class LlapStatusServiceDriver {
         }
       }
 
-
-      if (ret != ExitCode.SUCCESS) {
-        return ret.getInt();
-      } else {
-        try {
-          ret = populateAppStatusFromSliderDiagnostics(appName, sliderClient, appStatusBuilder);
-        } catch (LlapStatusCliException e) {
-          logError(e);
-          return e.getExitCode().getInt();
-        }
-      }
-
       if (ret != ExitCode.SUCCESS) {
         return ret.getInt();
       } else {
@@ -297,8 +268,7 @@ public class LlapStatusServiceDriver {
     }
   }
 
-  public void outputJson(PrintWriter writer) throws
-      LlapStatusCliException {
+  public void outputJson(PrintWriter writer) throws LlapStatusCliException {
     ObjectMapper mapper = new ObjectMapper();
     mapper.configure(SerializationConfig.Feature.FAIL_ON_EMPTY_BEANS, false);
     mapper.setSerializationInclusion(JsonSerialize.Inclusion.NON_NULL);
@@ -372,27 +342,25 @@ public class LlapStatusServiceDriver {
    * @throws LlapStatusCliException
    */
   private ExitCode processAppReport(ApplicationReport appReport,
-                               AppStatusBuilder appStatusBuilder) throws
-      LlapStatusCliException {
+                               AppStatusBuilder appStatusBuilder) throws LlapStatusCliException {
     if (appReport == null) {
-      appStatusBuilder.setState(LlapStatusHelpers.State.APP_NOT_FOUND);
+      appStatusBuilder.setState(State.APP_NOT_FOUND);
       LOG.info("No Application Found");
       return ExitCode.SUCCESS;
     }
 
-    // TODO Maybe add the YARN URL for the app.
     appStatusBuilder.setAmInfo(
-        new LlapStatusHelpers.AmInfo().setAppName(appReport.getName()).setAppType(appReport.getApplicationType()));
+        new AmInfo().setAppName(appReport.getName()).setAppType(appReport.getApplicationType()));
     appStatusBuilder.setAppStartTime(appReport.getStartTime());
     switch (appReport.getYarnApplicationState()) {
       case NEW:
       case NEW_SAVING:
       case SUBMITTED:
-        appStatusBuilder.setState(LlapStatusHelpers.State.LAUNCHING);
+        appStatusBuilder.setState(State.LAUNCHING);
         return ExitCode.SUCCESS;
       case ACCEPTED:
         appStatusBuilder.maybeCreateAndGetAmInfo().setAppId(appReport.getApplicationId().toString());
-        appStatusBuilder.setState(LlapStatusHelpers.State.LAUNCHING);
+        appStatusBuilder.setState(State.LAUNCHING);
         return ExitCode.SUCCESS;
       case RUNNING:
         appStatusBuilder.maybeCreateAndGetAmInfo().setAppId(appReport.getApplicationId().toString());
@@ -403,13 +371,7 @@ public class LlapStatusServiceDriver {
       case KILLED:
         appStatusBuilder.maybeCreateAndGetAmInfo().setAppId(appReport.getApplicationId().toString());
         appStatusBuilder.setAppFinishTime(appReport.getFinishTime());
-        appStatusBuilder.setState(LlapStatusHelpers.State.COMPLETE);
-        ApplicationDiagnostics appDiagnostics = LlapSliderUtils.getApplicationDiagnosticsFromYarnDiagnostics(appReport, LOG);
-        if (appDiagnostics == null) {
-          LOG.warn("AppDiagnostics not available for YARN application report");
-        } else {
-          processAppDiagnostics(appStatusBuilder, appDiagnostics, true);
-        }
+        appStatusBuilder.setState(State.COMPLETE);
         return ExitCode.SUCCESS;
       default:
         throw new LlapStatusCliException(ExitCode.INTERNAL_ERROR,
@@ -418,11 +380,7 @@ public class LlapStatusServiceDriver {
   }
 
 
-
-
-
   /**
-   * Populates information from SliderStatus.
    *
    * @param appName
    * @param sliderClient
@@ -430,7 +388,7 @@ public class LlapStatusServiceDriver {
    * @return an ExitCode. An ExitCode other than ExitCode.SUCCESS implies future progress not possible
    * @throws LlapStatusCliException
    */
-  private ExitCode populateAppStatusFromSliderStatus(String appName, SliderClient sliderClient, AppStatusBuilder appStatusBuilder) throws
+  private ExitCode populateAppStatusFromSlider(String appName, SliderClient sliderClient, AppStatusBuilder appStatusBuilder) throws
       LlapStatusCliException {
 
     ClusterDescription clusterDescription;
@@ -492,10 +450,9 @@ public class LlapStatusServiceDriver {
 
               String host = (String) containerParams.get("host");
 
-              LlapInstance
-                  llapInstance = new LlapInstance(host, containerIdString);
+              LlapInstance llapInstance = new LlapInstance(host, containerIdString);
 
-              appStatusBuilder.addNewRunningLlapInstance(llapInstance);
+              appStatusBuilder.addNewLlapInstance(llapInstance);
             }
           }
 
@@ -507,45 +464,8 @@ public class LlapStatusServiceDriver {
     }
   }
 
-  /**
-   * Populates information based on the slider diagnostics call. Must be invoked
-   * after populating status from slider status.
-   * @param appName
-   * @param sliderClient
-   * @param appStatusBuilder
-   * @return
-   * @throws LlapStatusCliException
-   */
-  private ExitCode populateAppStatusFromSliderDiagnostics(String appName,
-                                                          SliderClient sliderClient,
-                                                          AppStatusBuilder appStatusBuilder) throws
-      LlapStatusCliException {
-
-    ApplicationDiagnostics appDiagnostics;
-    try {
-      ActionDiagnosticArgs args = new ActionDiagnosticArgs();
-      args.containers = true;
-      args.name = appName;
-      appDiagnostics =
-          sliderClient.actionDiagnosticContainers(args);
-    } catch (YarnException | IOException | URISyntaxException e) {
-      throw new LlapStatusCliException(
-          ExitCode.SLIDER_CLIENT_ERROR_OTHER,
-          "Failed to get container diagnostics from slider", e);
-    }
-    if (appDiagnostics == null) {
-      LOG.info("Slider container diagnostics not available");
-      return ExitCode.SLIDER_CLIENT_ERROR_OTHER;
-    }
-
-    processAppDiagnostics(appStatusBuilder, appDiagnostics, false);
-
-    return ExitCode.SUCCESS;
-  }
 
   /**
-   * Populate additional information for containers from the LLAP registry. Must be invoked
-   * after Slider status. Also after slider-diagnostics.
    * @param appStatusBuilder
    * @return an ExitCode. An ExitCode other than ExitCode.SUCCESS implies future progress not possible
    * @throws LlapStatusCliException
@@ -571,12 +491,10 @@ public class LlapStatusServiceDriver {
     }
 
     if (serviceInstances == null || serviceInstances.isEmpty()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("No information found in the LLAP registry");
-      }
+      LOG.info("No information found in the LLAP registry");
       appStatusBuilder.setLiveInstances(0);
-      appStatusBuilder.setState(LlapStatusHelpers.State.LAUNCHING);
-      appStatusBuilder.clearRunningLlapInstances();
+      appStatusBuilder.setState(State.LAUNCHING);
+      appStatusBuilder.clearLlapInstances();
       return ExitCode.SUCCESS;
     } else {
       // Tracks instances known by both slider and llap.
@@ -587,7 +505,7 @@ public class LlapStatusServiceDriver {
         String containerIdString = serviceInstance.getProperties().get(
           HiveConf.ConfVars.LLAP_DAEMON_CONTAINER_ID.varname);
 
-        LlapInstance llapInstance = appStatusBuilder.removeAndGetRunningLlapInstanceForContainer(
+        LlapInstance llapInstance = appStatusBuilder.removeAndgetLlapInstanceForContainer(
           containerIdString);
         if (llapInstance != null) {
           llapInstance.setMgmtPort(serviceInstance.getManagementPort());
@@ -606,185 +524,375 @@ public class LlapStatusServiceDriver {
       }
 
       appStatusBuilder.setLiveInstances(validatedInstances.size());
-      appStatusBuilder.setLaunchingInstances(llapExtraInstances.size());
       if (validatedInstances.size() >= appStatusBuilder.getDesiredInstances()) {
-        appStatusBuilder.setState(LlapStatusHelpers.State.RUNNING_ALL);
+        appStatusBuilder.setState(State.RUNNING_ALL);
         if (validatedInstances.size() > appStatusBuilder.getDesiredInstances()) {
           LOG.warn("Found more entries in LLAP registry, as compared to desired entries");
         }
       } else {
         if (validatedInstances.size() > 0) {
-          appStatusBuilder.setState(LlapStatusHelpers.State.RUNNING_PARTIAL);
+          appStatusBuilder.setState(State.RUNNING_PARTIAL);
         } else {
-          appStatusBuilder.setState(LlapStatusHelpers.State.LAUNCHING);
+          appStatusBuilder.setState(State.LAUNCHING);
         }
       }
 
       // At this point, everything that can be consumed from AppStatusBuilder has been consumed.
       // Debug only
-      if (appStatusBuilder.allRunningInstances().size() > 0) {
+      if (appStatusBuilder.allInstances().size() > 0) {
         // Containers likely to come up soon.
-        LOG.debug("Potential instances starting up: {}", appStatusBuilder.allRunningInstances());
+        LOG.debug("Potential instances starting up: {}", appStatusBuilder.allInstances());
       }
       if (llapExtraInstances.size() > 0) {
-        // Old containers which are likely shutting down, or new containers which
-        // launched between slider-status/slider-diagnostics. Skip for this iteration.
+        // Old containers which are likely shutting down
         LOG.debug("Instances likely to shutdown soon: {}", llapExtraInstances);
       }
 
-      appStatusBuilder.clearAndAddPreviouslyKnownRunningInstances(validatedInstances);
+      appStatusBuilder.clearAndAddPreviouslyKnownInstances(validatedInstances);
 
     }
     return ExitCode.SUCCESS;
   }
 
 
-  private static void processAppDiagnostics(AppStatusBuilder appStatusBuilder,
-                                            ApplicationDiagnostics appDiagnostics, boolean appComplete) {
-    // For a running app this should be empty.
-    String finalMessage = appDiagnostics.getFinalMessage();
-    Collection<ContainerInformation> containerInfos =
-        appDiagnostics.getContainers();
-    appStatusBuilder.setDiagnostics(finalMessage);
-    if (containerInfos != null) {
-      for (ContainerInformation containerInformation : containerInfos) {
-        if (containerInformation.getState() == StateValues.STATE_LIVE && !appComplete) {
-          LlapInstance instance = appStatusBuilder
-              .removeAndGetCompletedLlapInstanceForContainer(
-                  containerInformation.getContainerId());
-          if (instance ==
-              null) { // New launch. Not available during slider status, but available now.
-            instance = new LlapInstance(containerInformation.getHost(),
-                containerInformation.getContainerId());
-          }
-          instance.setLogUrl(containerInformation.getLogLink());
-          appStatusBuilder.addNewRunningLlapInstance(instance);
-        } else if (containerInformation.getState() ==
-            StateValues.STATE_STOPPED || appComplete) {
-          LlapInstance instance =
-              new LlapInstance(containerInformation.getHost(),
-                  containerInformation.getContainerId());
-          instance.setLogUrl(containerInformation.getLogLink());
-          if (appComplete && containerInformation.getExitCode() !=
-              ContainerExitStatus.INVALID) {
-            instance
-                .setYarnContainerExitStatus(containerInformation.getExitCode());
-          }
-          instance.setDiagnostics(containerInformation.getDiagnostics());
-          appStatusBuilder.addNewCompleteLlapInstance(instance);
-        } else {
-          LOG.warn("Unexpected containerstate={}, for container={}",
-              containerInformation.getState(), containerInformation);
-        }
+  static final class AppStatusBuilder {
+
+    private AmInfo amInfo;
+    private State state = State.UNKNOWN;
+    private String originalConfigurationPath;
+    private String generatedConfigurationPath;
+
+    private int desiredInstances = -1;
+    private int liveInstances = -1;
+
+    private Long appStartTime;
+    private Long appFinishTime;
+
+    private boolean runningThresholdAchieved = false;
+
+    private final List<LlapInstance> llapInstances = new LinkedList<>();
+
+    private transient Map<String, LlapInstance> containerToInstanceMap = new HashMap<>();
+
+    public void setAmInfo(AmInfo amInfo) {
+      this.amInfo = amInfo;
+    }
+
+    public AppStatusBuilder setState(
+        State state) {
+      this.state = state;
+      return this;
+    }
+
+    public AppStatusBuilder setOriginalConfigurationPath(String originalConfigurationPath) {
+      this.originalConfigurationPath = originalConfigurationPath;
+      return this;
+    }
+
+    public AppStatusBuilder setGeneratedConfigurationPath(String generatedConfigurationPath) {
+      this.generatedConfigurationPath = generatedConfigurationPath;
+      return this;
+    }
+
+    public AppStatusBuilder setAppStartTime(long appStartTime) {
+      this.appStartTime = appStartTime;
+      return this;
+    }
+
+    public AppStatusBuilder setAppFinishTime(long finishTime) {
+      this.appFinishTime = finishTime;
+      return this;
+    }
+
+    public AppStatusBuilder setDesiredInstances(int desiredInstances) {
+      this.desiredInstances = desiredInstances;
+      return this;
+    }
+
+    public AppStatusBuilder setLiveInstances(int liveInstances) {
+      this.liveInstances = liveInstances;
+      return this;
+    }
+
+    public AppStatusBuilder addNewLlapInstance(LlapInstance llapInstance) {
+      this.llapInstances.add(llapInstance);
+      this.containerToInstanceMap.put(llapInstance.getContainerId(), llapInstance);
+      return this;
+    }
+
+    public AppStatusBuilder setRunningThresholdAchieved(boolean thresholdAchieved) {
+      this.runningThresholdAchieved = thresholdAchieved;
+      return this;
+    }
+
+    public LlapInstance removeAndgetLlapInstanceForContainer(String containerIdString) {
+      return containerToInstanceMap.remove(containerIdString);
+    }
+
+    public void clearLlapInstances() {
+      this.llapInstances.clear();
+      this.containerToInstanceMap.clear();
+    }
+
+    public AppStatusBuilder clearAndAddPreviouslyKnownInstances(List<LlapInstance> llapInstances) {
+      clearLlapInstances();
+      for (LlapInstance llapInstance : llapInstances) {
+        addNewLlapInstance(llapInstance);
       }
-    } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("ContainerInfos is null");
+      return this;
+    }
+
+    @JsonIgnore
+    public List<LlapInstance> allInstances() {
+      return this.llapInstances;
+    }
+
+    public AmInfo getAmInfo() {
+      return amInfo;
+    }
+
+    public State getState() {
+      return state;
+    }
+
+    public String getOriginalConfigurationPath() {
+      return originalConfigurationPath;
+    }
+
+    public String getGeneratedConfigurationPath() {
+      return generatedConfigurationPath;
+    }
+
+    public int getDesiredInstances() {
+      return desiredInstances;
+    }
+
+    public int getLiveInstances() {
+      return liveInstances;
+    }
+
+    public Long getAppStartTime() {
+      return appStartTime;
+    }
+
+    public Long getAppFinishTime() {
+      return appFinishTime;
+    }
+
+    public List<LlapInstance> getLlapInstances() {
+      return llapInstances;
+    }
+
+    public boolean isRunningThresholdAchieved() {
+      return runningThresholdAchieved;
+    }
+
+    @JsonIgnore
+    public AmInfo maybeCreateAndGetAmInfo() {
+      if (amInfo == null) {
+        amInfo = new AmInfo();
       }
+      return amInfo;
+    }
+
+    @Override
+    public String toString() {
+      return "AppStatusBuilder{" +
+          "amInfo=" + amInfo +
+          ", state=" + state +
+          ", originalConfigurationPath='" + originalConfigurationPath + '\'' +
+          ", generatedConfigurationPath='" + generatedConfigurationPath + '\'' +
+          ", desiredInstances=" + desiredInstances +
+          ", liveInstances=" + liveInstances +
+          ", appStartTime=" + appStartTime +
+          ", appFinishTime=" + appFinishTime +
+          ", llapInstances=" + llapInstances +
+          ", containerToInstanceMap=" + containerToInstanceMap +
+          '}';
     }
   }
 
-  private static String constructCompletedContainerDiagnostics(List<LlapInstance> completedInstances) {
-    StringBuilder sb = new StringBuilder();
-    if (completedInstances == null || completedInstances.size() == 0) {
-      return "";
-    } else {
-      // TODO HIVE-15865 Ideally sort these by completion time, once that is available.
-      boolean isFirst = true;
-      for (LlapInstance instance : completedInstances) {
-        if (!isFirst) {
-          sb.append("\n");
-        } else {
-          isFirst = false;
-        }
+  static class AmInfo {
+    private String appName;
+    private String appType;
+    private String appId;
+    private String containerId;
+    private String hostname;
+    private String amWebUrl;
+
+    public AmInfo setAppName(String appName) {
+      this.appName = appName;
+      return this;
+    }
 
-        if (instance.getYarnContainerExitStatus() ==
-            ContainerExitStatus.KILLED_EXCEEDED_PMEM ||
-            instance.getYarnContainerExitStatus() ==
-                ContainerExitStatus.KILLED_EXCEEDED_VMEM) {
-          sb.append("\tKILLED container (by YARN for exceeding memory limits): ");
-        } else {
-          // TODO HIVE-15865 Handle additional reasons like OS launch failed (Slider needs to give this info)
-          sb.append("\tFAILED container: ");
-        }
-        sb.append(" ").append(instance.getContainerId());
-        sb.append(", Logs at: ").append(instance.getLogUrl());
-      }
+    public AmInfo setAppType(String appType) {
+      this.appType = appType;
+      return this;
+    }
+
+    public AmInfo setAppId(String appId) {
+      this.appId = appId;
+      return this;
+    }
+
+    public AmInfo setContainerId(String containerId) {
+      this.containerId = containerId;
+      return this;
+    }
+
+    public AmInfo setHostname(String hostname) {
+      this.hostname = hostname;
+      return this;
+    }
+
+    public AmInfo setAmWebUrl(String amWebUrl) {
+      this.amWebUrl = amWebUrl;
+      return this;
+    }
+
+    public String getAppName() {
+      return appName;
+    }
+
+    public String getAppType() {
+      return appType;
+    }
+
+    public String getAppId() {
+      return appId;
+    }
+
+    public String getContainerId() {
+      return containerId;
+    }
+
+    public String getHostname() {
+      return hostname;
+    }
+
+    public String getAmWebUrl() {
+      return amWebUrl;
+    }
+
+    @Override
+    public String toString() {
+      return "AmInfo{" +
+          "appName='" + appName + '\'' +
+          ", appType='" + appType + '\'' +
+          ", appId='" + appId + '\'' +
+          ", containerId='" + containerId + '\'' +
+          ", hostname='" + hostname + '\'' +
+          ", amWebUrl='" + amWebUrl + '\'' +
+          '}';
     }
-    return sb.toString();
   }
 
-  /**
-   * Helper method to construct a diagnostic message from a complete
-   * AppStatusBuilder.
-   *
-   * @return
-   */
-  private static String constructDiagnostics(
-      AppStatusBuilder appStatusBuilder) {
-    StringBuilder sb = new StringBuilder();
-
-    switch (appStatusBuilder.getState()) {
-      case APP_NOT_FOUND:
-        sb.append("LLAP status unknown. Awaiting app launch");
-        break;
-      case LAUNCHING:
-        // This is a catch all state - when containers have not started yet, or LLAP has not started yet.
-        if (StringUtils.isNotBlank(appStatusBuilder.getAmInfo().getAppId())) {
-          sb.append("LLAP Starting up with AppId=")
-              .append(appStatusBuilder.getAmInfo().getAppId()).append(".");
-          if (appStatusBuilder.getDesiredInstances() != null) {
-            sb.append(" Started 0/").append(appStatusBuilder.getDesiredInstances()).append(" instances");
-          }
+  static class LlapInstance {
+    private final String hostname;
+    private final String containerId;
+    private String statusUrl;
+    private String webUrl;
+    private Integer rpcPort;
+    private Integer mgmtPort;
+    private Integer  shufflePort;
 
-          String containerDiagnostics = constructCompletedContainerDiagnostics(
-              appStatusBuilder.getCompletedInstances());
-          if (StringUtils.isNotEmpty(containerDiagnostics)) {
-            sb.append("\n").append(containerDiagnostics);
-          }
-        } else {
-          sb.append("Awaiting LLAP startup");
-        }
-        break;
-      case RUNNING_PARTIAL:
-        sb.append("LLAP Starting up with ApplicationId=")
-            .append(appStatusBuilder.getAmInfo().getAppId());
-        sb.append(" Started").append(appStatusBuilder.getLiveInstances())
-            .append("/").append(appStatusBuilder.getDesiredInstances())
-            .append(" instances");
-        String containerDiagnostics = constructCompletedContainerDiagnostics(
-            appStatusBuilder.getCompletedInstances());
-        if (StringUtils.isNotEmpty(containerDiagnostics)) {
-          sb.append("\n").append(containerDiagnostics);
-        }
+    // TODO HIVE-13454 Add additional information such as #executors, container size, etc
 
-        // TODO HIVE-15865: Include information about pending requests, and last allocation time
-        // once Slider provides this information.
-        break;
-      case RUNNING_ALL:
-        sb.append("LLAP Application running with ApplicationId=")
-            .append(appStatusBuilder.getAmInfo().getAppId());
-        break;
-      case COMPLETE:
-
-        sb.append("LLAP Application already complete. ApplicationId=")
-            .append(appStatusBuilder.getAmInfo().getAppId());
-        containerDiagnostics = constructCompletedContainerDiagnostics(
-            appStatusBuilder.getCompletedInstances());
-        if (StringUtils.isNotEmpty(containerDiagnostics)) {
-          sb.append("\n").append(containerDiagnostics);
-        }
+    public LlapInstance(String hostname, String containerId) {
+      this.hostname = hostname;
+      this.containerId = containerId;
+    }
+
+    public LlapInstance setWebUrl(String webUrl) {
+      this.webUrl = webUrl;
+      return this;
+    }
+
+    public LlapInstance setStatusUrl(String statusUrl) {
+      this.statusUrl = statusUrl;
+      return this;
+    }
+
+    public LlapInstance setRpcPort(int rpcPort) {
+      this.rpcPort = rpcPort;
+      return this;
+    }
+
+    public LlapInstance setMgmtPort(int mgmtPort) {
+      this.mgmtPort = mgmtPort;
+      return this;
+    }
 
-        break;
-      case UNKNOWN:
-        sb.append("LLAP status unknown");
-        break;
+    public LlapInstance setShufflePort(int shufflePort) {
+      this.shufflePort = shufflePort;
+      return this;
     }
-    if (StringUtils.isNotBlank(appStatusBuilder.getDiagnostics())) {
-      sb.append("\n").append(appStatusBuilder.getDiagnostics());
+
+    public String getHostname() {
+      return hostname;
+    }
+
+    public String getStatusUrl() {
+      return statusUrl;
+    }
+
+    public String getContainerId() {
+      return containerId;
+    }
+
+    public String getWebUrl() {
+      return webUrl;
+    }
+
+    public Integer getRpcPort() {
+      return rpcPort;
+    }
+
+    public Integer getMgmtPort() {
+      return mgmtPort;
+    }
+
+    public Integer getShufflePort() {
+      return shufflePort;
     }
 
-    return sb.toString();
+    @Override
+    public String toString() {
+      return "LlapInstance{" +
+          "hostname='" + hostname + '\'' +
+          ", containerId='" + containerId + '\'' +
+          ", statusUrl='" + statusUrl + '\'' +
+          ", webUrl='" + webUrl + '\'' +
+          ", rpcPort=" + rpcPort +
+          ", mgmtPort=" + mgmtPort +
+          ", shufflePort=" + shufflePort +
+          '}';
+    }
+  }
+
+  static class LlapStatusCliException extends Exception {
+    final ExitCode exitCode;
+
+
+    public LlapStatusCliException(ExitCode exitCode, String message) {
+      super(exitCode.getInt() +": " + message);
+      this.exitCode = exitCode;
+    }
+
+    public LlapStatusCliException(ExitCode exitCode, String message, Throwable cause) {
+      super(message, cause);
+      this.exitCode = exitCode;
+    }
+
+    public ExitCode getExitCode() {
+      return exitCode;
+    }
+  }
+
+  enum State {
+    APP_NOT_FOUND, LAUNCHING,
+    RUNNING_PARTIAL,
+    RUNNING_ALL, COMPLETE, UNKNOWN
   }
 
   public enum ExitCode {
@@ -810,26 +918,6 @@ public class LlapStatusServiceDriver {
   }
 
 
-  public static class LlapStatusCliException extends Exception {
-    final LlapStatusServiceDriver.ExitCode exitCode;
-
-
-    public LlapStatusCliException(LlapStatusServiceDriver.ExitCode exitCode, String message) {
-      super(exitCode.getInt() +": " + message);
-      this.exitCode = exitCode;
-    }
-
-    public LlapStatusCliException(LlapStatusServiceDriver.ExitCode exitCode, String message, Throwable cause) {
-      super(message, cause);
-      this.exitCode = exitCode;
-    }
-
-    public LlapStatusServiceDriver.ExitCode getExitCode() {
-      return exitCode;
-    }
-  }
-
-
   private static void logError(Throwable t) {
     LOG.error("FAILED: " + t.getMessage(), t);
     System.err.println("FAILED: " + t.getMessage());
@@ -839,9 +927,6 @@ public class LlapStatusServiceDriver {
   public static void main(String[] args) {
     LOG.info("LLAP status invoked with arguments = {}", Arrays.toString(args));
     int ret = ExitCode.SUCCESS.getInt();
-    Clock clock = new SystemClock();
-    long startTime = clock.getTime();
-    long lastSummaryLogTime = -1;
 
     LlapStatusServiceDriver statusServiceDriver = null;
     LlapStatusOptions options = null;
@@ -852,8 +937,7 @@ public class LlapStatusServiceDriver {
       statusServiceDriver.close();
       logError(t);
       if (t instanceof LlapStatusCliException) {
-        LlapStatusCliException
-            ce = (LlapStatusCliException) t;
+        LlapStatusCliException ce = (LlapStatusCliException) t;
         ret = ce.getExitCode().getInt();
       } else {
         ret = ExitCode.INTERNAL_ERROR.getInt();
@@ -866,14 +950,12 @@ public class LlapStatusServiceDriver {
       System.exit(ret);
     }
 
-    boolean firstAttempt = true;
     final long refreshInterval = options.getRefreshIntervalMs();
     final boolean watchMode = options.isWatchMode();
     final long watchTimeout = options.getWatchTimeoutMs();
     long numAttempts = watchTimeout / refreshInterval;
-    numAttempts = watchMode ? numAttempts : 1; // Break out of the loop fast if watchMode is disabled.
-    LlapStatusHelpers.State launchingState = null;
-    LlapStatusHelpers.State currentState = null;
+    State launchingState = null;
+    State currentState = null;
     boolean desiredStateAttained = false;
     final float runningNodesThreshold = options.getRunningNodesThreshold();
     try (OutputStream os = options.getOutputFile() == null ? System.out :
@@ -887,62 +969,28 @@ public class LlapStatusServiceDriver {
         numAttempts, watchMode, new DecimalFormat("#.###").format(runningNodesThreshold));
       while (numAttempts > 0) {
         try {
-          if (!firstAttempt) {
-            if (watchMode) {
-              try {
-                Thread.sleep(refreshInterval);
-              } catch (InterruptedException e) {
-                // ignore
-              }
-            } else {
-              // reported once, so break
-              break;
-            }
-          } else {
-            firstAttempt = false;
-          }
           ret = statusServiceDriver.run(options, watchMode ? watchTimeout : 0);
-          currentState = statusServiceDriver.appStatusBuilder.getState();
-          try {
-            lastSummaryLogTime = LlapStatusServiceDriver
-                .maybeLogSummary(clock, lastSummaryLogTime, statusServiceDriver,
-                    watchMode, watchTimeout, launchingState);
-          } catch (Exception e) {
-            LOG.warn("Failed to log summary", e);
-          }
-
           if (ret == ExitCode.SUCCESS.getInt()) {
             if (watchMode) {
+              currentState = statusServiceDriver.appStatusBuilder.state;
 
               // slider has started llap application, now if for some reason state changes to COMPLETE then fail fast
               if (launchingState == null &&
-                  (EnumSet.of(LlapStatusHelpers.State.LAUNCHING,
-                      LlapStatusHelpers.State.RUNNING_PARTIAL,
-                      LlapStatusHelpers.State.RUNNING_ALL)
-                      .contains(currentState))) {
+                (currentState.equals(State.LAUNCHING) || currentState.equals(State.RUNNING_PARTIAL))) {
                 launchingState = currentState;
               }
 
-              if (launchingState != null && currentState.equals(
-                  LlapStatusHelpers.State.COMPLETE)) {
+              if (launchingState != null && currentState.equals(State.COMPLETE)) {
                 LOG.warn("Application stopped while launching. COMPLETE state reached while waiting for RUNNING state."
                   + " Failing " + "fast..");
                 break;
               }
 
-              if (!(currentState.equals(LlapStatusHelpers.State.RUNNING_PARTIAL) || currentState.equals(
-                  LlapStatusHelpers.State.RUNNING_ALL))) {
-                if (LOG.isDebugEnabled()) {
-                  LOG.debug(
-                      "Current state: {}. Desired state: {}. {}/{} instances.",
-                      currentState,
-                      runningNodesThreshold == 1.0f ?
-                          LlapStatusHelpers.State.RUNNING_ALL :
-                          LlapStatusHelpers.State.RUNNING_PARTIAL,
-                      statusServiceDriver.appStatusBuilder.getLiveInstances(),
-                      statusServiceDriver.appStatusBuilder
-                          .getDesiredInstances());
-                }
+              if (!(currentState.equals(State.RUNNING_PARTIAL) || currentState.equals(State.RUNNING_ALL))) {
+                LOG.warn("Current state: {}. Desired state: {}. {}/{} instances.", currentState,
+                  runningNodesThreshold == 1.0f ? State.RUNNING_ALL : State.RUNNING_PARTIAL,
+                  statusServiceDriver.appStatusBuilder.getLiveInstances(),
+                  statusServiceDriver.appStatusBuilder.getDesiredInstances());
                 numAttempts--;
                 continue;
               }
@@ -953,17 +1001,11 @@ public class LlapStatusServiceDriver {
               if (desiredInstances > 0) {
                 final float ratio = (float) liveInstances / (float) desiredInstances;
                 if (ratio < runningNodesThreshold) {
-                  if (LOG.isDebugEnabled()) {
-                    LOG.debug(
-                        "Waiting until running nodes threshold is reached. Current: {} Desired: {}." +
-                            " {}/{} instances.",
-                        new DecimalFormat("#.###").format(ratio),
-                        new DecimalFormat("#.###")
-                            .format(runningNodesThreshold),
-                        statusServiceDriver.appStatusBuilder.getLiveInstances(),
-                        statusServiceDriver.appStatusBuilder
-                            .getDesiredInstances());
-                  }
+                  LOG.warn("Waiting until running nodes threshold is reached. Current: {} Desired: {}." +
+                      " {}/{} instances.", new DecimalFormat("#.###").format(ratio),
+                    new DecimalFormat("#.###").format(runningNodesThreshold),
+                    statusServiceDriver.appStatusBuilder.getLiveInstances(),
+                    statusServiceDriver.appStatusBuilder.getDesiredInstances());
                   numAttempts--;
                   continue;
                 } else {
@@ -994,14 +1036,18 @@ public class LlapStatusServiceDriver {
           }
           break;
         } finally {
-          // TODO Remove this before commit.
+          if (watchMode) {
+            try {
+              Thread.sleep(refreshInterval);
+            } catch (InterruptedException e) {
+              // ignore
+            }
+          } else {
+            // reported once, so break
+            break;
+          }
         }
       }
-      // Log final state to CONSOLE_LOGGER
-      LlapStatusServiceDriver
-          .maybeLogSummary(clock, 0L, statusServiceDriver,
-              watchMode, watchTimeout, launchingState);
-      CONSOLE_LOGGER.info("\n\n\n");
       // print current state before exiting
       statusServiceDriver.outputJson(pw);
       os.flush();
@@ -1013,8 +1059,7 @@ public class LlapStatusServiceDriver {
     } catch (Throwable t) {
       logError(t);
       if (t instanceof LlapStatusCliException) {
-        LlapStatusCliException
-            ce = (LlapStatusCliException) t;
+        LlapStatusCliException ce = (LlapStatusCliException) t;
         ret = ce.getExitCode().getInt();
       } else {
         ret = ExitCode.INTERNAL_ERROR.getInt();
@@ -1029,40 +1074,6 @@ public class LlapStatusServiceDriver {
     System.exit(ret);
   }
 
-  private static long maybeLogSummary(Clock clock, long lastSummaryLogTime,
-                                      LlapStatusServiceDriver statusServiceDriver,
-                                      boolean watchMode, long watchTimeout, LlapStatusHelpers.State launchingState) {
-    long currentTime = clock.getTime();
-    if (lastSummaryLogTime < currentTime - LOG_SUMMARY_INTERVAL) {
-      String diagString = null;
-      if (launchingState == null && statusServiceDriver.appStatusBuilder.getState() ==
-          LlapStatusHelpers.State.COMPLETE && watchMode) {
-        // First known state was COMPLETED. Wait for the app launch to start.
-        diagString = "Awaiting LLAP launch";
-        // Clear completed instances in this case. Don't want to provide information from the previous run.
-        statusServiceDriver.appStatusBuilder.clearCompletedLlapInstances();
-      } else {
-        diagString = constructDiagnostics(statusServiceDriver.appStatusBuilder);
-      }
-
-      if (lastSummaryLogTime == -1) {
-        if (watchMode) {
-          CONSOLE_LOGGER.info("\nLLAPSTATUS WatchMode with timeout={} s",
-              TimeUnit.SECONDS.convert(watchTimeout, TimeUnit.MILLISECONDS));
-        } else {
-          CONSOLE_LOGGER.info("\nLLAPSTATUS");
-        }
-        CONSOLE_LOGGER.info(
-            "--------------------------------------------------------------------------------");
-      }
-      CONSOLE_LOGGER.info(diagString);
-      CONSOLE_LOGGER.info(
-          "--------------------------------------------------------------------------------");
-      lastSummaryLogTime = currentTime;
-    }
-    return lastSummaryLogTime;
-  }
-
   private void close() {
     if (sliderClient != null) {
       sliderClient.stop();

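A side note on the main() hunks above: both the removed and the restored versions implement the same poll-and-wait pattern, that is, derive the number of attempts from the watch timeout and refresh interval, poll the application status, and stop once live/desired instances reach the running-nodes threshold. A minimal, self-contained sketch of that pattern follows; StatusSource and Snapshot are hypothetical stand-ins for the driver and its AppStatusBuilder, not Hive classes.

import java.util.concurrent.TimeUnit;

final class WatchLoopSketch {
  interface StatusSource { Snapshot poll(); }           // stand-in for LlapStatusServiceDriver.run()
  static final class Snapshot { int live; int desired; }

  static boolean waitUntilRunning(StatusSource source, long refreshMs,
                                  long timeoutMs, float threshold) throws InterruptedException {
    long attempts = Math.max(1, timeoutMs / refreshMs); // same derivation as numAttempts above
    while (attempts-- > 0) {
      Snapshot s = source.poll();
      if (s.desired > 0 && (float) s.live / s.desired >= threshold) {
        return true;                                    // threshold reached, stop watching
      }
      TimeUnit.MILLISECONDS.sleep(refreshMs);           // wait before the next poll
    }
    return false;                                       // timed out before reaching the threshold
  }
}
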
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/cli/status/LlapStatusHelpers.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/status/LlapStatusHelpers.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/status/LlapStatusHelpers.java
deleted file mode 100644
index 187f4c3..0000000
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/status/LlapStatusHelpers.java
+++ /dev/null
@@ -1,449 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.llap.cli.status;
-
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hive.llap.cli.LlapStatusServiceDriver;
-import org.codehaus.jackson.annotate.JsonIgnore;
-
-public class LlapStatusHelpers {
-  public enum State {
-    APP_NOT_FOUND, LAUNCHING,
-    RUNNING_PARTIAL,
-    RUNNING_ALL, COMPLETE, UNKNOWN
-  }
-
-  public static class AmInfo {
-    private String appName;
-    private String appType;
-    private String appId;
-    private String containerId;
-    private String hostname;
-    private String amWebUrl;
-
-    public AmInfo setAppName(String appName) {
-      this.appName = appName;
-      return this;
-    }
-
-    public AmInfo setAppType(String appType) {
-      this.appType = appType;
-      return this;
-    }
-
-    public AmInfo setAppId(String appId) {
-      this.appId = appId;
-      return this;
-    }
-
-    public AmInfo setContainerId(String containerId) {
-      this.containerId = containerId;
-      return this;
-    }
-
-    public AmInfo setHostname(String hostname) {
-      this.hostname = hostname;
-      return this;
-    }
-
-    public AmInfo setAmWebUrl(String amWebUrl) {
-      this.amWebUrl = amWebUrl;
-      return this;
-    }
-
-    public String getAppName() {
-      return appName;
-    }
-
-    public String getAppType() {
-      return appType;
-    }
-
-    public String getAppId() {
-      return appId;
-    }
-
-    public String getContainerId() {
-      return containerId;
-    }
-
-    public String getHostname() {
-      return hostname;
-    }
-
-    public String getAmWebUrl() {
-      return amWebUrl;
-    }
-
-    @Override
-    public String toString() {
-      return "AmInfo{" +
-          "appName='" + appName + '\'' +
-          ", appType='" + appType + '\'' +
-          ", appId='" + appId + '\'' +
-          ", containerId='" + containerId + '\'' +
-          ", hostname='" + hostname + '\'' +
-          ", amWebUrl='" + amWebUrl + '\'' +
-          '}';
-    }
-  }
-
-  public static class LlapInstance {
-    private final String hostname;
-    private final String containerId;
-    private String logUrl;
-
-    // Only for live instances.
-    private String statusUrl;
-    private String webUrl;
-    private Integer rpcPort;
-    private Integer mgmtPort;
-    private Integer  shufflePort;
-
-    // For completed instances
-    private String diagnostics;
-    private int yarnContainerExitStatus;
-
-    // TODO HIVE-13454 Add additional information such as #executors, container size, etc
-
-    public LlapInstance(String hostname, String containerId) {
-      this.hostname = hostname;
-      this.containerId = containerId;
-    }
-
-    public LlapInstance setLogUrl(String logUrl) {
-      this.logUrl = logUrl;
-      return this;
-    }
-
-    public LlapInstance setWebUrl(String webUrl) {
-      this.webUrl = webUrl;
-      return this;
-    }
-
-    public LlapInstance setStatusUrl(String statusUrl) {
-      this.statusUrl = statusUrl;
-      return this;
-    }
-
-    public LlapInstance setRpcPort(int rpcPort) {
-      this.rpcPort = rpcPort;
-      return this;
-    }
-
-    public LlapInstance setMgmtPort(int mgmtPort) {
-      this.mgmtPort = mgmtPort;
-      return this;
-    }
-
-    public LlapInstance setShufflePort(int shufflePort) {
-      this.shufflePort = shufflePort;
-      return this;
-    }
-
-    public LlapInstance setDiagnostics(String diagnostics) {
-      this.diagnostics = diagnostics;
-      return this;
-    }
-
-    public LlapInstance setYarnContainerExitStatus(int yarnContainerExitStatus) {
-      this.yarnContainerExitStatus = yarnContainerExitStatus;
-      return this;
-    }
-
-    public String getHostname() {
-      return hostname;
-    }
-
-    public String getLogUrl() {
-      return logUrl;
-    }
-
-    public String getStatusUrl() {
-      return statusUrl;
-    }
-
-    public String getContainerId() {
-      return containerId;
-    }
-
-    public String getWebUrl() {
-      return webUrl;
-    }
-
-    public Integer getRpcPort() {
-      return rpcPort;
-    }
-
-    public Integer getMgmtPort() {
-      return mgmtPort;
-    }
-
-    public Integer getShufflePort() {
-      return shufflePort;
-    }
-
-    public String getDiagnostics() {
-      return diagnostics;
-    }
-
-    public int getYarnContainerExitStatus() {
-      return yarnContainerExitStatus;
-    }
-
-    @Override
-    public String toString() {
-      return "LlapInstance{" +
-          "hostname='" + hostname + '\'' +
-          "logUrl=" + logUrl + '\'' +
-          ", containerId='" + containerId + '\'' +
-          ", statusUrl='" + statusUrl + '\'' +
-          ", webUrl='" + webUrl + '\'' +
-          ", rpcPort=" + rpcPort +
-          ", mgmtPort=" + mgmtPort +
-          ", shufflePort=" + shufflePort +
-          ", diagnostics=" + diagnostics +
-          ", yarnContainerExitStatus=" + yarnContainerExitStatus +
-          '}';
-    }
-  }
-
-  public static final class AppStatusBuilder {
-
-    private AmInfo amInfo;
-    private State state = State.UNKNOWN;
-    private String diagnostics;
-    private String originalConfigurationPath;
-    private String generatedConfigurationPath;
-
-    private Integer desiredInstances = null;
-    private Integer liveInstances = null;
-    private Integer launchingInstances = null;
-
-
-    private Long appStartTime;
-    private Long appFinishTime;
-
-    private boolean runningThresholdAchieved = false;
-
-    private final List<LlapInstance> runningInstances = new LinkedList<>();
-    private final List<LlapInstance> completedInstances = new LinkedList<>();
-
-    private transient final Map<String, LlapInstance>
-        containerToRunningInstanceMap = new HashMap<>();
-    private transient final Map<String, LlapInstance>
-        containerToCompletedInstanceMap = new HashMap<>();
-
-    public void setAmInfo(AmInfo amInfo) {
-      this.amInfo = amInfo;
-    }
-
-    public AppStatusBuilder setState(
-        State state) {
-      this.state = state;
-      return this;
-    }
-
-    public AppStatusBuilder setDiagnostics(String diagnostics) {
-      this.diagnostics = diagnostics;
-      return this;
-    }
-
-    public AppStatusBuilder setOriginalConfigurationPath(String originalConfigurationPath) {
-      this.originalConfigurationPath = originalConfigurationPath;
-      return this;
-    }
-
-    public AppStatusBuilder setGeneratedConfigurationPath(String generatedConfigurationPath) {
-      this.generatedConfigurationPath = generatedConfigurationPath;
-      return this;
-    }
-
-    public AppStatusBuilder setAppStartTime(long appStartTime) {
-      this.appStartTime = appStartTime;
-      return this;
-    }
-
-    public AppStatusBuilder setAppFinishTime(long finishTime) {
-      this.appFinishTime = finishTime;
-      return this;
-    }
-
-    public void setRunningThresholdAchieved(boolean runningThresholdAchieved) {
-      this.runningThresholdAchieved = runningThresholdAchieved;
-    }
-
-    public AppStatusBuilder setDesiredInstances(int desiredInstances) {
-      this.desiredInstances = desiredInstances;
-      return this;
-    }
-
-    public AppStatusBuilder setLiveInstances(int liveInstances) {
-      this.liveInstances = liveInstances;
-      return this;
-    }
-
-    public AppStatusBuilder setLaunchingInstances(int launchingInstances) {
-      this.launchingInstances = launchingInstances;
-      return this;
-    }
-
-    public AppStatusBuilder addNewRunningLlapInstance(LlapInstance llapInstance) {
-      this.runningInstances.add(llapInstance);
-      this.containerToRunningInstanceMap
-          .put(llapInstance.getContainerId(), llapInstance);
-      return this;
-    }
-
-    public LlapInstance removeAndGetRunningLlapInstanceForContainer(String containerIdString) {
-      return containerToRunningInstanceMap.remove(containerIdString);
-    }
-
-    public void clearRunningLlapInstances() {
-      this.runningInstances.clear();
-      this.containerToRunningInstanceMap.clear();
-    }
-
-    public AppStatusBuilder clearAndAddPreviouslyKnownRunningInstances(List<LlapInstance> llapInstances) {
-      clearRunningLlapInstances();
-      for (LlapInstance llapInstance : llapInstances) {
-        addNewRunningLlapInstance(llapInstance);
-      }
-      return this;
-    }
-
-    @JsonIgnore
-    public List<LlapInstance> allRunningInstances() {
-      return this.runningInstances;
-    }
-
-    public AppStatusBuilder addNewCompleteLlapInstance(LlapInstance llapInstance) {
-      this.completedInstances.add(llapInstance);
-      this.containerToCompletedInstanceMap
-          .put(llapInstance.getContainerId(), llapInstance);
-      return this;
-    }
-
-    public LlapInstance removeAndGetCompletedLlapInstanceForContainer(String containerIdString) {
-      return containerToCompletedInstanceMap.remove(containerIdString);
-    }
-
-    public void clearCompletedLlapInstances() {
-      this.completedInstances.clear();
-      this.containerToCompletedInstanceMap.clear();
-    }
-
-    public AppStatusBuilder clearAndAddPreviouslyKnownCompletedInstances(List<LlapInstance> llapInstances) {
-      clearCompletedLlapInstances();
-      for (LlapInstance llapInstance : llapInstances) {
-        addNewCompleteLlapInstance(llapInstance);
-      }
-      return this;
-    }
-
-    @JsonIgnore
-    public List<LlapInstance> allCompletedInstances() {
-      return this.completedInstances;
-    }
-
-    public AmInfo getAmInfo() {
-      return amInfo;
-    }
-
-    public State getState() {
-      return state;
-    }
-
-    public String getDiagnostics() {
-      return diagnostics;
-    }
-
-    public String getOriginalConfigurationPath() {
-      return originalConfigurationPath;
-    }
-
-    public String getGeneratedConfigurationPath() {
-      return generatedConfigurationPath;
-    }
-
-    public Integer getDesiredInstances() {
-      return desiredInstances;
-    }
-
-    public Integer getLiveInstances() {
-      return liveInstances;
-    }
-
-    public Integer getLaunchingInstances() {
-      return launchingInstances;
-    }
-
-    public Long getAppStartTime() {
-      return appStartTime;
-    }
-
-    public Long getAppFinishTime() {
-      return appFinishTime;
-    }
-
-    public boolean isRunningThresholdAchieved() {
-      return runningThresholdAchieved;
-    }
-
-    public List<LlapInstance> getRunningInstances() {
-      return runningInstances;
-    }
-
-    public List<LlapInstance> getCompletedInstances() {
-      return completedInstances;
-    }
-
-    @JsonIgnore
-    public AmInfo maybeCreateAndGetAmInfo() {
-      if (amInfo == null) {
-        amInfo = new AmInfo();
-      }
-      return amInfo;
-    }
-
-    @Override
-    public String toString() {
-      return "AppStatusBuilder{" +
-          "amInfo=" + amInfo +
-          ", state=" + state +
-          ", diagnostics=" + diagnostics +
-          ", originalConfigurationPath='" + originalConfigurationPath + '\'' +
-          ", generatedConfigurationPath='" + generatedConfigurationPath + '\'' +
-          ", desiredInstances=" + desiredInstances +
-          ", liveInstances=" + liveInstances +
-          ", launchingInstances=" + launchingInstances +
-          ", appStartTime=" + appStartTime +
-          ", appFinishTime=" + appFinishTime +
-          ", runningThresholdAchieved=" + runningThresholdAchieved +
-          ", runningInstances=" + runningInstances +
-          ", completedInstances=" + completedInstances +
-          ", containerToRunningInstanceMap=" + containerToRunningInstanceMap +
-          '}';
-    }
-  }
-}

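A side note on the helper classes above (both the inlined copies and the deleted LlapStatusHelpers versions): AmInfo, LlapInstance and AppStatusBuilder all use fluent setters that return this, so a status snapshot can be assembled as one chained expression and later rendered via toString() or JSON. A trimmed, hypothetical sketch of the pattern and its usage; the application id and hostname values are made up.

final class FluentSetterSketch {
  static final class AmInfo {                          // trimmed stand-in for the class above
    private String appId;
    private String hostname;
    AmInfo setAppId(String appId)       { this.appId = appId;       return this; }
    AmInfo setHostname(String hostname) { this.hostname = hostname; return this; }
    @Override public String toString() {
      return "AmInfo{appId='" + appId + "', hostname='" + hostname + "'}";
    }
  }

  public static void main(String[] args) {
    // Each setter returns the same instance, so calls chain left to right.
    AmInfo am = new AmInfo().setAppId("application_0000000000000_0001").setHostname("node-1");
    System.out.println(am);
  }
}
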
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/configuration/LlapDaemonConfiguration.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/configuration/LlapDaemonConfiguration.java b/llap-server/src/java/org/apache/hadoop/hive/llap/configuration/LlapDaemonConfiguration.java
index 7219d36..88f3b19 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/configuration/LlapDaemonConfiguration.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/configuration/LlapDaemonConfiguration.java
@@ -36,7 +36,7 @@ public class LlapDaemonConfiguration extends Configuration {
   public static final String[] SSL_DAEMON_CONFIGS = { "ssl-server.xml" };
 
   public LlapDaemonConfiguration() {
-    super(true); // Load the defaults.
+    super(false);
     for (String conf : DAEMON_CONFIGS) {
       addResource(conf);
     }

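A side note on the one-line change above: it toggles whether Hadoop's Configuration pre-loads its default resources (core-default.xml, core-site.xml) before the LLAP-specific files in DAEMON_CONFIGS are added; with super(false) only the explicitly added resources are read. A minimal sketch of the difference; the resource name below is just an example, not necessarily the exact contents of DAEMON_CONFIGS.

import org.apache.hadoop.conf.Configuration;

final class ConfLoadingSketch {
  static Configuration build(boolean loadHadoopDefaults) {
    // true:  core-default.xml / core-site.xml are loaded first, then added resources override them
    // false: only the resources added below are consulted
    Configuration conf = new Configuration(loadHadoopDefaults);
    conf.addResource("llap-daemon-site.xml");  // example daemon-specific resource
    return conf;
  }
}
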
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
index e030a76..82bbcf3 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
@@ -524,8 +524,4 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu
     return queryId + "-" + dagIndex;
   }
 
-  public int getNumActive() {
-    return executorService.getNumActive();
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/EvictingPriorityBlockingQueue.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/EvictingPriorityBlockingQueue.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/EvictingPriorityBlockingQueue.java
index 8fe59d4..a80bb9b 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/EvictingPriorityBlockingQueue.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/EvictingPriorityBlockingQueue.java
@@ -53,11 +53,6 @@ public class EvictingPriorityBlockingQueue<E> {
       currentSize++;
       return null;
     } else {
-      if (isEmpty()) {
-        // Empty queue. But no capacity available, due to waitQueueSize and additionalElementsAllowed
-        // Return the element.
-        return e;
-      }
       // No capacity. Check if an element needs to be evicted.
       E last = deque.peekLast();
       if (comparator.compare(e, last) < 0) {

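A side note on the offer() hunk above: the queue is bounded, and when it is full the incoming element is compared against the current lowest-priority tail; if it ranks higher the tail is evicted and handed back to the caller, otherwise the incoming element itself is returned as rejected. A minimal stand-alone sketch of that policy, using a TreeSet for ordering (which, unlike the real deque-based class, treats compare-equal elements as duplicates).

import java.util.Comparator;
import java.util.TreeSet;

final class EvictingQueueSketch<E> {
  private final Comparator<? super E> comparator;
  private final TreeSet<E> ordered;      // first() = highest priority, last() = lowest
  private final int capacity;

  EvictingQueueSketch(Comparator<? super E> comparator, int capacity) {
    this.comparator = comparator;
    this.ordered = new TreeSet<>(comparator);
    this.capacity = capacity;
  }

  /** Returns null if e was accepted, otherwise the element that was rejected or evicted. */
  synchronized E offer(E e) {
    if (ordered.size() < capacity) {
      ordered.add(e);
      return null;
    }
    E last = ordered.last();
    if (comparator.compare(e, last) < 0) {  // e outranks the lowest-priority element
      ordered.pollLast();                   // evict it ...
      ordered.add(e);
      return last;                          // ... and let the caller handle the evicted one
    }
    return e;                               // queue full and e does not outrank anything
  }

  synchronized E take() { return ordered.pollFirst(); }
}
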
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
index aae146e..95bc675 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
@@ -15,17 +15,14 @@
 package org.apache.hadoop.hive.llap.daemon.impl;
 
 import org.apache.hadoop.hive.llap.LlapOutputFormatService;
-
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.MemoryPoolMXBean;
 import java.lang.management.MemoryType;
 import java.net.InetSocketAddress;
 import java.net.URL;
-import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Date;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;
@@ -41,7 +38,6 @@ import org.apache.hadoop.hive.common.UgiFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.llap.DaemonId;
-import org.apache.hadoop.hive.llap.LlapDaemonInfo;
 import org.apache.hadoop.hive.llap.LlapUtil;
 import org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration;
 import org.apache.hadoop.hive.llap.daemon.ContainerRunner;
@@ -177,33 +173,28 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
         daemonConf, ConfVars.LLAP_DAEMON_TASK_SCHEDULER_WAIT_QUEUE_SIZE);
     boolean enablePreemption = HiveConf.getBoolVar(
         daemonConf, ConfVars.LLAP_DAEMON_TASK_SCHEDULER_ENABLE_PREEMPTION);
-    final String logMsg = "Attempting to start LlapDaemon with the following configuration: " +
-      "maxJvmMemory=" + maxJvmMemory + " ("
-      + LlapUtil.humanReadableByteCount(maxJvmMemory) + ")" +
-      ", requestedExecutorMemory=" + executorMemoryBytes +
-      " (" + LlapUtil.humanReadableByteCount(executorMemoryBytes) + ")" +
-      ", llapIoCacheSize=" + ioMemoryBytes + " ("
-      + LlapUtil.humanReadableByteCount(ioMemoryBytes) + ")" +
-      ", xmxHeadRoomMemory=" + xmxHeadRoomBytes + " ("
-      + LlapUtil.humanReadableByteCount(xmxHeadRoomBytes) + ")" +
-      ", adjustedExecutorMemory=" + executorMemoryPerInstance +
-      " (" + LlapUtil.humanReadableByteCount(executorMemoryPerInstance) + ")" +
-      ", numExecutors=" + numExecutors +
-      ", llapIoEnabled=" + ioEnabled +
-      ", llapIoCacheIsDirect=" + isDirectCache +
-      ", rpcListenerPort=" + srvPort +
-      ", mngListenerPort=" + mngPort +
-      ", webPort=" + webPort +
-      ", outputFormatSvcPort=" + outputFormatServicePort +
-      ", workDirs=" + Arrays.toString(localDirs) +
-      ", shufflePort=" + shufflePort +
-      ", waitQueueSize= " + waitQueueSize +
-      ", enablePreemption= " + enablePreemption;
-    LOG.info(logMsg);
-    final String currTSISO8601 = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ").format(new Date());
-    // Time based log retrieval may not fetch the above log line so logging to stderr for debugging purpose.
-    System.err.println(currTSISO8601 + " " + logMsg);
-
+    LOG.warn("Attempting to start LlapDaemonConf with the following configuration: " +
+        "maxJvmMemory=" + maxJvmMemory + " ("
+          + LlapUtil.humanReadableByteCount(maxJvmMemory) + ")" +
+        ", requestedExecutorMemory=" + executorMemoryBytes +
+        " (" + LlapUtil.humanReadableByteCount(executorMemoryBytes) + ")" +
+        ", llapIoCacheSize=" + ioMemoryBytes + " ("
+          + LlapUtil.humanReadableByteCount(ioMemoryBytes) + ")" +
+        ", xmxHeadRoomMemory=" + xmxHeadRoomBytes + " ("
+          + LlapUtil.humanReadableByteCount(xmxHeadRoomBytes) + ")" +
+        ", adjustedExecutorMemory=" + executorMemoryPerInstance +
+        " (" + LlapUtil.humanReadableByteCount(executorMemoryPerInstance) + ")" +
+        ", numExecutors=" + numExecutors +
+        ", llapIoEnabled=" + ioEnabled +
+        ", llapIoCacheIsDirect=" + isDirectCache +
+        ", rpcListenerPort=" + srvPort +
+        ", mngListenerPort=" + mngPort +
+        ", webPort=" + webPort +
+        ", outputFormatSvcPort=" + outputFormatServicePort +
+        ", workDirs=" + Arrays.toString(localDirs) +
+        ", shufflePort=" + shufflePort +
+        ", waitQueueSize= " + waitQueueSize +
+        ", enablePreemption= " + enablePreemption);
 
     long memRequired =
         executorMemoryBytes + (ioEnabled && isDirectCache == false ? ioMemoryBytes : 0);
@@ -265,7 +256,8 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
     this.metrics.setCacheMemoryPerInstance(ioMemoryBytes);
     this.metrics.setJvmMaxMemory(maxJvmMemory);
     this.metrics.setWaitQueueSize(waitQueueSize);
-    this.metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
+    // TODO: Has to be reverted in HIVE-15644
+    //this.metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
     this.llapDaemonInfoBean = MBeans.register("LlapDaemon", "LlapDaemonInfo", this);
     LOG.info("Started LlapMetricsSystem with displayName: " + displayName +
         " sessionId: " + sessionId);
@@ -344,7 +336,7 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
       System.setProperty("isThreadContextMapInheritable", "true");
       Configurator.initialize("LlapDaemonLog4j2", llap_l4j2.toString());
       long end = System.currentTimeMillis();
-      LOG.debug("LLAP daemon logging initialized from {} in {} ms. Async: {}",
+      LOG.warn("LLAP daemon logging initialized from {} in {} ms. Async: {}",
           llap_l4j2, (end - start), async);
     } else {
       throw new RuntimeException("Log initialization failed." +
@@ -386,7 +378,7 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
         "$$$$$$$$\\ $$$$$$$$\\ $$ |  $$ |$$ |\n" +
         "\\________|\\________|\\__|  \\__|\\__|\n" +
         "\n";
-    LOG.info("\n\n" + asciiArt);
+    LOG.warn("\n\n" + asciiArt);
   }
 
   @Override
@@ -508,6 +500,8 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
             nmHost, nmPort);
       }
 
+      int numExecutors = HiveConf.getIntVar(daemonConf, ConfVars.LLAP_DAEMON_NUM_EXECUTORS);
+
       String workDirsString = System.getenv(ApplicationConstants.Environment.LOCAL_DIRS.name());
 
       String localDirList = LlapUtil.getDaemonLocalDirString(daemonConf, workDirsString);
@@ -518,19 +512,16 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
       int shufflePort = daemonConf
           .getInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, ShuffleHandler.DEFAULT_SHUFFLE_PORT);
       int webPort = HiveConf.getIntVar(daemonConf, ConfVars.LLAP_DAEMON_WEB_PORT);
-
-      LlapDaemonInfo.initialize(appName, daemonConf);
-
-      int numExecutors = LlapDaemonInfo.INSTANCE.getNumExecutors();
-      long executorMemoryBytes = LlapDaemonInfo.INSTANCE.getExecutorMemory();
-      long ioMemoryBytes = LlapDaemonInfo.INSTANCE.getCacheSize();
-      boolean isDirectCache = LlapDaemonInfo.INSTANCE.isDirectCache();
-      boolean isLlapIo = LlapDaemonInfo.INSTANCE.isLlapIo();
+      long executorMemoryBytes = HiveConf.getIntVar(
+          daemonConf, ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB) * 1024l * 1024l;
+      long ioMemoryBytes = HiveConf.getSizeVar(daemonConf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE);
+      boolean isDirectCache = HiveConf.getBoolVar(daemonConf, ConfVars.LLAP_ALLOCATOR_DIRECT);
+      boolean isLlapIo = HiveConf.getBoolVar(daemonConf, HiveConf.ConfVars.LLAP_IO_ENABLED, true);
 
       LlapDaemon.initializeLogging(daemonConf);
-      llapDaemon =
-          new LlapDaemon(daemonConf, numExecutors, executorMemoryBytes, isLlapIo, isDirectCache,
-              ioMemoryBytes, localDirs, rpcPort, mngPort, shufflePort, webPort, appName);
+      llapDaemon = new LlapDaemon(daemonConf, numExecutors, executorMemoryBytes, isLlapIo,
+          isDirectCache, ioMemoryBytes, localDirs, rpcPort, mngPort, shufflePort, webPort,
+          appName);
 
       LOG.info("Adding shutdown hook for LlapDaemon");
       ShutdownHookManager.addShutdownHook(new CompositeServiceShutdownHook(llapDaemon), 1);
@@ -541,7 +532,7 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
       // Relying on the RPC threads to keep the service alive.
     } catch (Throwable t) {
       // TODO Replace this with a ExceptionHandler / ShutdownHook
-      LOG.error("Failed to start LLAP Daemon with exception", t);
+      LOG.warn("Failed to start LLAP Daemon with exception", t);
       if (llapDaemon != null) {
         llapDaemon.shutdown();
       }
@@ -610,11 +601,6 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
   }
 
   @Override
-  public int getNumActive() {
-    return containerRunner.getNumActive();
-  }
-
-  @Override
   public long getExecutorMemoryPerInstance() {
     return executorMemoryPerInstance;
   }

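A side note on the startup hunks above: both versions size executor memory from an MB-valued setting widened to bytes, then check that executor memory plus any on-heap I/O cache fits inside the JVM max heap before the daemon is constructed. A small sketch of that arithmetic; the numbers in main are made up.

final class MemoryBudgetSketch {
  static long requiredHeapBytes(long executorMemoryMb, long ioCacheBytes,
                                boolean ioEnabled, boolean directCache) {
    long executorBytes = executorMemoryMb * 1024L * 1024L;               // MB -> bytes, as in the diff
    long onHeapCache = (ioEnabled && !directCache) ? ioCacheBytes : 0L;  // direct cache lives off-heap
    return executorBytes + onHeapCache;
  }

  public static void main(String[] args) {
    long maxJvm = Runtime.getRuntime().maxMemory();
    long required = requiredHeapBytes(4096, 1L << 30, true, false);      // 4 GB executors + 1 GB cache
    System.out.println("required=" + required + " bytes, fits=" + (required <= maxJvm));
  }
}
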
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonMXBean.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonMXBean.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonMXBean.java
index 22cfc9e..d6449db 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonMXBean.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemonMXBean.java
@@ -40,12 +40,6 @@ public interface LlapDaemonMXBean {
   public int getNumExecutors();
 
   /**
-   * Gets the number of active executors.
-   * @return number of active executors
-   */
-  public int getNumActive();
-
-  /**
    * Gets the shuffle port.
    * @return the shuffle port
    */

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/Scheduler.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/Scheduler.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/Scheduler.java
index f199593..fd6234a 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/Scheduler.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/Scheduler.java
@@ -45,7 +45,5 @@ public interface Scheduler<T> {
 
   Set<String> getExecutorsStatus();
 
-  int getNumActive();
-
   QueryIdentifier findQueryByFragment(String fragmentId);
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
index 7f8c947..9eaa7d7 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
@@ -201,20 +201,6 @@ public class TaskExecutorService extends AbstractService
   };
 
   @Override
-  public int getNumActive() {
-    int result = 0;
-    for (Map.Entry<String, TaskWrapper> e : knownTasks.entrySet()) {
-      TaskWrapper task = e.getValue();
-      if (task.isInWaitQueue()) continue;
-      TaskRunnerCallable c = task.getTaskRunnerCallable();
-      // Count the tasks in intermediate state as waiting.
-      if (c == null || c.getStartTime() == 0) continue;
-      ++result;
-    }
-    return result;
-  }
-
-  @Override
   public Set<String> getExecutorsStatus() {
     // TODO Change this method to make the output easier to parse (parse programmatically)
     Set<String> result = new LinkedHashSet<>();
@@ -291,8 +277,7 @@ public class TaskExecutorService extends AbstractService
             }
             // If the task cannot finish and if no slots are available then don't schedule it.
             // Also don't wait if we have a task and we just killed something to schedule it.
-            // (numSlotsAvailable can go negative, if the callback after the thread completes is delayed)
-            boolean shouldWait = numSlotsAvailable.get() <= 0 && lastKillTimeMs == null;
+            boolean shouldWait = numSlotsAvailable.get() == 0 && lastKillTimeMs == null;
             if (task.getTaskRunnerCallable().canFinish()) {
               if (isDebugEnabled) {
                 LOG.debug("Attempting to schedule task {}, canFinish={}. Current state: "
@@ -743,8 +728,8 @@ public class TaskExecutorService extends AbstractService
       knownTasks.remove(taskWrapper.getRequestId());
       taskWrapper.setIsInPreemptableQueue(false);
       taskWrapper.maybeUnregisterForFinishedStateNotifications();
-      updatePreemptionListAndNotify(result.getEndReason());
       taskWrapper.getTaskRunnerCallable().getCallback().onSuccess(result);
+      updatePreemptionListAndNotify(result.getEndReason());
     }
 
     @Override
@@ -757,8 +742,8 @@ public class TaskExecutorService extends AbstractService
       knownTasks.remove(taskWrapper.getRequestId());
       taskWrapper.setIsInPreemptableQueue(false);
       taskWrapper.maybeUnregisterForFinishedStateNotifications();
-      updatePreemptionListAndNotify(null);
       taskWrapper.getTaskRunnerCallable().getCallback().onFailure(t);
+      updatePreemptionListAndNotify(null);
       LOG.error("Failed notification received: Stacktrace: " + ExceptionUtils.getStackTrace(t));
     }
 

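A side note on the removed getNumActive() above: a task is counted as active only once it has left the wait queue and its runner has actually started (start time != 0); anything in flight between those states is still treated as waiting. A hypothetical stand-alone sketch of that filter; TaskView stands in for TaskWrapper/TaskRunnerCallable.

import java.util.Collection;

final class ActiveTaskCountSketch {
  interface TaskView {
    boolean isInWaitQueue();
    long startTimeMillis();   // 0 until the runner has really started
  }

  static int countActive(Collection<? extends TaskView> tasks) {
    int active = 0;
    for (TaskView t : tasks) {
      if (t.isInWaitQueue()) continue;          // still queued
      if (t.startTimeMillis() == 0) continue;   // scheduled but not yet running, counted as waiting
      active++;
    }
    return active;
  }
}
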
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
index 1669815..c077d75 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
@@ -407,7 +407,6 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
       taskReporter.shutdown();
     }
     if (umbilical != null) {
-      // TODO: Can this be moved out of the main callback path
       RPC.stopProxy(umbilical);
     }
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/services/impl/LlapIoMemoryServlet.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/services/impl/LlapIoMemoryServlet.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/services/impl/LlapIoMemoryServlet.java
deleted file mode 100644
index 3386cb4..0000000
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/services/impl/LlapIoMemoryServlet.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.llap.daemon.services.impl;
-
-import java.io.PrintWriter;
-
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hive.llap.io.api.LlapIo;
-import org.apache.hadoop.hive.llap.io.api.LlapProxy;
-import org.apache.hive.http.HttpServer;
-
-@SuppressWarnings("serial")
-public class LlapIoMemoryServlet extends HttpServlet {
-
-  private static final Log LOG = LogFactory.getLog(LlapIoMemoryServlet.class);
-  static final String ACCESS_CONTROL_ALLOW_METHODS = "Access-Control-Allow-Methods";
-  static final String ACCESS_CONTROL_ALLOW_ORIGIN = "Access-Control-Allow-Origin";
-
-  /**
-   * Initialize this servlet.
-   */
-  @Override
-  public void init() throws ServletException {
-  }
-
-  /**
-   * Process a GET request for the specified resource.
-   *
-   * @param request
-   *          The servlet request we are processing
-   * @param response
-   *          The servlet response we are creating
-   */
-  @Override
-  public void doGet(HttpServletRequest request, HttpServletResponse response) {
-    try {
-      if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) {
-        return;
-      }
-      PrintWriter writer = null;
- 
-      try {
-        response.setContentType("text/plain; charset=utf8");
-        response.setHeader(ACCESS_CONTROL_ALLOW_METHODS, "GET");
-        response.setHeader(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
-        response.setHeader("Cache-Control", "no-transform,public,max-age=60,s-maxage=60");
-
-        writer = response.getWriter();
-
-        LlapIo<?> llapIo = LlapProxy.getIo();
-        if (llapIo == null) {
-          writer.write("LLAP IO not found");
-        } else {
-          writer.write(llapIo.getMemoryInfo());
-        }
-
-      } finally {
-        if (writer != null) {
-          writer.close();
-        }
-      }
-    } catch (Exception e) {
-      LOG.error("Caught exception while processing llap status request", e);
-      response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/services/impl/LlapWebServices.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/services/impl/LlapWebServices.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/services/impl/LlapWebServices.java
index e896df2..028daa1 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/services/impl/LlapWebServices.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/services/impl/LlapWebServices.java
@@ -102,7 +102,6 @@ public class LlapWebServices extends AbstractService {
       this.http = builder.build();
       this.http.addServlet("status", "/status", LlapStatusServlet.class);
       this.http.addServlet("peers", "/peers", LlapPeerRegistryServlet.class);
-      this.http.addServlet("iomem", "/iomem", LlapIoMemoryServlet.class);
     } catch (IOException e) {
       LOG.warn("LLAP web service failed to come up", e);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
index 294fb2b..7c309a4 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
@@ -23,12 +23,13 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
 import javax.management.ObjectName;
 
-import org.apache.hadoop.hive.llap.daemon.impl.LlapDaemon;
 import org.apache.hadoop.hive.llap.daemon.impl.StatsRecordingThreadPool;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -38,8 +39,8 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.llap.cache.BuddyAllocator;
 import org.apache.hadoop.hive.llap.cache.BufferUsageManager;
+import org.apache.hadoop.hive.llap.cache.EvictionAwareAllocator;
 import org.apache.hadoop.hive.llap.cache.EvictionDispatcher;
-import org.apache.hadoop.hive.llap.cache.LlapOomDebugDump;
 import org.apache.hadoop.hive.llap.cache.LowLevelCache;
 import org.apache.hadoop.hive.llap.cache.LowLevelCacheImpl;
 import org.apache.hadoop.hive.llap.cache.LowLevelCacheMemoryManager;
@@ -65,6 +66,8 @@ import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.metrics2.util.MBeans;
 
 import com.google.common.primitives.Ints;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 public class LlapIoImpl implements LlapIo<VectorizedRowBatch> {
@@ -82,7 +85,6 @@ public class LlapIoImpl implements LlapIo<VectorizedRowBatch> {
   private final LlapDaemonIOMetrics ioMetrics;
   private ObjectName buddyAllocatorMXBean;
   private final Allocator allocator;
-  private final LlapOomDebugDump memoryDump;
 
   private LlapIoImpl(Configuration conf) throws IOException {
     String ioMode = HiveConf.getVar(conf, HiveConf.ConfVars.LLAP_IO_MEMORY_MODE);
@@ -119,36 +121,14 @@ public class LlapIoImpl implements LlapIo<VectorizedRowBatch> {
     if (useLowLevelCache) {
       // Memory manager uses cache policy to trigger evictions, so create the policy first.
       boolean useLrfu = HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_USE_LRFU);
-      long totalMemorySize = HiveConf.getSizeVar(conf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE);
-      int minAllocSize = (int)HiveConf.getSizeVar(conf, ConfVars.LLAP_ALLOCATOR_MIN_ALLOC);
-      float metadataFraction = HiveConf.getFloatVar(conf, ConfVars.LLAP_IO_METADATA_FRACTION);
-      long metaMem = 0;
-      // TODO: this split a workaround until HIVE-15665.
-      //       Technically we don't have to do it for on-heap data cache but we'd do for testing.
-      boolean isSplitCache = metadataFraction > 0f;
-      if (isSplitCache) {
-        metaMem = (long)(LlapDaemon.getTotalHeapSize() * metadataFraction);
-      }
-      LowLevelCachePolicy cachePolicy = useLrfu ? new LowLevelLrfuCachePolicy(
-          minAllocSize, totalMemorySize, conf) : new LowLevelFifoCachePolicy();
+      LowLevelCachePolicy cachePolicy =
+          useLrfu ? new LowLevelLrfuCachePolicy(conf) : new LowLevelFifoCachePolicy(conf);
       // Allocator uses memory manager to request memory, so create the manager next.
       LowLevelCacheMemoryManager memManager = new LowLevelCacheMemoryManager(
-          totalMemorySize, cachePolicy, cacheMetrics);
-      LowLevelCachePolicy metaCachePolicy = null;
-      LowLevelCacheMemoryManager metaMemManager = null;
-      if (isSplitCache) {
-        metaCachePolicy = useLrfu ? new LowLevelLrfuCachePolicy(
-            minAllocSize, metaMem, conf) : new LowLevelFifoCachePolicy();
-        metaMemManager = new LowLevelCacheMemoryManager(metaMem, metaCachePolicy, cacheMetrics);
-      } else {
-        metaCachePolicy = cachePolicy;
-        metaMemManager = memManager;
-      }
-      cacheMetrics.setCacheCapacityTotal(totalMemorySize + metaMem);
+          conf, cachePolicy, cacheMetrics);
       // Cache uses allocator to allocate and deallocate, create allocator and then caches.
-      BuddyAllocator allocator = new BuddyAllocator(conf, memManager, cacheMetrics);
+      EvictionAwareAllocator allocator = new BuddyAllocator(conf, memManager, cacheMetrics);
       this.allocator = allocator;
-      this.memoryDump = allocator;
       LowLevelCacheImpl cacheImpl = new LowLevelCacheImpl(
           cacheMetrics, cachePolicy, allocator, true);
       cache = cacheImpl;
@@ -158,21 +138,15 @@ public class LlapIoImpl implements LlapIo<VectorizedRowBatch> {
         serdeCache = serdeCacheImpl;
       }
       boolean useGapCache = HiveConf.getBoolVar(conf, ConfVars.LLAP_CACHE_ENABLE_ORC_GAP_CACHE);
-      metadataCache = new OrcMetadataCache(metaMemManager, metaCachePolicy, useGapCache);
+      metadataCache = new OrcMetadataCache(memManager, cachePolicy, useGapCache);
       // And finally cache policy uses cache to notify it of eviction. The cycle is complete!
-      EvictionDispatcher e = new EvictionDispatcher(cache, serdeCache, metadataCache, allocator);
-      if (isSplitCache) {
-        metaCachePolicy.setEvictionListener(e);
-        metaCachePolicy.setParentDebugDumper(e);
-      }
-      cachePolicy.setEvictionListener(e);
-      cachePolicy.setParentDebugDumper(e);
-
+      cachePolicy.setEvictionListener(new EvictionDispatcher(
+          cache, serdeCache, metadataCache, allocator));
+      cachePolicy.setParentDebugDumper(cacheImpl);
       cacheImpl.startThreads(); // Start the cache threads.
       bufferManager = cacheImpl; // Cache also serves as buffer manager.
     } else {
       this.allocator = new SimpleAllocator(conf);
-      memoryDump = null;
       SimpleBufferManager sbm = new SimpleBufferManager(allocator, cacheMetrics);
       bufferManager = sbm;
       cache = sbm;
@@ -197,14 +171,6 @@ public class LlapIoImpl implements LlapIo<VectorizedRowBatch> {
     buddyAllocatorMXBean = MBeans.register("LlapDaemon", "BuddyAllocatorInfo", allocator);
   }
 
-  @Override
-  public String getMemoryInfo() {
-    if (memoryDump == null) return "\nNot using the allocator";
-    StringBuilder sb = new StringBuilder();
-    memoryDump.debugDumpShort(sb);
-    return sb.toString();
-  }
-
   @SuppressWarnings("rawtypes")
   @Override
   public InputFormat<NullWritable, VectorizedRowBatch> getInputFormat(

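The reverted hunk above restores the simpler cache wiring in LlapIoImpl: the cache policy is built first, the memory manager asks the policy to free space, the buddy allocator requests memory through the manager, the low-level cache allocates through the allocator, and finally the policy is pointed back at the caches through the EvictionDispatcher, closing the cycle. A minimal, self-contained sketch of that construction order follows; every type in it (CachePolicy, MemoryManager, Allocator, Cache) is a simplified stand-in invented for illustration rather than an actual LLAP class, the EvictionDispatcher is collapsed into the cache itself, and only the wiring order mirrors the code in the hunk.

// Simplified stand-ins for the LLAP cache components; the names are invented
// for this sketch and only the construction order mirrors LlapIoImpl.
interface EvictionListener { void notifyEvicted(String bufferId); }

final class CachePolicy {
  private EvictionListener listener;
  void setEvictionListener(EvictionListener l) { this.listener = l; }
  void evict(String bufferId) { if (listener != null) listener.notifyEvicted(bufferId); }
}

final class MemoryManager {
  private final CachePolicy policy;
  MemoryManager(CachePolicy policy) { this.policy = policy; }
  void reserve(long bytes) { /* under memory pressure this would ask the policy to evict */ }
}

final class Allocator {
  private final MemoryManager memManager;
  Allocator(MemoryManager memManager) { this.memManager = memManager; }
  String allocate(long bytes) { memManager.reserve(bytes); return "buffer-1"; }
}

final class Cache implements EvictionListener {
  private final Allocator allocator;
  Cache(Allocator allocator) { this.allocator = allocator; }
  String read(long bytes) { return allocator.allocate(bytes); }
  @Override public void notifyEvicted(String bufferId) { System.out.println("evicted " + bufferId); }
}

public final class CacheWiringSketch {
  public static void main(String[] args) {
    CachePolicy policy = new CachePolicy();                // 1. policy first
    MemoryManager memManager = new MemoryManager(policy);  // 2. manager frees space via the policy
    Allocator allocator = new Allocator(memManager);       // 3. allocator requests memory via the manager
    Cache cache = new Cache(allocator);                    // 4. cache allocates through the allocator
    policy.setEvictionListener(cache);                     // 5. policy notifies the cache; cycle complete
    String buf = cache.read(4096);
    policy.evict(buf);                                     // prints "evicted buffer-1"
  }
}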
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
index 121e169..ac031aa 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
@@ -79,9 +79,9 @@ public class OrcColumnVectorProducer implements ColumnVectorProducer {
     cacheMetrics.incrCacheReadRequests();
     OrcEncodedDataConsumer edc = new OrcEncodedDataConsumer(consumer, columnIds.size(),
         _skipCorrupt, counters, ioMetrics);
-    OrcEncodedDataReader reader = new OrcEncodedDataReader(
-        lowLevelCache, bufferManager, metadataCache, conf, job, split, columnIds, sarg,
-        columnNames, edc, counters, readerSchema);
+    // Note: we use global conf here and ignore JobConf.
+    OrcEncodedDataReader reader = new OrcEncodedDataReader(lowLevelCache, bufferManager,
+        metadataCache, conf, split, columnIds, sarg, columnNames, edc, counters, readerSchema);
     edc.init(reader, reader);
     return edc;
   }


http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
index 50d8878..1cf47c3 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
@@ -31,16 +31,13 @@ import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Stack;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FireEventRequest;
@@ -49,7 +46,6 @@ import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.FunctionType;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InsertEventRequestData;
-import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 import org.apache.hadoop.hive.metastore.api.Order;
@@ -60,21 +56,6 @@ import org.apache.hadoop.hive.metastore.api.ResourceUri;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
-import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
-import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
-import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent;
-import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
-import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.DropFunctionEvent;
-import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
-import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.DropTableEvent;
-import org.apache.hadoop.hive.metastore.events.InsertEvent;
-import org.apache.hadoop.hive.metastore.events.ListenerEvent;
 import org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage;
 import org.apache.hadoop.hive.metastore.messaging.AlterIndexMessage;
 import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
@@ -94,8 +75,6 @@ import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer;
 import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hive.hcatalog.data.Pair;
-import org.junit.After;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -118,105 +97,12 @@ public class TestDbNotificationListener {
   private int startTime;
   private long firstEventId;
 
-  /* This class is used to verify that HiveMetaStore calls the non-transactional listeners with the
-    * current event ID set by the DbNotificationListener class */
-  public static class MockMetaStoreEventListener extends MetaStoreEventListener {
-    private static Stack<Pair<EventType, String>> eventsIds = new Stack<>();
-
-    private static void pushEventId(EventType eventType, final ListenerEvent event) {
-      if (event.getStatus()) {
-        Map<String, String> parameters = event.getParameters();
-        if (parameters.containsKey(MetaStoreEventListenerConstants.DB_NOTIFICATION_EVENT_ID_KEY_NAME)) {
-          Pair<EventType, String> pair =
-              new Pair<>(eventType, parameters.get(MetaStoreEventListenerConstants.DB_NOTIFICATION_EVENT_ID_KEY_NAME));
-          eventsIds.push(pair);
-        }
-      }
-    }
-
-    public static void popAndVerifyLastEventId(EventType eventType, long id) {
-      if (!eventsIds.isEmpty()) {
-        Pair<EventType, String> pair = eventsIds.pop();
-
-        assertEquals("Last event type does not match.", eventType, pair.first);
-        assertEquals("Last event ID does not match.", Long.toString(id), pair.second);
-      } else {
-        assertTrue("List of events is empty.",false);
-      }
-    }
-
-    public static void clearEvents() {
-      eventsIds.clear();
-    }
-
-    public MockMetaStoreEventListener(Configuration config) {
-      super(config);
-    }
-
-    public void onCreateTable (CreateTableEvent tableEvent) throws MetaException {
-      pushEventId(EventType.CREATE_TABLE, tableEvent);
-    }
-
-    public void onDropTable (DropTableEvent tableEvent)  throws MetaException {
-      pushEventId(EventType.DROP_TABLE, tableEvent);
-    }
-
-    public void onAlterTable (AlterTableEvent tableEvent) throws MetaException {
-      pushEventId(EventType.ALTER_TABLE, tableEvent);
-    }
-
-    public void onAddPartition (AddPartitionEvent partitionEvent) throws MetaException {
-      pushEventId(EventType.ADD_PARTITION, partitionEvent);
-    }
-
-    public void onDropPartition (DropPartitionEvent partitionEvent)  throws MetaException {
-      pushEventId(EventType.DROP_PARTITION, partitionEvent);
-    }
-
-    public void onAlterPartition (AlterPartitionEvent partitionEvent)  throws MetaException {
-      pushEventId(EventType.ALTER_PARTITION, partitionEvent);
-    }
-
-    public void onCreateDatabase (CreateDatabaseEvent dbEvent) throws MetaException {
-      pushEventId(EventType.CREATE_DATABASE, dbEvent);
-    }
-
-    public void onDropDatabase (DropDatabaseEvent dbEvent) throws MetaException {
-      pushEventId(EventType.DROP_DATABASE, dbEvent);
-    }
-
-    public void onAddIndex(AddIndexEvent indexEvent) throws MetaException {
-      pushEventId(EventType.CREATE_INDEX, indexEvent);
-    }
-
-    public void onDropIndex(DropIndexEvent indexEvent) throws MetaException {
-      pushEventId(EventType.DROP_INDEX, indexEvent);
-    }
-
-    public void onAlterIndex(AlterIndexEvent indexEvent) throws MetaException {
-      pushEventId(EventType.ALTER_INDEX, indexEvent);
-    }
-
-    public void onCreateFunction (CreateFunctionEvent fnEvent) throws MetaException {
-      pushEventId(EventType.CREATE_FUNCTION, fnEvent);
-    }
-
-    public void onDropFunction (DropFunctionEvent fnEvent) throws MetaException {
-      pushEventId(EventType.DROP_FUNCTION, fnEvent);
-    }
-
-    public void onInsert(InsertEvent insertEvent) throws MetaException {
-      pushEventId(EventType.INSERT, insertEvent);
-    }
-  }
-
   @SuppressWarnings("rawtypes")
   @BeforeClass
   public static void connectToMetastore() throws Exception {
     HiveConf conf = new HiveConf();
     conf.setVar(HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS,
         DbNotificationListener.class.getName());
-    conf.setVar(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS, MockMetaStoreEventListener.class.getName());
     conf.setVar(HiveConf.ConfVars.METASTORE_EVENT_DB_LISTENER_TTL, String.valueOf(EVENTS_TTL) + "s");
     conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
     conf.setBoolVar(HiveConf.ConfVars.FIRE_EVENTS_FOR_DML, true);
@@ -253,12 +139,6 @@ public class TestDbNotificationListener {
     DummyRawStoreFailEvent.setEventSucceed(true);
   }
 
-  @After
-  public void tearDown() {
-    MockMetaStoreEventListener.clearEvents();
-  }
-
-
   @Test
   public void createDatabase() throws Exception {
     String dbName = "createdb";
@@ -284,9 +164,6 @@ public class TestDbNotificationListener {
     CreateDatabaseMessage createDbMsg = md.getCreateDatabaseMessage(event.getMessage());
     assertEquals(dbName, createDbMsg.getDB());
 
-    // Verify the eventID was passed to the non-transactional listener
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_DATABASE, firstEventId + 1);
-
     // When hive.metastore.transactional.event.listeners is set,
     // a failed event should not create a new notification
     DummyRawStoreFailEvent.setEventSucceed(false);
@@ -329,10 +206,6 @@ public class TestDbNotificationListener {
     DropDatabaseMessage dropDbMsg = md.getDropDatabaseMessage(event.getMessage());
     assertEquals(dbName, dropDbMsg.getDB());
 
-    // Verify the eventID was passed to the non-transactional listener
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.DROP_DATABASE, firstEventId + 2);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_DATABASE, firstEventId + 1);
-
     // When hive.metastore.transactional.event.listeners is set,
     // a failed event should not create a new notification
     db = new Database(dbName2, dbDescription, dbLocationUri, emptyParameters);
@@ -383,9 +256,6 @@ public class TestDbNotificationListener {
     assertEquals(tblName, createTblMsg.getTable());
     assertEquals(table, createTblMsg.getTableObj());
 
-    // Verify the eventID was passed to the non-transactional listener
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
-
     // When hive.metastore.transactional.event.listeners is set,
     // a failed event should not create a new notification
     table =
@@ -442,9 +312,6 @@ public class TestDbNotificationListener {
     AlterTableMessage alterTableMessage = md.getAlterTableMessage(event.getMessage());
     assertEquals(table, alterTableMessage.getTableObjAfter());
 
-    // Verify the eventID was passed to the non-transactional listener
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
-
     // When hive.metastore.transactional.event.listeners is set,
     // a failed event should not create a new notification
     DummyRawStoreFailEvent.setEventSucceed(false);
@@ -496,10 +363,6 @@ public class TestDbNotificationListener {
     assertEquals(defaultDbName, dropTblMsg.getDB());
     assertEquals(tblName, dropTblMsg.getTable());
 
-    // Verify the eventID was passed to the non-transactional listener
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.DROP_TABLE, firstEventId + 2);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
-
     // When hive.metastore.transactional.event.listeners is set,
     // a failed event should not create a new notification
     table =
@@ -565,10 +428,6 @@ public class TestDbNotificationListener {
     assertTrue(ptnIter.hasNext());
     assertEquals(partition, ptnIter.next());
 
-    // Verify the eventID was passed to the non-transactional listener
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.ADD_PARTITION, firstEventId + 2);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
-
     // When hive.metastore.transactional.event.listeners is set,
     // a failed event should not create a new notification
     partition =
@@ -635,10 +494,6 @@ public class TestDbNotificationListener {
     assertEquals(tblName, alterPtnMsg.getTable());
     assertEquals(newPart, alterPtnMsg.getPtnObjAfter());
 
-    // Verify the eventID was passed to the non-transactional listener
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.ADD_PARTITION, firstEventId + 2);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
-
     // When hive.metastore.transactional.event.listeners is set,
     // a failed event should not create a new notification
     DummyRawStoreFailEvent.setEventSucceed(false);
@@ -702,11 +557,6 @@ public class TestDbNotificationListener {
     assertEquals(table.getTableName(), tableObj.getTableName());
     assertEquals(table.getOwner(), tableObj.getOwner());
 
-    // Verify the eventID was passed to the non-transactional listener
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.DROP_PARTITION, firstEventId + 3);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.ADD_PARTITION, firstEventId + 2);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
-
     // When hive.metastore.transactional.event.listeners is set,
     // a failed event should not create a new notification
     List<String> newpartCol1Vals = Arrays.asList("tomorrow");
@@ -803,13 +653,6 @@ public class TestDbNotificationListener {
     Iterator<Map<String, String>> parts = dropPtnMsg.getPartitions().iterator();
     assertTrue(parts.hasNext());
     assertEquals(part1.getValues(), Lists.newArrayList(parts.next().values()));
-
-    // Verify the eventID was passed to the non-transactional listener
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.DROP_PARTITION, firstEventId + 5);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.ADD_PARTITION, firstEventId + 4);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.ADD_PARTITION, firstEventId + 3);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 2);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
   }
 
   @Test
@@ -850,9 +693,6 @@ public class TestDbNotificationListener {
     assertEquals(ResourceType.JAR, funcObj.getResourceUris().get(0).getResourceType());
     assertEquals(funcResource, funcObj.getResourceUris().get(0).getUri());
 
-    // Verify the eventID was passed to the non-transactional listener
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_FUNCTION, firstEventId + 1);
-
     // When hive.metastore.transactional.event.listeners is set,
     // a failed event should not create a new notification
     DummyRawStoreFailEvent.setEventSucceed(false);
@@ -902,10 +742,6 @@ public class TestDbNotificationListener {
     assertEquals(defaultDbName, dropFuncMsg.getDB());
     assertEquals(funcName, dropFuncMsg.getFunctionName());
 
-    // Verify the eventID was passed to the non-transactional listener
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.DROP_FUNCTION, firstEventId + 2);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_FUNCTION, firstEventId + 1);
-
     // When hive.metastore.transactional.event.listeners is set,
     // a failed event should not create a new notification
     func =
@@ -971,11 +807,6 @@ public class TestDbNotificationListener {
     assertEquals(tableName, indexObj.getOrigTableName());
     assertEquals(indexTableName, indexObj.getIndexTableName());
 
-    // Verify the eventID was passed to the non-transactional listener
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_INDEX, firstEventId + 3);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 2);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
-
     // When hive.metastore.transactional.event.listeners is set,
     // a failed event should not create a new notification
     DummyRawStoreFailEvent.setEventSucceed(false);
@@ -1042,12 +873,6 @@ public class TestDbNotificationListener {
     assertEquals(indexTableName.toLowerCase(), dropIdxMsg.getIndexTableName());
     assertEquals(tableName.toLowerCase(), dropIdxMsg.getOrigTableName());
 
-    // Verify the eventID was passed to the non-transactional listener
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.DROP_INDEX, firstEventId + 4);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_INDEX, firstEventId + 3);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 2);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
-
     // When hive.metastore.transactional.event.listeners is set,
     // a failed event should not create a new notification
     index =
@@ -1122,12 +947,6 @@ public class TestDbNotificationListener {
     assertEquals(indexTableName, indexObj.getIndexTableName());
     assertTrue(indexObj.getCreateTime() < indexObj.getLastAccessTime());
 
-    // Verify the eventID was passed to the non-transactional listener
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.ALTER_INDEX, firstEventId + 4);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_INDEX, firstEventId + 3);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 2);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
-
     // When hive.metastore.transactional.event.listeners is set,
     // a failed event should not create a new notification
     DummyRawStoreFailEvent.setEventSucceed(false);
@@ -1184,10 +1003,6 @@ public class TestDbNotificationListener {
     assertEquals(tblName, event.getTableName());
     // Parse the message field
     verifyInsert(event, defaultDbName, tblName);
-
-    // Verify the eventID was passed to the non-transactional listener
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.INSERT, firstEventId + 2);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
   }
 
   @Test
@@ -1248,11 +1063,6 @@ public class TestDbNotificationListener {
     Map<String,String> partKeyValsFromNotif = insertMessage.getPartitionKeyValues();
 
     assertMapEquals(partKeyVals, partKeyValsFromNotif);
-
-    // Verify the eventID was passed to the non-transactional listener
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.INSERT, firstEventId + 3);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.ADD_PARTITION, firstEventId + 2);
-    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
   }
 
 

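The block removed from TestDbNotificationListener above verified that the event ID written by DbNotificationListener is also handed to the non-transactional listeners: a mock listener pushed (event type, event ID) pairs onto a stack whenever the DB_NOTIFICATION_EVENT_ID_KEY_NAME parameter was present, and each test popped them in reverse order and compared. A compact sketch of that stack-based verification pattern follows; EventKind, the key string, and the class name are invented for illustration and it uses only the JDK, so it shows the technique rather than the Hive test itself.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.Map;

// Stack-based event-id verification, modelled on the removed
// MockMetaStoreEventListener; all names here are invented for the sketch.
public final class EventIdVerifier {
  enum EventKind { CREATE_DATABASE, CREATE_TABLE, DROP_TABLE }

  private static final String EVENT_ID_KEY = "dbNotificationEventId"; // placeholder key name
  private static final Deque<Map.Entry<EventKind, String>> SEEN = new ArrayDeque<>();

  // What the mock listener would do for every successful metastore event.
  static void push(EventKind kind, Map<String, String> eventParameters) {
    String id = eventParameters.get(EVENT_ID_KEY);
    if (id != null) {
      SEEN.push(Map.entry(kind, id));
    }
  }

  // What the test would do afterwards: newest event first, type and id must both match.
  static void popAndVerify(EventKind expectedKind, long expectedId) {
    Map.Entry<EventKind, String> top = SEEN.pop();
    if (top.getKey() != expectedKind || !top.getValue().equals(Long.toString(expectedId))) {
      throw new AssertionError("expected " + expectedKind + "/" + expectedId + " but saw " + top);
    }
  }

  public static void main(String[] args) {
    Map<String, String> params = new HashMap<>();
    params.put(EVENT_ID_KEY, "2");
    push(EventKind.CREATE_DATABASE, params);
    params.put(EVENT_ID_KEY, "3");
    push(EventKind.CREATE_TABLE, params);
    popAndVerify(EventKind.CREATE_TABLE, 3);    // most recent event first
    popAndVerify(EventKind.CREATE_DATABASE, 2);
    System.out.println("event ids propagated as expected");
  }
}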
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/pom.xml b/itests/hive-blobstore/pom.xml
index d1c732d..b18398d 100644
--- a/itests/hive-blobstore/pom.xml
+++ b/itests/hive-blobstore/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/import_addpartition_blobstore_to_blobstore.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/import_addpartition_blobstore_to_blobstore.q b/itests/hive-blobstore/src/test/queries/clientpositive/import_addpartition_blobstore_to_blobstore.q
deleted file mode 100644
index 8fee8ed..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/import_addpartition_blobstore_to_blobstore.q
+++ /dev/null
@@ -1,45 +0,0 @@
--- Check we can create a partitioned table in the warehouse, 
--- export it to a blobstore, and then import its different partitions
--- using the blobstore as target location
-DROP TABLE exim_employee;
-CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE;
-
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in");
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us");
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz");
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-dfs -rm -r -f ${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_blobstore/export/exim_employee;
-EXPORT TABLE exim_employee
-TO '${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_blobstore/export/exim_employee';
-
-DROP TABLE exim_employee;
-dfs -rm -r -f ${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_blobstore/import/exim_employee;
-IMPORT TABLE exim_employee PARTITION (emp_country='us')
-FROM '${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_blobstore/export/exim_employee'
-LOCATION '${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_blobstore/import/exim_employee';
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-IMPORT TABLE exim_employee PARTITION (emp_country='cz')
-FROM '${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_blobstore/export/exim_employee'
-LOCATION '${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_blobstore/import/exim_employee';
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-IMPORT TABLE exim_employee PARTITION (emp_country='in')
-FROM '${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_blobstore/export/exim_employee'
-LOCATION '${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_blobstore/import/exim_employee';
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
\ No newline at end of file

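The deleted import_addpartition_blobstore_to_blobstore.q above walks through the round trip entirely in SQL: export the partitioned table to the blobstore, drop it, then IMPORT each partition with an explicit LOCATION on the blobstore. A hedged sketch of driving the same statement sequence from Java over Hive JDBC is shown below; the JDBC URL, credentials, and s3a paths are placeholder assumptions, and only the statement shapes are taken from the script.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

// Sketch of the export-then-import-per-partition round trip from the deleted
// .q script, driven through HiveServer2. URL, user, and paths are placeholders.
public final class ExportImportRoundTrip {
  public static void main(String[] args) throws Exception {
    String exportPath = "s3a://test-bucket/exim/export/exim_employee"; // placeholder
    String importPath = "s3a://test-bucket/exim/import/exim_employee"; // placeholder
    try (Connection conn = DriverManager.getConnection(
             "jdbc:hive2://localhost:10000/default", "hive", "");      // placeholder
         Statement stmt = conn.createStatement()) {
      stmt.execute("EXPORT TABLE exim_employee TO '" + exportPath + "'");
      stmt.execute("DROP TABLE exim_employee");
      // Re-create the table one partition at a time, keeping the data on the blobstore.
      for (String country : new String[] {"us", "cz", "in"}) {
        stmt.execute("IMPORT TABLE exim_employee PARTITION (emp_country='" + country + "')"
            + " FROM '" + exportPath + "' LOCATION '" + importPath + "'");
      }
    }
  }
}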
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/import_addpartition_blobstore_to_local.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/import_addpartition_blobstore_to_local.q b/itests/hive-blobstore/src/test/queries/clientpositive/import_addpartition_blobstore_to_local.q
deleted file mode 100644
index 28bc399..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/import_addpartition_blobstore_to_local.q
+++ /dev/null
@@ -1,44 +0,0 @@
--- Check we can create a partitioned table in the warehouse, 
--- export it to a blobstore, and then import its different partitions
--- using the local filesystem as target location
-DROP TABLE exim_employee;
-CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in");
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us");
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz");
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-dfs -rm -r -f ${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_local/export/exim_employee;
-EXPORT TABLE exim_employee
-TO '${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_local/export/exim_employee';
-
-DROP TABLE exim_employee;
-dfs -rm -r -f ${system:build.test.dir}/import_addpartition_blobstore_to_local/import/exim_employee;
-IMPORT TABLE exim_employee PARTITION (emp_country='us')
-FROM '${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_local/export/exim_employee'
-LOCATION 'file://${system:build.test.dir}/import_addpartition_blobstore_to_local/import/exim_employee';
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-IMPORT TABLE exim_employee PARTITION (emp_country='cz')
-FROM '${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_local/export/exim_employee'
-LOCATION 'file://${system:build.test.dir}/import_addpartition_blobstore_to_local/import/exim_employee';
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-IMPORT TABLE exim_employee PARTITION (emp_country='in')
-FROM '${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_local/export/exim_employee'
-LOCATION 'file://${system:build.test.dir}/import_addpartition_blobstore_to_local/import/exim_employee';
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/import_addpartition_blobstore_to_warehouse.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/import_addpartition_blobstore_to_warehouse.q b/itests/hive-blobstore/src/test/queries/clientpositive/import_addpartition_blobstore_to_warehouse.q
deleted file mode 100644
index 987dacf..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/import_addpartition_blobstore_to_warehouse.q
+++ /dev/null
@@ -1,41 +0,0 @@
--- Check we can create a partitioned table in the warehouse, 
--- export it to a blobstore, and then import its different partitions
--- using the warehouse as target location
-DROP TABLE exim_employee;
-CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE;
-
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in");
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us");
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz");
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-dfs -rm -r -f ${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_warehouse/export/exim_employee;
-EXPORT TABLE exim_employee
-TO '${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_warehouse/export/exim_employee';
-
-DROP TABLE exim_employee;
-IMPORT TABLE exim_employee PARTITION (emp_country='us')
-FROM '${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_warehouse/export/exim_employee';
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-IMPORT TABLE exim_employee PARTITION (emp_country='cz')
-FROM '${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_warehouse/export/exim_employee';
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-IMPORT TABLE exim_employee PARTITION (emp_country='in')
-FROM '${hiveconf:test.blobstore.path.unique}/import_addpartition_blobstore_to_warehouse/export/exim_employee';
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/import_addpartition_local_to_blobstore.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/import_addpartition_local_to_blobstore.q b/itests/hive-blobstore/src/test/queries/clientpositive/import_addpartition_local_to_blobstore.q
deleted file mode 100644
index 8fde250..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/import_addpartition_local_to_blobstore.q
+++ /dev/null
@@ -1,44 +0,0 @@
--- Check we can create a partitioned table in the warehouse, 
--- export it to the local filesystem, and then import its 
--- different partitions using a blobstore as target location
-DROP TABLE exim_employee;
-CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in");
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us");
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz");
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-dfs -rm -r -f ${system:build.test.dir}/import_addpartition_local_to_blobstore/export/exim_employee;
-EXPORT TABLE exim_employee
-TO 'file://${system:build.test.dir}/import_addpartition_local_to_blobstore/export/exim_employee';
-
-DROP TABLE exim_employee;
-dfs -rm -r -f ${hiveconf:test.blobstore.path.unique}/import_addpartition_local_to_blobstore/import/exim_employee;
-IMPORT TABLE exim_employee PARTITION (emp_country='us')
-FROM 'file://${system:build.test.dir}/import_addpartition_local_to_blobstore/export/exim_employee'
-LOCATION '${hiveconf:test.blobstore.path.unique}/import_addpartition_local_to_blobstore/import/exim_employee';
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-IMPORT TABLE exim_employee PARTITION (emp_country='cz')
-FROM 'file://${system:build.test.dir}/import_addpartition_local_to_blobstore/export/exim_employee'
-LOCATION '${hiveconf:test.blobstore.path.unique}/import_addpartition_local_to_blobstore/import/exim_employee';
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-IMPORT TABLE exim_employee PARTITION (emp_country='in')
-FROM 'file://${system:build.test.dir}/import_addpartition_local_to_blobstore/export/exim_employee'
-LOCATION '${hiveconf:test.blobstore.path.unique}/import_addpartition_local_to_blobstore/import/exim_employee';
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_blobstore.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_blobstore.q b/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_blobstore.q
deleted file mode 100644
index a9f9a8f..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_blobstore.q
+++ /dev/null
@@ -1,30 +0,0 @@
--- Check we can create a partitioned table in the warehouse, 
--- export it to a blobstore, and then import the
--- whole table using the blobstore as target location
-DROP TABLE exim_employee;
-CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE;
-
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in");
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us");
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz");
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-dfs -rm -r -f ${hiveconf:test.blobstore.path.unique}/import_blobstore_to_blobstore/export/exim_employee;
-EXPORT TABLE exim_employee PARTITION (emp_country='us')
-TO '${hiveconf:test.blobstore.path.unique}/import_blobstore_to_blobstore/export/exim_employee';
-
-DROP TABLE exim_employee;
-dfs -rm -r -f ${hiveconf:test.blobstore.path.unique}/import_blobstore_to_blobstore/import/exim_employee;
-IMPORT FROM '${hiveconf:test.blobstore.path.unique}/import_blobstore_to_blobstore/export/exim_employee'
-LOCATION '${hiveconf:test.blobstore.path.unique}/import_blobstore_to_blobstore/import/exim_employee';
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_blobstore_nonpart.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_blobstore_nonpart.q b/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_blobstore_nonpart.q
deleted file mode 100644
index 7b3f0a3..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_blobstore_nonpart.q
+++ /dev/null
@@ -1,25 +0,0 @@
--- Check we can create a non partitioned table in the warehouse, 
--- export it to a blobstore, and then import the
--- table using the blobstore as target location
-DROP TABLE exim_employee;
-CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-STORED AS TEXTFILE;
-
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee;
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-dfs -rm -r -f ${hiveconf:test.blobstore.path.unique}/import_blobstore_to_blobstore_nonpart/export/exim_employee;
-EXPORT TABLE exim_employee
-TO '${hiveconf:test.blobstore.path.unique}/import_blobstore_to_blobstore_nonpart/export/exim_employee';
-
-DROP TABLE exim_employee;
-dfs -rm -r -f ${hiveconf:test.blobstore.path.unique}/import_blobstore_to_blobstore_nonpart/import/exim_employee;
-IMPORT FROM '${hiveconf:test.blobstore.path.unique}/import_blobstore_to_blobstore_nonpart/export/exim_employee'
-LOCATION '${hiveconf:test.blobstore.path.unique}/import_blobstore_to_blobstore_nonpart/import/exim_employee';
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_local.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_local.q b/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_local.q
deleted file mode 100644
index ac3c451..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_local.q
+++ /dev/null
@@ -1,30 +0,0 @@
--- Check we can create a partitioned table in the warehouse, 
--- export it to a blobstore, and then import the
--- whole table using the local filesystem as target location
-DROP TABLE exim_employee;
-CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE;
-
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in");
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us");
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz");
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-dfs -rm -r -f ${hiveconf:test.blobstore.path.unique}/import_blobstore_to_local/export/exim_employee;
-EXPORT TABLE exim_employee PARTITION (emp_country='us')
-TO '${hiveconf:test.blobstore.path.unique}/import_blobstore_to_local/export/exim_employee';
-
-DROP TABLE exim_employee;
-dfs -rm -r -f ${system:build.test.dir}/import_blobstore_to_local/import/exim_employee;
-IMPORT FROM '${hiveconf:test.blobstore.path.unique}/import_blobstore_to_local/export/exim_employee'
-LOCATION "file://${system:build.test.dir}/import_blobstore_to_local/import/exim_employee";
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_warehouse.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_warehouse.q b/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_warehouse.q
deleted file mode 100644
index 9f6fc54..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_warehouse.q
+++ /dev/null
@@ -1,28 +0,0 @@
--- Check we can create a partitioned table in the warehouse, 
--- export it to a blobstore, and then import the
--- whole table using the warehouse as target location
-DROP TABLE exim_employee;
-CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE;
-
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in");
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us");
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz");
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-dfs -rm -r -f ${hiveconf:test.blobstore.path.unique}/import_blobstore_to_warehouse/export/exim_employee;
-EXPORT TABLE exim_employee PARTITION (emp_country='us')
-TO '${hiveconf:test.blobstore.path.unique}/import_blobstore_to_warehouse/export/exim_employee';
-
-DROP TABLE exim_employee;
-IMPORT FROM '${hiveconf:test.blobstore.path.unique}/import_blobstore_to_warehouse/export/exim_employee';
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_warehouse_nonpart.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_warehouse_nonpart.q b/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_warehouse_nonpart.q
deleted file mode 100644
index 6f28a51..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/import_blobstore_to_warehouse_nonpart.q
+++ /dev/null
@@ -1,23 +0,0 @@
--- Check we can create a non partitioned table in the warehouse, 
--- export it to a blobstore, and then import the
--- table using the warehouse as target location
-DROP TABLE exim_employee;
-CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-STORED AS TEXTFILE;
-
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee;
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-dfs -rm -r -f ${hiveconf:test.blobstore.path.unique}/import_blobstore_to_warehouse_nonpart/export/exim_employee;
-EXPORT TABLE exim_employee
-TO '${hiveconf:test.blobstore.path.unique}/import_blobstore_to_warehouse_nonpart/export/exim_employee';
-
-DROP TABLE exim_employee;
-IMPORT FROM '${hiveconf:test.blobstore.path.unique}/import_blobstore_to_warehouse_nonpart/export/exim_employee';
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/import_local_to_blobstore.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/import_local_to_blobstore.q b/itests/hive-blobstore/src/test/queries/clientpositive/import_local_to_blobstore.q
deleted file mode 100644
index 0412d3c..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/import_local_to_blobstore.q
+++ /dev/null
@@ -1,31 +0,0 @@
--- Check we can create a partitioned table in the warehouse, 
--- export it to the local filesystem, and then import the
--- whole table using a blobstore as target location
-DROP TABLE exim_employee;
-CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE;
-
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in");
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us");
-LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz");
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;
-
-dfs -rm -r -f ${system:build.test.dir}/import_local_to_blobstore/export/exim_employee;
-
-EXPORT TABLE exim_employee PARTITION (emp_country='us')
-TO 'file://${system:build.test.dir}/import_local_to_blobstore/export/exim_employee';
-
-DROP TABLE exim_employee;
-dfs -rm -r -f ${hiveconf:test.blobstore.path.unique}/import_local_to_blobstore/import/exim_employee;
-IMPORT FROM "file://${system:build.test.dir}/import_local_to_blobstore/export/exim_employee"
-LOCATION '${hiveconf:test.blobstore.path.unique}/import_local_to_blobstore/import/exim_employee';
-
-DESCRIBE EXTENDED exim_employee;
-SELECT * FROM exim_employee;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/insert_blobstore_to_blobstore.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/insert_blobstore_to_blobstore.q b/itests/hive-blobstore/src/test/queries/clientpositive/insert_blobstore_to_blobstore.q
deleted file mode 100644
index 8219ee2..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/insert_blobstore_to_blobstore.q
+++ /dev/null
@@ -1,29 +0,0 @@
--- Test inserting into a blobstore table from another blobstore table.
-
-DROP TABLE blobstore_source;
-CREATE TABLE blobstore_source (
-    a string,
-    b string,
-    c double)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY ' '
-COLLECTION ITEMS TERMINATED BY '\t'
-LINES TERMINATED BY '\n'
-LOCATION '${hiveconf:test.blobstore.path.unique}/insert_blobstore_to_blobstore/blobstore_source';
-
-LOAD DATA LOCAL INPATH '../../data/files/3col_data.txt' INTO TABLE blobstore_source;
-
-DROP TABLE blobstore_table;
-CREATE TABLE blobstore_table LIKE blobstore_source
-LOCATION '${hiveconf:test.blobstore.path.unique}/insert_blobstore_to_blobstore/blobstore_table';
-
-INSERT OVERWRITE TABLE blobstore_table SELECT * FROM blobstore_source;
-
-SELECT COUNT(*) FROM blobstore_table;
-
--- INSERT INTO should append all records to existing ones.
-INSERT INTO TABLE blobstore_table SELECT * FROM blobstore_source;
-
-SELECT COUNT(*) FROM blobstore_table;
-
-SELECT * FROM blobstore_table;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/insert_empty_into_blobstore.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/insert_empty_into_blobstore.q b/itests/hive-blobstore/src/test/queries/clientpositive/insert_empty_into_blobstore.q
deleted file mode 100644
index d4f0c71..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/insert_empty_into_blobstore.q
+++ /dev/null
@@ -1,53 +0,0 @@
--- Test inserting empty rows into dynamic partitioned and list bucketed blobstore tables
-
-SET hive.blobstore.optimizations.enabled=true;
-
-DROP TABLE empty;
-DROP TABLE blobstore_dynamic_partitioning;
-DROP TABLE blobstore_list_bucketing;
-
-CREATE TABLE empty (
-    id int,
-    name string,
-    dept string,
-    pt string,
-    dt string,
-    hr string);
-
-CREATE TABLE blobstore_dynamic_partitioning (
-    id int,
-    name string,
-    dept string)
-PARTITIONED BY (
-    pt string,
-    dt string,
-    hr string)
-LOCATION '${hiveconf:test.blobstore.path.unique}/insert_empty_into_blobstore/blobstore_dynamic_partitioning';
-
-INSERT INTO TABLE blobstore_dynamic_partitioning PARTITION (pt='a', dt, hr) SELECT id, name, dept, dt, hr FROM empty;
-
-SELECT COUNT(*) FROM blobstore_dynamic_partitioning;
-
-CREATE TABLE blobstore_list_bucketing (
-    id int,
-    name string,
-    dept string)
-PARTITIONED BY (
-    pt string,
-    dt string,
-    hr string)
-SKEWED BY (id) ON ('1', '2', '3') STORED AS DIRECTORIES
-LOCATION '${hiveconf:test.blobstore.path.unique}/insert_empty_into_blobstore/blobstore_list_bucketing';
-
-INSERT INTO TABLE blobstore_list_bucketing PARTITION (pt='a', dt='a', hr='a') SELECT id, name, dept FROM empty;
-
-SELECT COUNT(*) FROM blobstore_list_bucketing;
-
--- Now test empty inserts with blobstore optimizations turned off. This should give us same results.
-SET hive.blobstore.optimizations.enabled=false;
-
-INSERT INTO TABLE blobstore_dynamic_partitioning PARTITION (pt='b', dt, hr) SELECT id, name, dept, dt, hr FROM empty;
-SELECT COUNT(*) FROM blobstore_dynamic_partitioning;
-
-INSERT INTO TABLE blobstore_list_bucketing PARTITION (pt='b', dt='b', hr='b') SELECT id, name, dept FROM empty;
-SELECT COUNT(*) FROM blobstore_list_bucketing;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/orc_buckets.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/orc_buckets.q b/itests/hive-blobstore/src/test/queries/clientpositive/orc_buckets.q
deleted file mode 100644
index 9571842..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/orc_buckets.q
+++ /dev/null
@@ -1,31 +0,0 @@
--- Test simple interaction with partitioned bucketed table with orc format in blobstore
-
-SET hive.exec.dynamic.partition=true;
-SET hive.exec.reducers.max=10;
-SET hive.exec.dynamic.partition.mode=nonstrict;
-
-DROP TABLE blobstore_source;
-CREATE TABLE blobstore_source(a STRING, b STRING, c DOUBLE)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY ' '
-COLLECTION ITEMS TERMINATED BY '\t'
-LINES TERMINATED BY '\n'
-LOCATION '${hiveconf:test.blobstore.path.unique}/orc_buckets/blobstore_source/';
-LOAD DATA LOCAL INPATH '../../data/files/3col_data.txt' INTO TABLE blobstore_source;
-
-DROP TABLE orc_buckets;
-CREATE TABLE orc_buckets (a STRING, value DOUBLE)
-PARTITIONED BY (b STRING)
-CLUSTERED BY (a) INTO 10 BUCKETS
-STORED AS ORC
-LOCATION '${hiveconf:test.blobstore.path.unique}/orc_buckets/orc_buckets';
-
-INSERT OVERWRITE TABLE orc_buckets
-PARTITION (b)
-SELECT a, c, b FROM blobstore_source;
-SELECT * FROM orc_buckets;
-
-INSERT INTO TABLE orc_buckets
-PARTITION (b)
-SELECT a, c, b FROM blobstore_source;
-SELECT * FROM orc_buckets;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/orc_format_nonpart.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/orc_format_nonpart.q b/itests/hive-blobstore/src/test/queries/clientpositive/orc_format_nonpart.q
deleted file mode 100644
index ad95459..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/orc_format_nonpart.q
+++ /dev/null
@@ -1,30 +0,0 @@
--- Test INSERT OVERWRITE and INSERT INTO on orc table in blobstore
-
-DROP TABLE blobstore_source;
-CREATE TABLE blobstore_source(a STRING, b STRING, c DOUBLE)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY ' '
-COLLECTION ITEMS TERMINATED BY '\t'
-LINES TERMINATED BY '\n'
-LOCATION '${hiveconf:test.blobstore.path.unique}/orc_format_nonpart/blobstore_source/';
-LOAD DATA LOCAL INPATH '../../data/files/3col_data.txt' INTO TABLE blobstore_source;
-
-DROP TABLE orc_table;
-CREATE EXTERNAL TABLE orc_table (a INT, b STRING, value DOUBLE) STORED AS ORC
-LOCATION '${hiveconf:test.blobstore.path.unique}/orc_format_nonpart/orc_table';
- 
-INSERT OVERWRITE TABLE orc_table
-SELECT * FROM blobstore_source;
- 
-SELECT * FROM orc_table;
-SELECT a FROM orc_table GROUP BY a;
-SELECT b FROM orc_table GROUP BY b;
-SELECT value FROM orc_table GROUP BY value;
-
-INSERT INTO TABLE orc_table
-SELECT * FROM blobstore_source;
-
-SELECT * FROM orc_table;
-SELECT a FROM orc_table GROUP BY a;
-SELECT b FROM orc_table GROUP BY b;
-SELECT value FROM orc_table GROUP BY value;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/orc_format_part.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/orc_format_part.q b/itests/hive-blobstore/src/test/queries/clientpositive/orc_format_part.q
deleted file mode 100644
index 358eccd..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/orc_format_part.q
+++ /dev/null
@@ -1,67 +0,0 @@
--- Test INSERT INTO and INSERT OVERWRITE on partitioned orc table in blobstore
-
-DROP TABLE src_events;
-CREATE TABLE src_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT,
-  run_date    STRING,
-  game_id     INT,
-  event_name  STRING
-)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
-LOCATION '${hiveconf:test.blobstore.path.unique}/orc_format_part/src_events/';
-LOAD DATA LOCAL INPATH '../../data/files/events.txt' INTO TABLE src_events;
-
-DROP TABLE orc_events;
-CREATE TABLE orc_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT
-)
-PARTITIONED BY (run_date STRING, game_id INT, event_name STRING)
-STORED AS ORC
-LOCATION '${hiveconf:test.blobstore.path.unique}/orc_format_part/orc_events';
-
-SET hive.exec.dynamic.partition=true;
-SET hive.exec.dynamic.partition.mode=nonstrict;
-
-INSERT OVERWRITE TABLE orc_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events;
-SHOW PARTITIONS orc_events;
-SELECT COUNT(*) FROM orc_events;
-SELECT COUNT(*) FROM orc_events WHERE run_date=20120921;
-SELECT COUNT(*) FROM orc_events WHERE run_date=20121121;
-
-INSERT OVERWRITE TABLE orc_events PARTITION (run_date=201211, game_id, event_name)
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid,game_id,event_name FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211';
-SHOW PARTITIONS orc_events;
-SELECT COUNT(*) FROM orc_events;
-
-INSERT INTO TABLE orc_events PARTITION (run_date=201209, game_id=39, event_name)
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid,event_name FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39;
-SELECT COUNT(*) FROM orc_events;
-
-INSERT INTO TABLE orc_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39 AND event_name='hq_change';
-SELECT COUNT(*) FROM orc_events;
-
-INSERT OVERWRITE TABLE orc_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39 AND event_name='hq_change';
-SELECT COUNT(*) FROM orc_events;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/orc_nonstd_partitions_loc.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/orc_nonstd_partitions_loc.q b/itests/hive-blobstore/src/test/queries/clientpositive/orc_nonstd_partitions_loc.q
deleted file mode 100644
index c462538..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/orc_nonstd_partitions_loc.q
+++ /dev/null
@@ -1,100 +0,0 @@
--- Test table in orc format with non-standard partition locations in blobstore
-
-DROP TABLE src_events;
-CREATE TABLE src_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT,
-  run_date    STRING,
-  game_id     INT,
-  event_name  STRING
-)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
-LOCATION '${hiveconf:test.blobstore.path.unique}/orc_nonstd_partitions_loc/src_events/';
-LOAD DATA LOCAL INPATH '../../data/files/events.txt' INTO TABLE src_events;
-
-DROP TABLE orc_events;
-CREATE TABLE orc_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT
-)
-PARTITIONED BY (run_date STRING, game_id INT, event_name STRING)
-STORED AS ORC
-LOCATION '${hiveconf:test.blobstore.path.unique}/orc_nonstd_partitions_loc/orc_events/';
-
-SET hive.exec.dynamic.partition=true;
-SET hive.exec.dynamic.partition.mode=nonstrict;
-
-INSERT OVERWRITE TABLE orc_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events;
-SHOW PARTITIONS orc_events;
-SELECT COUNT(*) FROM orc_events;
-
--- verify INSERT OVERWRITE and INSERT INTO nonstandard partition location
-ALTER TABLE orc_events ADD PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-LOCATION '${hiveconf:test.blobstore.path.unique}/orc_nonstd_partitions_loc/orc_nonstd_loc/ns-part-1/';
-INSERT OVERWRITE TABLE orc_events PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211';
-SHOW PARTITIONS orc_events;
-SELECT COUNT(*) FROM orc_events;
-INSERT INTO TABLE orc_events PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211';
-SHOW PARTITIONS orc_events;
-SELECT COUNT(*) FROM orc_events;
-
-SET hive.merge.mapfiles=false;
-
--- verify INSERT OVERWRITE and INSERT INTO nonstandard partition location with hive.merge.mapfiles false
-ALTER TABLE orc_events ADD PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-LOCATION '${hiveconf:test.blobstore.path.unique}/orc_nonstd_partitions_loc/orc_nonstd_loc/ns-part-2/';
-INSERT OVERWRITE TABLE orc_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209';
-INSERT INTO TABLE orc_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209';
-SHOW PARTITIONS orc_events;
-SELECT COUNT(*) FROM orc_events;
-
--- verify dynamic INSERT OVERWRITE over all partitions (standard and nonstandard locations) with hive.merge.mapfiles false
-INSERT OVERWRITE TABLE orc_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events;
-SHOW PARTITIONS orc_events;
-SELECT COUNT(*) FROM orc_events;
-
-SET hive.merge.mapfiles=true;
-
-ALTER TABLE orc_events ADD PARTITION (run_date=201207, game_id=39, event_name='hq_change')
-LOCATION '${hiveconf:test.blobstore.path.unique}/orc_nonstd_partitions_loc/orc_nonstd_loc/ns-part-3/';
-INSERT INTO TABLE orc_events PARTITION (run_date=201207, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209';
-SHOW PARTITIONS orc_events;
-SELECT COUNT(*) FROM orc_events;
-
--- verify dynamic INSERT OVERWRITE over all partitions (standard and nonstandard locations) with hive.merge.mapfiles true
-INSERT OVERWRITE TABLE orc_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events;
-SHOW PARTITIONS orc_events;
-SELECT COUNT(*) FROM orc_events;
-
-ALTER TABLE orc_events DROP PARTITION (run_date=201211, game_id=39, event_name='hq_change');
-ALTER TABLE orc_events DROP PARTITION (run_date=201209, game_id=39, event_name='hq_change');
-ALTER TABLE orc_events DROP PARTITION (run_date=201207, game_id=39, event_name='hq_change');
-SHOW PARTITIONS orc_events;
-SELECT COUNT(*) FROM orc_events;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/rcfile_buckets.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/rcfile_buckets.q b/itests/hive-blobstore/src/test/queries/clientpositive/rcfile_buckets.q
deleted file mode 100644
index 606ef720..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/rcfile_buckets.q
+++ /dev/null
@@ -1,31 +0,0 @@
--- Test simple interaction with partitioned bucketed table with rcfile format in blobstore
-
-SET hive.exec.dynamic.partition=true;
-SET hive.exec.reducers.max=10;
-SET hive.exec.dynamic.partition.mode=nonstrict;
-
-DROP TABLE blobstore_source;
-CREATE TABLE blobstore_source(a STRING, b STRING, c DOUBLE)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY ' '
-COLLECTION ITEMS TERMINATED BY '\t'
-LINES TERMINATED BY '\n'
-LOCATION '${hiveconf:test.blobstore.path.unique}/rcfile_buckets/blobstore_source/';
-LOAD DATA LOCAL INPATH '../../data/files/3col_data.txt' INTO TABLE blobstore_source;
-
-DROP TABLE rcfile_buckets;
-CREATE TABLE rcfile_buckets (a STRING, value DOUBLE)
-PARTITIONED BY (b STRING)
-CLUSTERED BY (a) INTO 10 BUCKETS
-STORED AS RCFILE
-LOCATION '${hiveconf:test.blobstore.path.unique}/rcfile_buckets/rcfile_buckets';
-
-INSERT OVERWRITE TABLE rcfile_buckets
-PARTITION (b)
-SELECT a, c, b FROM blobstore_source;
-SELECT * FROM rcfile_buckets;
-
-INSERT INTO TABLE rcfile_buckets
-PARTITION (b)
-SELECT a, c, b FROM blobstore_source;
-SELECT * FROM rcfile_buckets;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/rcfile_format_nonpart.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/rcfile_format_nonpart.q b/itests/hive-blobstore/src/test/queries/clientpositive/rcfile_format_nonpart.q
deleted file mode 100644
index 9cd909e..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/rcfile_format_nonpart.q
+++ /dev/null
@@ -1,30 +0,0 @@
--- Test INSERT OVERWRITE and INSERT INTO on rcfile table in blobstore
-
-DROP TABLE blobstore_source;
-CREATE TABLE blobstore_source(a STRING, b STRING, c DOUBLE)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY ' '
-COLLECTION ITEMS TERMINATED BY '\t'
-LINES TERMINATED BY '\n'
-LOCATION '${hiveconf:test.blobstore.path.unique}/rcfile_format_nonpart/blobstore_source/';
-LOAD DATA LOCAL INPATH '../../data/files/3col_data.txt' INTO TABLE blobstore_source;
-
-DROP TABLE rcfile_table;
-CREATE TABLE rcfile_table (a INT, b STRING, value DOUBLE) STORED AS RCFILE
-LOCATION '${hiveconf:test.blobstore.path.unique}/rcfile_format_nonpart/rcfile_table';
- 
-INSERT OVERWRITE TABLE rcfile_table
-SELECT * FROM blobstore_source;
- 
-SELECT * FROM rcfile_table;
-SELECT a FROM rcfile_table GROUP BY a;
-SELECT b FROM rcfile_table GROUP BY b;
-SELECT VALUE FROM rcfile_table GROUP BY VALUE;
-
-INSERT INTO TABLE rcfile_table
-SELECT * FROM blobstore_source;
-
-SELECT * FROM rcfile_table;
-SELECT a FROM rcfile_table GROUP BY a;
-SELECT b FROM rcfile_table GROUP BY b;
-SELECT value FROM rcfile_table GROUP BY value;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/rcfile_format_part.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/rcfile_format_part.q b/itests/hive-blobstore/src/test/queries/clientpositive/rcfile_format_part.q
deleted file mode 100644
index c563d3a..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/rcfile_format_part.q
+++ /dev/null
@@ -1,67 +0,0 @@
--- Test INSERT INTO and INSERT OVERWRITE on partitioned rcfile table in blobstore
-
-DROP TABLE src_events;
-CREATE TABLE src_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT,
-  run_date    STRING,
-  game_id     INT,
-  event_name  STRING
-)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
-LOCATION '${hiveconf:test.blobstore.path.unique}/rcfile_format_part/src_events/';
-LOAD DATA LOCAL INPATH '../../data/files/events.txt' INTO TABLE src_events;
-
-DROP TABLE rcfile_events;
-CREATE TABLE rcfile_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT
-)
-PARTITIONED BY (run_date STRING, game_id INT, event_name STRING)
-STORED AS RCFILE
-LOCATION '${hiveconf:test.blobstore.path.unique}/rcfile_format_part/rcfile_events';
-
-SET hive.exec.dynamic.partition=true;
-SET hive.exec.dynamic.partition.mode=nonstrict;
-
-INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events;
-SHOW PARTITIONS rcfile_events;
-SELECT COUNT(*) FROM rcfile_events;
-SELECT COUNT(*) FROM rcfile_events WHERE run_date=20120921;
-SELECT COUNT(*) FROM rcfile_events WHERE run_date=20121121;
-
-INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date=201211, game_id, event_name)
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid,game_id,event_name FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211';
-SHOW PARTITIONS rcfile_events;
-SELECT COUNT(*) FROM rcfile_events;
-
-INSERT INTO TABLE rcfile_events PARTITION (run_date=201209, game_id=39, event_name)
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid,event_name FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39;
-SELECT COUNT(*) FROM rcfile_events;
-
-INSERT INTO TABLE rcfile_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39 AND event_name='hq_change';
-SELECT COUNT(*) FROM rcfile_events;
-
-INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39 AND event_name='hq_change';
-SELECT COUNT(*) FROM rcfile_events;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/rcfile_nonstd_partitions_loc.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/rcfile_nonstd_partitions_loc.q b/itests/hive-blobstore/src/test/queries/clientpositive/rcfile_nonstd_partitions_loc.q
deleted file mode 100644
index d17c281..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/rcfile_nonstd_partitions_loc.q
+++ /dev/null
@@ -1,100 +0,0 @@
--- Test table in rcfile format with non-standard partition locations in blobstore
-
-DROP TABLE src_events;
-CREATE TABLE src_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT,
-  run_date    STRING,
-  game_id     INT,
-  event_name  STRING
-)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
-LOCATION '${hiveconf:test.blobstore.path.unique}/rcfile_nonstd_partitions_loc/src_events/';
-LOAD DATA LOCAL INPATH '../../data/files/events.txt' INTO TABLE src_events;
-
-DROP TABLE rcfile_events;
-CREATE TABLE rcfile_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT
-)
-PARTITIONED BY (run_date STRING, game_id INT, event_name STRING)
-STORED AS RCFILE
-LOCATION '${hiveconf:test.blobstore.path.unique}/rcfile_nonstd_partitions_loc/rcfile_events/';
-
-SET hive.exec.dynamic.partition=true;
-SET hive.exec.dynamic.partition.mode=nonstrict;
-
-INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events;
-SHOW PARTITIONS rcfile_events;
-SELECT COUNT(*) FROM rcfile_events;
-
--- verify INSERT OVERWRITE and INSERT INTO nonstandard partition location
-ALTER TABLE rcfile_events ADD PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-LOCATION '${hiveconf:test.blobstore.path.unique}/rcfile_nonstd_partitions_loc/rcfile_nonstd_loc/ns-part-1/';
-INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211';
-SHOW PARTITIONS rcfile_events;
-SELECT COUNT(*) FROM rcfile_events;
-INSERT INTO TABLE rcfile_events PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211';
-SHOW PARTITIONS rcfile_events;
-SELECT COUNT(*) FROM rcfile_events;
-
-SET hive.merge.mapfiles=false;
-
--- verify INSERT OVERWRITE and INSERT INTO nonstandard partition location with hive.merge.mapfiles false
-ALTER TABLE rcfile_events ADD PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-LOCATION '${hiveconf:test.blobstore.path.unique}/rcfile_nonstd_partitions_loc/rcfile_nonstd_loc/ns-part-2/';
-INSERT INTO TABLE rcfile_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209';
-INSERT INTO TABLE rcfile_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209';
-SHOW PARTITIONS rcfile_events;
-SELECT COUNT(*) FROM rcfile_events;
-
--- verify dynamic INSERT OVERWRITE over all partitions (standard and nonstandard locations) with hive.merge.mapfiles false
-INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events;
-SHOW PARTITIONS rcfile_events;
-SELECT COUNT(*) FROM rcfile_events;
-
-SET hive.merge.mapfiles=true;
-
-ALTER TABLE rcfile_events ADD PARTITION (run_date=201207, game_id=39, event_name='hq_change')
-LOCATION '${hiveconf:test.blobstore.path.unique}/rcfile_nonstd_partitions_loc/rcfile_nonstd_loc/ns-part-3/';
-INSERT INTO TABLE rcfile_events PARTITION(run_date=201207,game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209';
-SHOW PARTITIONS rcfile_events;
-SELECT COUNT(*) FROM rcfile_events;
-
--- verify dynamic INSERT OVERWRITE over all partitions (standard and nonstandard locations) with hive.merge.mapfiles true
-INSERT OVERWRITE TABLE rcfile_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events;
-SHOW PARTITIONS rcfile_events;
-SELECT COUNT(*) FROM rcfile_events;
-
-ALTER TABLE rcfile_events DROP PARTITION (run_date=201211,game_id=39, event_name='hq_change');
-ALTER TABLE rcfile_events DROP PARTITION (run_date=201209,game_id=39, event_name='hq_change');
-ALTER TABLE rcfile_events DROP PARTITION (run_date=201207,game_id=39, event_name='hq_change');
-SHOW PARTITIONS rcfile_events;
-SELECT COUNT(*) FROM rcfile_events;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/zero_rows_blobstore.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/zero_rows_blobstore.q b/itests/hive-blobstore/src/test/queries/clientpositive/zero_rows_blobstore.q
deleted file mode 100644
index 1f663ef..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/zero_rows_blobstore.q
+++ /dev/null
@@ -1,19 +0,0 @@
--- Insert overwrite into blobstore when WHERE clause returns zero rows
-DROP TABLE blobstore_source;
-CREATE TABLE blobstore_source (
-    key int
-) 
-LOCATION '${hiveconf:test.blobstore.path.unique}/zero_rows_blobstore/blobstore_source/';
-LOAD DATA LOCAL INPATH '../../data/files/kv6.txt' INTO TABLE blobstore_source;
-
-DROP TABLE blobstore_target;
-CREATE TABLE blobstore_target (
-    key int
-) 
-LOCATION '${hiveconf:test.blobstore.path.unique}/zero_rows_blobstore/blobstore_target';
-
-SELECT COUNT(*) FROM blobstore_target;
-INSERT OVERWRITE TABLE blobstore_target SELECT key FROM blobstore_source;
-SELECT COUNT(*) FROM blobstore_target;
-INSERT OVERWRITE TABLE blobstore_target SELECT key FROM blobstore_source WHERE FALSE;
-SELECT COUNT(*) FROM blobstore_target;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/queries/clientpositive/zero_rows_hdfs.q
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/queries/clientpositive/zero_rows_hdfs.q b/itests/hive-blobstore/src/test/queries/clientpositive/zero_rows_hdfs.q
deleted file mode 100644
index ef3b71d..0000000
--- a/itests/hive-blobstore/src/test/queries/clientpositive/zero_rows_hdfs.q
+++ /dev/null
@@ -1,18 +0,0 @@
--- Insert overwrite into hdfs from blobstore when WHERE clause returns zero rows
-DROP TABLE blobstore_source;
-CREATE TABLE blobstore_source (
-    key int
-)
-LOCATION '${hiveconf:test.blobstore.path.unique}/zero_rows_hdfs/blobstore_source/';
-LOAD DATA LOCAL INPATH '../../data/files/kv6.txt' INTO TABLE blobstore_source;
-
-DROP TABLE hdfs_target;
-CREATE TABLE hdfs_target (
-    key int
-);
-
-SELECT COUNT(*) FROM hdfs_target;
-INSERT OVERWRITE TABLE hdfs_target SELECT key FROM blobstore_source;
-SELECT COUNT(*) FROM hdfs_target;
-INSERT OVERWRITE TABLE hdfs_target SELECT key FROM blobstore_source WHERE FALSE;
-SELECT COUNT(*) FROM hdfs_target;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/resources/hive-site.xml
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/resources/hive-site.xml b/itests/hive-blobstore/src/test/resources/hive-site.xml
index 038db0d..406b3b8 100644
--- a/itests/hive-blobstore/src/test/resources/hive-site.xml
+++ b/itests/hive-blobstore/src/test/resources/hive-site.xml
@@ -279,11 +279,6 @@
     <value>pblob:${system:test.tmp.dir}/bucket</value>
   </property>
 
-  <property>
-    <name>hive.exim.uri.scheme.whitelist</name>
-    <value>hdfs,pfile,file,s3,s3a,pblob</value>
-  </property>
-
   <!--
   To run these tests:
   # Create a file blobstore-conf.xml  - DO NOT ADD TO REVISION CONTROL

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/import_addpartition_blobstore_to_blobstore.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/import_addpartition_blobstore_to_blobstore.q.out b/itests/hive-blobstore/src/test/results/clientpositive/import_addpartition_blobstore_to_blobstore.q.out
deleted file mode 100644
index c1e57ee..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/import_addpartition_blobstore_to_blobstore.q.out
+++ /dev/null
@@ -1,283 +0,0 @@
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: CREATE TABLE exim_employee (emp_id int COMMENT "employee id")
-COMMENT "employee table"
-PARTITIONED BY (emp_country string COMMENT "two char iso code")
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="in")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="us")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz")
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/test.dat"
-INTO TABLE exim_employee PARTITION (emp_country="cz")
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=cz
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=in
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=in
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	cz
-2	cz
-3	cz
-4	cz
-5	cz
-6	cz
-1	in
-2	in
-3	in
-4	in
-5	in
-6	in
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us
-PREHOOK: query: EXPORT TABLE exim_employee
-TO '### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/export/exim_employee'
-PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=in
-PREHOOK: Input: default@exim_employee@emp_country=us
-PREHOOK: Output: ### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/export/exim_employee
-POSTHOOK: query: EXPORT TABLE exim_employee
-TO '### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/export/exim_employee'
-POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=in
-POSTHOOK: Input: default@exim_employee@emp_country=us
-POSTHOOK: Output: ### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/export/exim_employee
-PREHOOK: query: DROP TABLE exim_employee
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: DROP TABLE exim_employee
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='us')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/export/exim_employee'
-#### A masked pattern was here ####
-PREHOOK: type: IMPORT
-PREHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/export/exim_employee
-PREHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/import/exim_employee
-PREHOOK: Output: database:default
-POSTHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='us')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/export/exim_employee'
-#### A masked pattern was here ####
-POSTHOOK: type: IMPORT
-POSTHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/export/exim_employee
-POSTHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/import/exim_employee
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us
-PREHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='cz')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/export/exim_employee'
-#### A masked pattern was here ####
-PREHOOK: type: IMPORT
-PREHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/export/exim_employee
-PREHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/import/exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='cz')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/export/exim_employee'
-#### A masked pattern was here ####
-POSTHOOK: type: IMPORT
-POSTHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/export/exim_employee
-POSTHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/import/exim_employee
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=cz
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	cz
-2	cz
-3	cz
-4	cz
-5	cz
-6	cz
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us
-PREHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='in')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/export/exim_employee'
-#### A masked pattern was here ####
-PREHOOK: type: IMPORT
-PREHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/export/exim_employee
-PREHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/import/exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: IMPORT TABLE exim_employee PARTITION (emp_country='in')
-FROM '### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/export/exim_employee'
-#### A masked pattern was here ####
-POSTHOOK: type: IMPORT
-POSTHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/export/exim_employee
-POSTHOOK: Input: ### test.blobstore.path ###/import_addpartition_blobstore_to_blobstore/import/exim_employee
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in
-PREHOOK: query: DESCRIBE EXTENDED exim_employee
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@exim_employee
-POSTHOOK: query: DESCRIBE EXTENDED exim_employee
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@exim_employee
-emp_id              	int                 	employee id         
-emp_country         	string              	two char iso code   
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-emp_country         	string              	two char iso code   
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM exim_employee
-PREHOOK: type: QUERY
-PREHOOK: Input: default@exim_employee
-PREHOOK: Input: default@exim_employee@emp_country=cz
-PREHOOK: Input: default@exim_employee@emp_country=in
-PREHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM exim_employee
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Input: default@exim_employee@emp_country=cz
-POSTHOOK: Input: default@exim_employee@emp_country=in
-POSTHOOK: Input: default@exim_employee@emp_country=us
-#### A masked pattern was here ####
-1	cz
-2	cz
-3	cz
-4	cz
-5	cz
-6	cz
-1	in
-2	in
-3	in
-4	in
-5	in
-6	in
-1	us
-2	us
-3	us
-4	us
-5	us
-6	us


http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/AccumuloIndexDefinition.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/AccumuloIndexDefinition.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/AccumuloIndexDefinition.java
deleted file mode 100644
index 51531d6..0000000
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/AccumuloIndexDefinition.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.accumulo.mr;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Index table definition.
- */
-public class AccumuloIndexDefinition {
-  private final String baseTable;
-  private final String indexTable;
-  private final Map<String, String> colMap;
-
-
-  public AccumuloIndexDefinition(String baseTable, String indexTable) {
-    this.colMap = new HashMap<String, String>();
-    this.baseTable = baseTable;
-    this.indexTable = indexTable;
-  }
-
-  public String getBaseTable() {
-    return baseTable;
-  }
-
-
-  public String getIndexTable() {
-    return indexTable;
-  }
-
-  public void addIndexCol(String cf, String cq, String colType) {
-    colMap.put(encode(cf, cq), colType);
-  }
-
-  public Map<String, String> getColumnMap() {
-    return colMap;
-  }
-
-  public void setColumnTuples(String columns) {
-    if (columns != null) {
-      String cols = columns.trim();
-      if (!cols.isEmpty() && !"*".equals(cols)) {
-        for (String col : cols.split(",")) {
-          String[] cfcqtp = col.trim().split(":");
-          addIndexCol(cfcqtp[0], cfcqtp[1], cfcqtp[2]);
-        }
-      }
-    }
-  }
-
-  public boolean contains(String cf, String cq) {
-    return colMap.containsKey(encode(cf, cq));
-  }
-
-  public String getColType(String cf, String cq) {
-    return colMap.get(encode(cf, cq));
-  }
-
-  private String encode(String cf, String cq) {
-    return cf + ":" + cq;
-  }
-}
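
For context, the removed AccumuloIndexDefinition above keys its column map by "cf:cq" and accepts column tuples of the form "cf:cq:type", comma separated. A minimal standalone sketch of that parsing, assuming only java.util; the "meta"/"price"/"name" names are illustrative, not taken from the patch:

import java.util.HashMap;
import java.util.Map;

public class IndexColumnTupleSketch {
  public static void main(String[] args) {
    // Same tuple format the removed setColumnTuples() accepted: cf:cq:type, comma separated.
    String columns = "meta:price:double,meta:name:string";
    Map<String, String> colMap = new HashMap<String, String>();
    String cols = columns.trim();
    if (!cols.isEmpty() && !"*".equals(cols)) {
      for (String col : cols.split(",")) {
        String[] cfCqType = col.trim().split(":");
        // Key by family:qualifier, value is the declared column type.
        colMap.put(cfCqType[0] + ":" + cfCqType[1], cfCqType[2]);
      }
    }
    System.out.println(colMap); // two entries: meta:price -> double, meta:name -> string
  }
}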

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/AccumuloIndexedOutputFormat.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/AccumuloIndexedOutputFormat.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/AccumuloIndexedOutputFormat.java
deleted file mode 100644
index a055233..0000000
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/AccumuloIndexedOutputFormat.java
+++ /dev/null
@@ -1,334 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.accumulo.mr;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MultiTableBatchWriter;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.mapred.AccumuloOutputFormat;
-import org.apache.accumulo.core.data.ColumnUpdate;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hive.accumulo.AccumuloIndexLexicoder;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.RecordWriter;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.util.Progressable;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Extension of AccumuloOutputFormat to support indexing.
- */
-public class AccumuloIndexedOutputFormat extends AccumuloOutputFormat {
-  private static final Logger LOG = Logger.getLogger(AccumuloIndexedOutputFormat.class);
-  private static final Class<?> CLASS = AccumuloOutputFormat.class;
-  private static final byte[] EMPTY_BYTES = new byte[0];
-
-  public static void setIndexTableName(JobConf job, String tableName) {
-    IndexOutputConfigurator.setIndexTableName(CLASS, job, tableName);
-  }
-
-  protected static String getIndexTableName(JobConf job) {
-    return IndexOutputConfigurator.getIndexTableName(CLASS, job);
-  }
-
-  public static void setIndexColumns(JobConf job, String fields) {
-    IndexOutputConfigurator.setIndexColumns(CLASS, job, fields);
-  }
-
-  protected static String getIndexColumns(JobConf job) {
-    return IndexOutputConfigurator.getIndexColumns(CLASS, job);
-  }
-
-  public static void setStringEncoding(JobConf job, Boolean isStringEncoding) {
-    IndexOutputConfigurator.setRecordEncoding(CLASS, job, isStringEncoding);
-  }
-
-  protected static Boolean getStringEncoding(JobConf job) {
-    return IndexOutputConfigurator.getRecordEncoding(CLASS, job);
-  }
-
-  public RecordWriter<Text, Mutation> getRecordWriter(FileSystem ignored, JobConf job,
-                                           String name, Progressable progress) throws IOException {
-    try {
-      return new AccumuloIndexedOutputFormat.AccumuloRecordWriter(job);
-    } catch (Exception e) {
-      throw new IOException(e);
-    }
-  }
-
-  protected static class AccumuloRecordWriter implements RecordWriter<Text, Mutation> {
-    private MultiTableBatchWriter mtbw = null;
-    private Map<Text, BatchWriter> bws = null;
-    private Text defaultTableName = null;
-    private Text indexTableName = null;
-    private boolean simulate = false;
-    private boolean createTables = false;
-    private boolean isStringEncoded = true;
-    private long mutCount = 0L;
-    private long valCount = 0L;
-    private Connector conn;
-    private AccumuloIndexDefinition indexDef = null;
-
-    protected AccumuloRecordWriter(JobConf job)
-        throws AccumuloException, AccumuloSecurityException, IOException {
-      Level l = AccumuloIndexedOutputFormat.getLogLevel(job);
-      if (l != null) {
-        LOG.setLevel(AccumuloIndexedOutputFormat.getLogLevel(job));
-      }
-      this.isStringEncoded = AccumuloIndexedOutputFormat.getStringEncoding(job).booleanValue();
-      this.simulate = AccumuloIndexedOutputFormat.getSimulationMode(job).booleanValue();
-      this.createTables = AccumuloIndexedOutputFormat.canCreateTables(job).booleanValue();
-      if (this.simulate) {
-        LOG.info("Simulating output only. No writes to tables will occur");
-      }
-
-      this.bws = new HashMap();
-      String tname = AccumuloIndexedOutputFormat.getDefaultTableName(job);
-      this.defaultTableName = tname == null ? null : new Text(tname);
-
-      String iname = AccumuloIndexedOutputFormat.getIndexTableName(job);
-      if (iname != null) {
-        LOG.info("Index Table = " + iname);
-        this.indexTableName = new Text(iname);
-        this.indexDef = createIndexDefinition(job, tname, iname);
-      }
-      if (!this.simulate) {
-        this.conn = AccumuloIndexedOutputFormat.getInstance(job)
-            .getConnector(AccumuloIndexedOutputFormat.getPrincipal(job),
-                          AccumuloIndexedOutputFormat.getAuthenticationToken(job));
-        this.mtbw = this.conn.createMultiTableBatchWriter(
-            AccumuloIndexedOutputFormat.getBatchWriterOptions(job));
-      }
-    }
-
-     AccumuloIndexDefinition createIndexDefinition(JobConf job, String tname, String iname) {
-      AccumuloIndexDefinition def = new AccumuloIndexDefinition(tname, iname);
-      String cols = AccumuloIndexedOutputFormat.getIndexColumns(job);
-      LOG.info("Index Cols = " + cols);
-      def.setColumnTuples(cols);
-      return def;
-    }
-
-    public void write(Text table, Mutation mutation) throws IOException {
-      if(table == null || table.toString().isEmpty()) {
-        table = this.defaultTableName;
-      }
-
-      if(!this.simulate && table == null) {
-        throw new IOException("No table or default table specified. Try simulation mode next time");
-      } else {
-        ++this.mutCount;
-        this.valCount += (long)mutation.size();
-        this.printMutation(table, mutation);
-        if(!this.simulate) {
-          if(!this.bws.containsKey(table)) {
-            try {
-              this.addTable(table);
-            } catch (Exception var5) {
-              LOG.error(var5);
-              throw new IOException(var5);
-            }
-          }
-          if(indexTableName != null && !this.bws.containsKey(indexTableName)) {
-            try {
-              this.addTable(indexTableName);
-            } catch (Exception var6) {
-              LOG.error(var6);
-              throw new IOException(var6);
-            }
-          }
-
-          try {
-            ((BatchWriter)this.bws.get(table)).addMutation(mutation);
-          } catch (MutationsRejectedException var4) {
-            throw new IOException(var4);
-          }
-
-          // if this table has an associated index table then attempt to build
-          // index mutations
-          if (indexTableName != null) {
-            List<Mutation> idxMuts = getIndexMutations(mutation);
-            if (!idxMuts.isEmpty()) {
-              try {
-                BatchWriter writer = this.bws.get(indexTableName);
-                for (Mutation m : idxMuts) {
-                  writer.addMutation(m);
-                }
-              } catch (MutationsRejectedException var4) {
-                throw new IOException(var4);
-              }
-            }
-          }
-        }
-      }
-    }
-
-    public void addTable(Text tableName) throws AccumuloException, AccumuloSecurityException {
-      if(this.simulate) {
-        LOG.info("Simulating adding table: " + tableName);
-      } else {
-        LOG.debug("Adding table: " + tableName);
-        BatchWriter bw = null;
-        String table = tableName.toString();
-        if(this.createTables && !this.conn.tableOperations().exists(table)) {
-          try {
-            this.conn.tableOperations().create(table);
-          } catch (AccumuloSecurityException var8) {
-            LOG.error("Accumulo security violation creating " + table, var8);
-            throw var8;
-          } catch (TableExistsException var9) {
-            LOG.warn("Table Exists " + table, var9);
-          }
-        }
-
-        try {
-          bw = this.mtbw.getBatchWriter(table);
-        } catch (TableNotFoundException var5) {
-          LOG.error("Accumulo table " + table + " doesn't exist and cannot be created.", var5);
-          throw new AccumuloException(var5);
-        }
-
-        if(bw != null) {
-          this.bws.put(tableName, bw);
-        }
-
-      }
-    }
-
-    private int printMutation(Text table, Mutation m) {
-      if(LOG.isTraceEnabled()) {
-        LOG.trace(String.format("Table %s row key: %s",
-            new Object[]{table, this.hexDump(m.getRow())}));
-        Iterator itr = m.getUpdates().iterator();
-
-        while(itr.hasNext()) {
-          ColumnUpdate cu = (ColumnUpdate)itr.next();
-          LOG.trace(String.format("Table %s column: %s:%s",
-              new Object[]{table, this.hexDump(cu.getColumnFamily()),
-                           this.hexDump(cu.getColumnQualifier())}));
-          LOG.trace(String.format("Table %s security: %s",
-              new Object[]{table, (new ColumnVisibility(cu.getColumnVisibility())).toString()}));
-          LOG.trace(String.format("Table %s value: %s",
-              new Object[]{table, this.hexDump(cu.getValue())}));
-        }
-      }
-
-      return m.getUpdates().size();
-    }
-
-    private List<Mutation> getIndexMutations(Mutation baseMut) {
-      List<Mutation> indexMuts = new ArrayList<Mutation>();
-
-      // nothing to do if there is no index definition for this table
-      if (null != indexDef) {
-
-        byte[] rowId = baseMut.getRow();
-
-
-        for (ColumnUpdate cu : baseMut.getUpdates()) {
-          String cf = new String(cu.getColumnFamily());
-          String cq = new String(cu.getColumnQualifier());
-
-          // if this columnFamily/columnQualifier pair is defined in the index build a new mutation
-          // so key=value, cf=columnFamily_columnQualifier, cq=rowKey, cv=columnVisibility, value=[]
-          String colType = indexDef.getColType(cf, cq);
-          if (colType != null) {
-            LOG.trace(String.format("Building index for column %s:%s", new Object[]{cf, cq}));
-            Mutation m = new Mutation(AccumuloIndexLexicoder.encodeValue(cu.getValue(), colType,
-                                               isStringEncoded));
-            String colFam = cf + "_" + cq;
-            m.put(colFam.getBytes(), rowId, new ColumnVisibility(cu.getColumnVisibility()),
-                  EMPTY_BYTES);
-            indexMuts.add(m);
-          }
-        }
-      }
-      return indexMuts;
-    }
-
-    private String hexDump(byte[] ba) {
-      StringBuilder sb = new StringBuilder();
-      byte[] arr = ba;
-      int len = ba.length;
-
-      for(int i = 0; i < len; ++i) {
-        byte b = arr[i];
-        if(b > 32 && b < 126) {
-          sb.append((char)b);
-        } else {
-          sb.append(String.format("x%02x", new Object[]{Byte.valueOf(b)}));
-        }
-      }
-
-      return sb.toString();
-    }
-
-    public void close(Reporter reporter) throws IOException {
-      LOG.debug("mutations written: " + this.mutCount + ", values written: " + this.valCount);
-      if(!this.simulate) {
-        try {
-          this.mtbw.close();
-        } catch (MutationsRejectedException var7) {
-          if(var7.getAuthorizationFailuresMap().size() > 0) {
-            Map tables = new HashMap();
-
-            Map.Entry ke;
-            Object secCodes;
-            for(Iterator itr = var7.getAuthorizationFailuresMap().entrySet().iterator();
-                itr.hasNext(); ((Set)secCodes).addAll((Collection)ke.getValue())) {
-              ke = (Map.Entry)itr.next();
-              secCodes = (Set)tables.get(((KeyExtent)ke.getKey()).getTableId().toString());
-              if(secCodes == null) {
-                secCodes = new HashSet();
-                tables.put(((KeyExtent)ke.getKey()).getTableId().toString(), secCodes);
-              }
-            }
-
-            LOG.error("Not authorized to write to tables : " + tables);
-          }
-
-          if(var7.getConstraintViolationSummaries().size() > 0) {
-            LOG.error("Constraint violations : " + var7.getConstraintViolationSummaries().size());
-          }
-          throw new IOException(var7);
-        }
-      }
-    }
-  }
-}
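
For context, the removed record writer above builds one index mutation per indexed column update: the (lexicoder-encoded) cell value becomes the index row, the base family and qualifier joined with "_" become the index family, and the base row id becomes the index qualifier, with an empty value. A hedged sketch of that shape against the Accumulo Mutation API; the literal values are illustrative and the value is left as raw bytes instead of going through the removed AccumuloIndexLexicoder:

import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.security.ColumnVisibility;

public class IndexMutationSketch {
  private static final byte[] EMPTY_BYTES = new byte[0];

  public static void main(String[] args) {
    byte[] baseRowId = "row-001".getBytes();   // row key of the base-table mutation
    byte[] cellValue = "42".getBytes();        // would be lexicoder-encoded in the removed code
    String cf = "meta";
    String cq = "price";

    // Index row = cell value, index family = cf_cq, index qualifier = base row id, empty value.
    Mutation indexMutation = new Mutation(cellValue);
    indexMutation.put((cf + "_" + cq).getBytes(), baseRowId, new ColumnVisibility(), EMPTY_BYTES);

    System.out.println("index mutation carries " + indexMutation.size() + " update(s)");
  }
}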

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableOutputFormat.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableOutputFormat.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableOutputFormat.java
index bfa764a..3ae5431 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableOutputFormat.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableOutputFormat.java
@@ -1,11 +1,10 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
@@ -15,7 +14,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hive.accumulo.mr;
 
 import java.io.IOException;
@@ -29,11 +27,8 @@ import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hive.accumulo.AccumuloConnectionParameters;
-import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding;
-import org.apache.hadoop.hive.accumulo.serde.AccumuloIndexParameters;
 import org.apache.hadoop.hive.accumulo.HiveAccumuloHelper;
 import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters;
-import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.RecordWriter;
@@ -47,7 +42,7 @@ import com.google.common.base.Preconditions;
 /**
  *
  */
-public class HiveAccumuloTableOutputFormat extends AccumuloIndexedOutputFormat {
+public class HiveAccumuloTableOutputFormat extends AccumuloOutputFormat {
 
   protected final HiveAccumuloHelper helper = new HiveAccumuloHelper();
 
@@ -59,8 +54,7 @@ public class HiveAccumuloTableOutputFormat extends AccumuloIndexedOutputFormat {
   }
 
   @Override
-  public RecordWriter<Text, Mutation> getRecordWriter(FileSystem ignored, JobConf job, String name,
-                                                     Progressable progress) throws IOException {
+  public RecordWriter<Text,Mutation> getRecordWriter(FileSystem ignored, JobConf job, String name, Progressable progress) throws IOException {
     configureAccumuloOutputFormat(job);
 
     return super.getRecordWriter(ignored, job, name, progress);
@@ -123,16 +117,6 @@ public class HiveAccumuloTableOutputFormat extends AccumuloIndexedOutputFormat {
 
       // Set the table where we're writing this data
       setDefaultAccumuloTableName(job, tableName);
-
-      // Set the index table information
-      final String indexTableName = job.get(AccumuloIndexParameters.INDEXTABLE_NAME);
-      final String indexedColumns = job.get(AccumuloIndexParameters.INDEXED_COLUMNS);
-      final String columnTypes = job.get(serdeConstants.LIST_COLUMN_TYPES);
-      final boolean binaryEncoding = ColumnEncoding.BINARY.getName()
-          .equalsIgnoreCase(job.get(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE));
-      setAccumuloIndexTableName(job, indexTableName);
-      setAccumuloIndexColumns(job, indexedColumns);
-      setAccumuloStringEncoding(job, !binaryEncoding);
     } catch (AccumuloSecurityException e) {
       log.error("Could not connect to Accumulo with provided credentials", e);
       throw new IOException(e);
@@ -141,10 +125,10 @@ public class HiveAccumuloTableOutputFormat extends AccumuloIndexedOutputFormat {
 
   // Non-static methods to wrap the static AccumuloOutputFormat methods to enable testing
 
-  protected void setConnectorInfoWithErrorChecking(JobConf conf, String username,
-                                     AuthenticationToken token) throws AccumuloSecurityException {
+  protected void setConnectorInfoWithErrorChecking(JobConf conf, String username, AuthenticationToken token)
+      throws AccumuloSecurityException {
     try {
-      AccumuloIndexedOutputFormat.setConnectorInfo(conf, username, token);
+      AccumuloOutputFormat.setConnectorInfo(conf, username, token);
     } catch (IllegalStateException e) {
       // AccumuloOutputFormat complains if you re-set an already set value. We just don't care.
       log.debug("Ignoring exception setting Accumulo Connector instance for user " + username, e);
@@ -152,8 +136,8 @@ public class HiveAccumuloTableOutputFormat extends AccumuloIndexedOutputFormat {
   }
 
   @SuppressWarnings("deprecation")
-  protected void setZooKeeperInstanceWithErrorChecking(JobConf conf, String instanceName,
-                                           String zookeepers, boolean isSasl) throws IOException {
+  protected void setZooKeeperInstanceWithErrorChecking(JobConf conf, String instanceName, String zookeepers,
+      boolean isSasl) throws IOException {
     try {
       if (isSasl) {
         // Reflection to support Accumulo 1.5. Remove when Accumulo 1.5 support is dropped
@@ -162,7 +146,7 @@ public class HiveAccumuloTableOutputFormat extends AccumuloIndexedOutputFormat {
         getHelper().setZooKeeperInstance(conf, AccumuloOutputFormat.class, zookeepers, instanceName,
             isSasl);
       } else {
-        AccumuloIndexedOutputFormat.setZooKeeperInstance(conf, instanceName, zookeepers);
+        AccumuloOutputFormat.setZooKeeperInstance(conf, instanceName, zookeepers);
       }
     } catch (IllegalStateException ise) {
       // AccumuloOutputFormat complains if you re-set an already set value. We just don't care.
@@ -173,7 +157,7 @@ public class HiveAccumuloTableOutputFormat extends AccumuloIndexedOutputFormat {
 
   protected void setMockInstanceWithErrorChecking(JobConf conf, String instanceName) {
     try {
-      AccumuloIndexedOutputFormat.setMockInstance(conf, instanceName);
+      AccumuloOutputFormat.setMockInstance(conf, instanceName);
     } catch (IllegalStateException e) {
       // AccumuloOutputFormat complains if you re-set an already set value. We just don't care.
       log.debug("Ignoring exception setting mock instance of " + instanceName, e);
@@ -181,19 +165,7 @@ public class HiveAccumuloTableOutputFormat extends AccumuloIndexedOutputFormat {
   }
 
   protected void setDefaultAccumuloTableName(JobConf conf, String tableName) {
-    AccumuloIndexedOutputFormat.setDefaultTableName(conf, tableName);
-  }
-
-  protected void setAccumuloIndexTableName(JobConf conf, String indexTableName) {
-    AccumuloIndexedOutputFormat.setIndexTableName(conf, indexTableName);
-  }
-
-  protected void setAccumuloIndexColumns(JobConf conf, String indexColumns) {
-    AccumuloIndexedOutputFormat.setIndexColumns(conf, indexColumns);
-  }
-
-  protected void setAccumuloStringEncoding(JobConf conf, Boolean isStringEncoded) {
-    AccumuloIndexedOutputFormat.setStringEncoding(conf, isStringEncoded);
+    AccumuloOutputFormat.setDefaultTableName(conf, tableName);
   }
 
   HiveAccumuloHelper getHelper() {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/IndexOutputConfigurator.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/IndexOutputConfigurator.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/IndexOutputConfigurator.java
deleted file mode 100644
index 98294bb..0000000
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/IndexOutputConfigurator.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.accumulo.mr;
-
-import org.apache.accumulo.core.client.mapreduce.lib.impl.OutputConfigurator;
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * Extension of OutputConfigurator to support indexing.
- */
-public class IndexOutputConfigurator extends OutputConfigurator {
-  /**
-   * Accumulo Write options.
-   */
-  public static enum WriteOpts {
-    DEFAULT_TABLE_NAME,
-    INDEX_TABLE_NAME,
-    INDEX_COLUMNS,
-    COLUMN_TYPES,
-    BINARY_ENCODING,
-    BATCH_WRITER_CONFIG;
-
-    private WriteOpts() {
-    }
-  }
-
-  public static void setIndexTableName(Class<?> implementingClass, Configuration conf,
-                                       String tableName) {
-    if(tableName != null) {
-      conf.set(enumToConfKey(implementingClass, WriteOpts.INDEX_TABLE_NAME), tableName);
-    }
-  }
-
-  public static String getIndexTableName(Class<?> implementingClass, Configuration conf) {
-    return conf.get(enumToConfKey(implementingClass, WriteOpts.INDEX_TABLE_NAME));
-  }
-
-  public static void setIndexColumns(Class<?> implementingClass, Configuration conf,
-                                     String columns) {
-    if(columns != null) {
-      conf.set(enumToConfKey(implementingClass, WriteOpts.INDEX_COLUMNS), columns);
-    }
-  }
-
-  public static String getIndexColumns(Class<?> implementingClass, Configuration conf) {
-    return conf.get(enumToConfKey(implementingClass, WriteOpts.INDEX_COLUMNS));
-  }
-
-
-  public static void setRecordEncoding(Class<?> implementingClass, Configuration conf,
-                                       Boolean isBinary) {
-      conf.set(enumToConfKey(implementingClass, WriteOpts.BINARY_ENCODING), isBinary.toString());
-  }
-
-  public static Boolean getRecordEncoding(Class<?> implementingClass, Configuration conf) {
-    return Boolean.valueOf(conf.get(enumToConfKey(implementingClass, WriteOpts.BINARY_ENCODING)));
-  }
-
-}
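
For context, a hedged sketch of how the removed pieces above were wired together from a JobConf; the table name and column tuples are illustrative, and it only compiles against the pre-revert tree that still contains AccumuloIndexedOutputFormat and IndexOutputConfigurator:

import org.apache.hadoop.hive.accumulo.mr.AccumuloIndexedOutputFormat;
import org.apache.hadoop.mapred.JobConf;

public class IndexJobConfSketch {
  public static void main(String[] args) {
    JobConf job = new JobConf();
    // The removed static setters store these values under IndexOutputConfigurator's WriteOpts keys.
    AccumuloIndexedOutputFormat.setIndexTableName(job, "hive_idx_table");
    AccumuloIndexedOutputFormat.setIndexColumns(job, "meta:price:double,meta:name:string");
    AccumuloIndexedOutputFormat.setStringEncoding(job, Boolean.TRUE);
  }
}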

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/package-info.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/package-info.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/package-info.java
deleted file mode 100644
index 599b1ea..0000000
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/package-info.java
+++ /dev/null
@@ -1,4 +0,0 @@
-/**
- * map reduce and supporting classes
- */
-package org.apache.hadoop.hive.accumulo.mr;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java
index 718a5c5..a7ec7c5 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java
@@ -1,11 +1,10 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
@@ -30,7 +29,6 @@ import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.data.Range;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding;
 import org.apache.hadoop.hive.accumulo.columns.ColumnMapper;
 import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloColumnMapping;
 import org.apache.hadoop.hive.accumulo.predicate.compare.CompareOp;
@@ -88,13 +86,13 @@ public class AccumuloPredicateHandler {
   private static final List<Range> TOTAL_RANGE = Collections.singletonList(new Range());
 
   private static AccumuloPredicateHandler handler = new AccumuloPredicateHandler();
-  private static Map<String, Class<? extends CompareOp>> compareOps = Maps.newHashMap();
-  private static Map<String, Class<? extends PrimitiveComparison>> pComparisons = Maps.newHashMap();
+  private static Map<String,Class<? extends CompareOp>> compareOps = Maps.newHashMap();
+  private static Map<String,Class<? extends PrimitiveComparison>> pComparisons = Maps.newHashMap();
 
   // Want to start sufficiently "high" enough in the iterator stack
   private static int iteratorCount = 50;
 
-  private static final Logger LOG = LoggerFactory.getLogger(AccumuloPredicateHandler.class);
+  private static final Logger log = LoggerFactory.getLogger(AccumuloPredicateHandler.class);
   static {
     compareOps.put(GenericUDFOPEqual.class.getName(), Equal.class);
     compareOps.put(GenericUDFOPNotEqual.class.getName(), NotEqual.class);
@@ -138,9 +136,8 @@ public class AccumuloPredicateHandler {
    */
   public Class<? extends CompareOp> getCompareOpClass(String udfType)
       throws NoSuchCompareOpException {
-    if (!compareOps.containsKey(udfType)) {
+    if (!compareOps.containsKey(udfType))
       throw new NoSuchCompareOpException("Null compare op for specified key: " + udfType);
-    }
     return compareOps.get(udfType);
   }
 
@@ -170,10 +167,9 @@ public class AccumuloPredicateHandler {
    */
   public Class<? extends PrimitiveComparison> getPrimitiveComparisonClass(String type)
       throws NoSuchPrimitiveComparisonException {
-    if (!pComparisons.containsKey(type)) {
+    if (!pComparisons.containsKey(type))
       throw new NoSuchPrimitiveComparisonException("Null primitive comparison for specified key: "
           + type);
-    }
     return pComparisons.get(type);
   }
 
@@ -200,8 +196,7 @@ public class AccumuloPredicateHandler {
   /**
    * Loop through search conditions and build ranges for predicates involving rowID column, if any.
    */
-  public List<Range> getRanges(Configuration conf, ColumnMapper columnMapper)
-      throws SerDeException {
+  public List<Range> getRanges(Configuration conf, ColumnMapper columnMapper) throws SerDeException {
     if (!columnMapper.hasRowIdMapping()) {
       return TOTAL_RANGE;
     }
@@ -223,16 +218,16 @@ public class AccumuloPredicateHandler {
       return TOTAL_RANGE;
     }
 
-    Object result = generateRanges(conf, columnMapper, hiveRowIdColumnName, root);
+    Object result = generateRanges(columnMapper, hiveRowIdColumnName, root);
 
     if (null == result) {
-      LOG.info("Calculated null set of ranges, scanning full table");
+      log.info("Calculated null set of ranges, scanning full table");
       return TOTAL_RANGE;
     } else if (result instanceof Range) {
-      LOG.info("Computed a single Range for the query: " + result);
+      log.info("Computed a single Range for the query: " + result);
       return Collections.singletonList((Range) result);
     } else if (result instanceof List) {
-      LOG.info("Computed a collection of Ranges for the query: " + result);
+      log.info("Computed a collection of Ranges for the query: " + result);
       @SuppressWarnings("unchecked")
       List<Range> ranges = (List<Range>) result;
       return ranges;
@@ -242,11 +237,9 @@ public class AccumuloPredicateHandler {
   }
 
   /**
-   * Encapsulates the traversal over some {@link ExprNodeDesc} tree for the generation of Accumuluo.
-   * Ranges using expressions involving the Accumulo rowid-mapped Hive column.
+   * Encapsulates the traversal over some {@link ExprNodeDesc} tree for the generation of Accumuluo
+   * Ranges using expressions involving the Accumulo rowid-mapped Hive column
    *
-   * @param conf
-   *          Hadoop configuration
    * @param columnMapper
    *          Mapping of Hive to Accumulo columns for the query
    * @param hiveRowIdColumnName
@@ -256,16 +249,15 @@ public class AccumuloPredicateHandler {
    * @return An object representing the result from the ExprNodeDesc tree traversal using the
    *         AccumuloRangeGenerator
    */
-  protected Object generateRanges(Configuration conf, ColumnMapper columnMapper,
-                                  String hiveRowIdColumnName, ExprNodeDesc root) {
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler,
+  protected Object generateRanges(ColumnMapper columnMapper, String hiveRowIdColumnName, ExprNodeDesc root) {
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler,
         columnMapper.getRowIdMapping(), hiveRowIdColumnName);
     Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
-        Collections.<Rule, NodeProcessor> emptyMap(), null);
+        Collections.<Rule,NodeProcessor> emptyMap(), null);
     GraphWalker ogw = new DefaultGraphWalker(disp);
-    List<Node> roots = new ArrayList<Node>();
+    ArrayList<Node> roots = new ArrayList<Node>();
     roots.add(root);
-    HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
+    HashMap<Node,Object> nodeOutput = new HashMap<Node,Object>();
 
     try {
       ogw.startWalking(roots, nodeOutput);
@@ -290,13 +282,10 @@ public class AccumuloPredicateHandler {
     boolean shouldPushdown = conf.getBoolean(AccumuloSerDeParameters.ITERATOR_PUSHDOWN_KEY,
         AccumuloSerDeParameters.ITERATOR_PUSHDOWN_DEFAULT);
     if (!shouldPushdown) {
-      LOG.info("Iterator pushdown is disabled for this table");
+      log.info("Iterator pushdown is disabled for this table");
       return itrs;
     }
 
-    boolean binaryEncodedRow = ColumnEncoding.BINARY.getName().
-        equalsIgnoreCase(conf.get(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE));
-
     int rowIdOffset = columnMapper.getRowIdOffset();
     String[] hiveColumnNamesArr = conf.getStrings(serdeConstants.LIST_COLUMNS);
 
@@ -317,12 +306,11 @@ public class AccumuloPredicateHandler {
       if (hiveRowIdColumnName == null || !hiveRowIdColumnName.equals(col)) {
         HiveAccumuloColumnMapping mapping = (HiveAccumuloColumnMapping) columnMapper
             .getColumnMappingForHiveColumn(hiveColumnNames, col);
-        itrs.add(toSetting(mapping, sc, binaryEncodedRow));
+        itrs.add(toSetting(mapping, sc));
       }
     }
-    if (LOG.isInfoEnabled()) {
-      LOG.info("num iterators = " + itrs.size());
-    }
+    if (log.isInfoEnabled())
+      log.info("num iterators = " + itrs.size());
     return itrs;
   }
 
@@ -334,19 +322,15 @@ public class AccumuloPredicateHandler {
    *          ColumnMapping to filter
    * @param sc
    *          IndexSearchCondition
-   * @param binaryEncodedValues
-   *          flag for binary encodedValues
    * @return IteratorSetting
    * @throws SerDeException
    */
   public IteratorSetting toSetting(HiveAccumuloColumnMapping accumuloColumnMapping,
-      IndexSearchCondition sc, boolean binaryEncodedValues) throws SerDeException {
+      IndexSearchCondition sc) throws SerDeException {
     iteratorCount++;
     final IteratorSetting is = new IteratorSetting(iteratorCount,
-        PrimitiveComparisonFilter.FILTER_PREFIX + iteratorCount,
-        PrimitiveComparisonFilter.class);
-    final String type =  binaryEncodedValues ? sc.getColumnDesc().getTypeString()
-                                             : ColumnEncoding.STRING.getName();
+        PrimitiveComparisonFilter.FILTER_PREFIX + iteratorCount, PrimitiveComparisonFilter.class);
+    final String type = sc.getColumnDesc().getTypeString();
     final String comparisonOpStr = sc.getComparisonOp();
 
     PushdownTuple tuple;
@@ -371,9 +355,8 @@ public class AccumuloPredicateHandler {
 
   public ExprNodeDesc getExpression(Configuration conf) {
     String filteredExprSerialized = conf.get(TableScanDesc.FILTER_EXPR_CONF_STR);
-    if (filteredExprSerialized == null) {
+    if (filteredExprSerialized == null)
       return null;
-    }
 
     return SerializationUtilities.deserializeExpression(filteredExprSerialized);
   }
@@ -392,9 +375,8 @@ public class AccumuloPredicateHandler {
     }
     IndexPredicateAnalyzer analyzer = newAnalyzer(conf);
     ExprNodeDesc residual = analyzer.analyzePredicate(filterExpr, sConditions);
-    if (residual != null) {
+    if (residual != null)
       throw new RuntimeException("Unexpected residual predicate: " + residual.getExprString());
-    }
     return sConditions;
   }
 
@@ -412,7 +394,8 @@ public class AccumuloPredicateHandler {
     ExprNodeDesc residualPredicate = analyzer.analyzePredicate(desc, sConditions);
 
     if (sConditions.size() == 0) {
-      LOG.info("nothing to decompose. Returning");
+      if (log.isInfoEnabled())
+        log.info("nothing to decompose. Returning");
       return null;
     }
 

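Aside from the signature changes above, AccumuloPredicateHandler keeps a static map from Hive UDF class names to comparison-op classes and fails fast on an unknown key (getCompareOpClass / getPrimitiveComparisonClass). A stripped-down sketch of that lookup, using plain strings in place of the real UDF and CompareOp types:

    import java.util.HashMap;
    import java.util.Map;

    public class CompareOpRegistrySketch {
      // Stand-in for the compareOps map; values are names instead of Class objects.
      private static final Map<String, String> COMPARE_OPS = new HashMap<>();
      static {
        COMPARE_OPS.put("GenericUDFOPEqual", "Equal");
        COMPARE_OPS.put("GenericUDFOPNotEqual", "NotEqual");
      }

      static String getCompareOp(String udfType) {
        if (!COMPARE_OPS.containsKey(udfType)) {
          throw new IllegalArgumentException("Null compare op for specified key: " + udfType);
        }
        return COMPARE_OPS.get(udfType);
      }

      public static void main(String[] args) {
        System.out.println(getCompareOp("GenericUDFOPEqual")); // prints Equal
      }
    }
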
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java
index 90607ed..21392d1 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java
@@ -1,11 +1,10 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
@@ -15,15 +14,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hive.accumulo.predicate;
 
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Stack;
+
 import org.apache.accumulo.core.data.Range;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.accumulo.serde.AccumuloIndexParameters;
-import org.apache.hadoop.hive.accumulo.AccumuloIndexScanner;
-import org.apache.hadoop.hive.accumulo.AccumuloIndexScannerException;
-import org.apache.hadoop.hive.accumulo.AccumuloIndexLexicoder;
 import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloRowIdColumnMapping;
 import org.apache.hadoop.hive.accumulo.predicate.compare.CompareOp;
 import org.apache.hadoop.hive.accumulo.predicate.compare.Equal;
@@ -44,19 +43,17 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
 import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantBooleanObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantByteObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantDoubleObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantFloatObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantIntObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantLongObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantShortObjectInspector;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.UTF8;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Stack;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
 /**
  *
  */
@@ -66,27 +63,12 @@ public class AccumuloRangeGenerator implements NodeProcessor {
   private final AccumuloPredicateHandler predicateHandler;
   private final HiveAccumuloRowIdColumnMapping rowIdMapping;
   private final String hiveRowIdColumnName;
-  private AccumuloIndexScanner indexScanner;
 
-  public AccumuloRangeGenerator(Configuration conf, AccumuloPredicateHandler predicateHandler,
+  public AccumuloRangeGenerator(AccumuloPredicateHandler predicateHandler,
       HiveAccumuloRowIdColumnMapping rowIdMapping, String hiveRowIdColumnName) {
     this.predicateHandler = predicateHandler;
     this.rowIdMapping = rowIdMapping;
     this.hiveRowIdColumnName = hiveRowIdColumnName;
-    try {
-      this.indexScanner = new AccumuloIndexParameters(conf).createScanner();
-    } catch (AccumuloIndexScannerException e) {
-      LOG.error(e.getLocalizedMessage(), e);
-      this.indexScanner = null;
-    }
-  }
-
-  public AccumuloIndexScanner getIndexScanner() {
-    return indexScanner;
-  }
-
-  public void setIndexScanner(AccumuloIndexScanner indexScanner) {
-    this.indexScanner = indexScanner;
   }
 
   @Override
@@ -252,39 +234,13 @@ public class AccumuloRangeGenerator implements NodeProcessor {
       return null;
     }
 
-    ConstantObjectInspector objInspector = constantDesc.getWritableObjectInspector();
-
-    // Reject any clauses that are against a column that isn't the rowId mapping or indexed
+    // Reject any clauses that are against a column that isn't the rowId mapping
     if (!this.hiveRowIdColumnName.equals(columnDesc.getColumn())) {
-      if (this.indexScanner != null && this.indexScanner.isIndexed(columnDesc.getColumn())) {
-        return getIndexedRowIds(genericUdf, leftHandNode, columnDesc.getColumn(), objInspector);
-      }
       return null;
     }
 
-    Text constText = getConstantText(objInspector);
-
-    return getRange(genericUdf, leftHandNode, constText);
-  }
-
-  private Range getRange(GenericUDF genericUdf, ExprNodeDesc leftHandNode, Text constText) {
-    Class<? extends CompareOp> opClz;
-    try {
-      opClz = predicateHandler.getCompareOpClass(genericUdf.getUdfName());
-    } catch (NoSuchCompareOpException e) {
-      throw new IllegalArgumentException("Unhandled UDF class: " + genericUdf.getUdfName());
-    }
-
-    if (leftHandNode instanceof ExprNodeConstantDesc) {
-      return getConstantOpColumnRange(opClz, constText);
-    } else if (leftHandNode instanceof ExprNodeColumnDesc) {
-      return getColumnOpConstantRange(opClz, constText);
-    } else {
-      throw new IllegalStateException("Expected column or constant on LHS of expression");
-    }
-  }
+    ConstantObjectInspector objInspector = constantDesc.getWritableObjectInspector();
 
-  private Text getConstantText(ConstantObjectInspector objInspector) throws SemanticException {
     Text constText;
     switch (rowIdMapping.getEncoding()) {
       case STRING:
@@ -301,7 +257,21 @@ public class AccumuloRangeGenerator implements NodeProcessor {
         throw new SemanticException("Unable to parse unknown encoding: "
             + rowIdMapping.getEncoding());
     }
-    return constText;
+
+    Class<? extends CompareOp> opClz;
+    try {
+      opClz = predicateHandler.getCompareOpClass(genericUdf.getUdfName());
+    } catch (NoSuchCompareOpException e) {
+      throw new IllegalArgumentException("Unhandled UDF class: " + genericUdf.getUdfName());
+    }
+
+    if (leftHandNode instanceof ExprNodeConstantDesc) {
+      return getConstantOpColumnRange(opClz, constText);
+    } else if (leftHandNode instanceof ExprNodeColumnDesc) {
+      return getColumnOpConstantRange(opClz, constText);
+    } else {
+      throw new IllegalStateException("Expected column or constant on LHS of expression");
+    }
   }
 
   protected Range getConstantOpColumnRange(Class<? extends CompareOp> opClz, Text constText) {
@@ -341,21 +311,6 @@ public class AccumuloRangeGenerator implements NodeProcessor {
     }
   }
 
-
-  protected Object getIndexedRowIds(GenericUDF genericUdf, ExprNodeDesc leftHandNode,
-                                    String columnName, ConstantObjectInspector objInspector)
-      throws SemanticException {
-    Text constText = getConstantText(objInspector);
-    byte[] value = constText.toString().getBytes(UTF_8);
-    byte[] encoded = AccumuloIndexLexicoder.encodeValue(value, leftHandNode.getTypeString(), true);
-    Range range = getRange(genericUdf, leftHandNode, new Text(encoded));
-    if (indexScanner != null) {
-      return indexScanner.getIndexRowRanges(columnName, range);
-    }
-    return null;
-  }
-
-
   protected Text getUtf8Value(ConstantObjectInspector objInspector) {
     // TODO is there a more correct way to get the literal value for the Object?
     return new Text(objInspector.getWritableConstantValue().toString());
@@ -372,7 +327,7 @@ public class AccumuloRangeGenerator implements NodeProcessor {
     ByteArrayOutputStream out = new ByteArrayOutputStream();
     if (objInspector instanceof PrimitiveObjectInspector) {
       LazyUtils.writePrimitive(out, objInspector.getWritableConstantValue(),
-          (PrimitiveObjectInspector) objInspector);
+        (PrimitiveObjectInspector) objInspector);
     } else {
       return getUtf8Value(objInspector);
     }

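With the index scanner gone, AccumuloRangeGenerator maps a rowid predicate straight to Accumulo Range objects. A rough sketch of the column-vs-constant case, using only the Range constructors that already appear in this handler's tests; the op names are simplified stand-ins for the CompareOp classes:

    import org.apache.accumulo.core.data.Range;

    public class RowIdRangeSketch {
      // Simplified take on getColumnOpConstantRange: "rowid <op> constant" -> Range.
      static Range columnOpConstantRange(String op, String constant) {
        switch (op) {
          case "Equal":              return new Range(constant);        // only the row 'constant'
          case "GreaterThanOrEqual": return new Range(constant, null);  // [constant, +inf)
          case "LessThanOrEqual":    return new Range(null, constant);  // (-inf, constant]
          default: throw new IllegalArgumentException("Unhandled op: " + op);
        }
      }

      public static void main(String[] args) {
        System.out.println(columnOpConstantRange("GreaterThanOrEqual", "row2"));
      }
    }
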
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/PrimitiveComparisonFilter.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/PrimitiveComparisonFilter.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/PrimitiveComparisonFilter.java
index 5121ea3..17d5529 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/PrimitiveComparisonFilter.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/PrimitiveComparisonFilter.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -37,6 +37,7 @@ import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloColumnMapping;
 import org.apache.hadoop.hive.accumulo.predicate.compare.CompareOp;
 import org.apache.hadoop.hive.accumulo.predicate.compare.PrimitiveComparison;
 import org.apache.hadoop.hive.common.JavaUtils;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.io.Text;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -53,7 +54,7 @@ import com.google.common.collect.Lists;
  */
 public class PrimitiveComparisonFilter extends WholeRowIterator {
   @SuppressWarnings("unused")
-  private static final Logger LOG = LoggerFactory.getLogger(PrimitiveComparisonFilter.class);
+  private static final Logger log = LoggerFactory.getLogger(PrimitiveComparisonFilter.class);
 
   public static final String FILTER_PREFIX = "accumulo.filter.compare.iterator.";
   public static final String P_COMPARE_CLASS = "accumulo.filter.iterator.p.compare.class";
@@ -67,7 +68,7 @@ public class PrimitiveComparisonFilter extends WholeRowIterator {
 
   @Override
   protected boolean filter(Text currentRow, List<Key> keys, List<Value> values) {
-    SortedMap<Key, Value> items;
+    SortedMap<Key,Value> items;
     boolean allow;
     try { // if key doesn't contain CF, it's an encoded value from a previous iterator.
       while (keys.get(0).getColumnFamily().getBytes().length == 0) {
@@ -102,11 +103,11 @@ public class PrimitiveComparisonFilter extends WholeRowIterator {
   }
 
   @Override
-  public void init(SortedKeyValueIterator<Key, Value> source, Map<String, String> options,
+  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
       IteratorEnvironment env) throws IOException {
     super.init(source, options, env);
     String serializedColumnMapping = options.get(COLUMN);
-    Entry<String, String> pair = ColumnMappingFactory.parseMapping(serializedColumnMapping);
+    Entry<String,String> pair = ColumnMappingFactory.parseMapping(serializedColumnMapping);
 
     // The ColumnEncoding, column name and type are all irrelevant at this point, just need the
     // cf:[cq]
@@ -134,7 +135,7 @@ public class PrimitiveComparisonFilter extends WholeRowIterator {
     }
   }
 
-  protected byte[] getConstant(Map<String, String> options) {
+  protected byte[] getConstant(Map<String,String> options) {
     String b64Const = options.get(CONST_VAL);
     return Base64.decodeBase64(b64Const.getBytes());
   }

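One detail of PrimitiveComparisonFilter worth calling out: the comparison constant reaches the server-side iterator as a Base64 option and is decoded in getConstant(). A small round-trip sketch with the same commons-codec calls; the constant value here is hypothetical:

    import java.nio.charset.StandardCharsets;
    import org.apache.commons.codec.binary.Base64;

    public class ConstantOptionSketch {
      public static void main(String[] args) {
        // Encode as a client would when building the IteratorSetting options...
        String b64Const = Base64.encodeBase64String("row2".getBytes(StandardCharsets.UTF_8));
        // ...and decode the way getConstant() does inside the iterator.
        byte[] constant = Base64.decodeBase64(b64Const.getBytes(StandardCharsets.UTF_8));
        System.out.println(new String(constant, StandardCharsets.UTF_8)); // row2
      }
    }
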
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloIndexParameters.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloIndexParameters.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloIndexParameters.java
deleted file mode 100644
index d295c7b..0000000
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloIndexParameters.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.accumulo.serde;
-
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.accumulo.AccumuloDefaultIndexScanner;
-import org.apache.hadoop.hive.accumulo.AccumuloIndexScanner;
-import org.apache.hadoop.hive.accumulo.AccumuloIndexScannerException;
-
-import java.nio.charset.StandardCharsets;
-import java.util.HashSet;
-import java.util.Set;
-
-
-/**
- * Accumulo Index Parameters for Hive tables.
- */
-public class AccumuloIndexParameters {
-  public static final int DEFAULT_MAX_ROWIDS = 20000;
-  public static final String INDEX_SCANNER = "accumulo.index.scanner";
-  public static final String MAX_INDEX_ROWS = "accumulo.index.rows.max";
-  public static final String INDEXED_COLUMNS = "accumulo.indexed.columns";
-  public static final String INDEXTABLE_NAME = "accumulo.indextable.name";
-  private static final Set<String> EMPTY_SET = new HashSet<String>();
-  private Configuration conf;
-
-  public AccumuloIndexParameters(Configuration conf) {
-    this.conf = conf;
-  }
-
-  public String getIndexTable() {
-    return this.conf.get(INDEXTABLE_NAME);
-  }
-
-  public int getMaxIndexRows() {
-    return this.conf.getInt(MAX_INDEX_ROWS, DEFAULT_MAX_ROWIDS);
-  }
-
-  public final Set<String> getIndexColumns() {
-    String colmap = conf.get(INDEXED_COLUMNS);
-    if (colmap != null) {
-      Set<String> cols = new HashSet<String>();
-        for (String col : colmap.split(",")) {
-          cols.add(col.trim());
-        }
-        return cols;
-    }
-    return EMPTY_SET;
-  }
-
-
-  public final Authorizations getTableAuths() {
-    String auths = conf.get(AccumuloSerDeParameters.AUTHORIZATIONS_KEY);
-    if (auths != null && !auths.isEmpty()) {
-      return new Authorizations(auths.trim().getBytes(StandardCharsets.UTF_8));
-    }
-    return new Authorizations();
-  }
-
-  public Configuration getConf() {
-    return conf;
-  }
-
-  public final AccumuloIndexScanner createScanner() throws AccumuloIndexScannerException {
-    AccumuloIndexScanner handler;
-
-    String classname = conf.get(INDEX_SCANNER);
-    if (classname != null) {
-      try {
-        handler = (AccumuloIndexScanner) Class.forName(classname).newInstance();
-      } catch (ClassCastException | InstantiationException |  IllegalAccessException
-          | ClassNotFoundException e) {
-        throw new AccumuloIndexScannerException("Cannot use index scanner class: " + classname, e);
-      }
-    } else {
-      handler = new AccumuloDefaultIndexScanner();
-    }
-    if (handler != null) {
-      handler.init(conf);
-    }
-    return handler;
-  }
-}
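
The removed createScanner() above is a small configuration-driven factory: read a class name from the conf, instantiate it reflectively, fall back to a default. A condensed sketch of the same pattern; IndexScanner and DefaultScanner here are placeholder types, not the real Hive interfaces:

    import org.apache.hadoop.conf.Configuration;

    public class ScannerFactorySketch {
      interface IndexScanner { void init(Configuration conf); }

      static class DefaultScanner implements IndexScanner {
        public void init(Configuration conf) { }
      }

      static IndexScanner createScanner(Configuration conf) throws Exception {
        String classname = conf.get("accumulo.index.scanner"); // the INDEX_SCANNER key above
        IndexScanner scanner = (classname != null)
            ? (IndexScanner) Class.forName(classname).newInstance()
            : new DefaultScanner();
        scanner.init(conf);
        return scanner;
      }

      public static void main(String[] args) throws Exception {
        System.out.println(createScanner(new Configuration()).getClass().getSimpleName());
      }
    }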

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloSerDeParameters.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloSerDeParameters.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloSerDeParameters.java
index ef454f0..09c5f24 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloSerDeParameters.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloSerDeParameters.java
@@ -17,11 +17,9 @@
 package org.apache.hadoop.hive.accumulo.serde;
 
 import java.util.Collections;
-import java.util.HashSet;
 import java.util.List;
 import java.util.NoSuchElementException;
 import java.util.Properties;
-import java.util.Set;
 
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
@@ -60,21 +58,12 @@ public class AccumuloSerDeParameters extends AccumuloConnectionParameters {
 
   public static final String COMPOSITE_ROWID_FACTORY = "accumulo.composite.rowid.factory";
   public static final String COMPOSITE_ROWID_CLASS = "accumulo.composite.rowid";
-  public static final int DEFAULT_MAX_ROWIDS = 20000;
-  public static final String INDEX_SCANNER = "accumulo.index.scanner";
-  public static final String MAX_INDEX_ROWS = "accumulo.index.rows.max";
-  public static final String INDEXED_COLUMNS = "accumulo.indexed.columns";
-  public static final String INDEXTABLE_NAME = "accumulo.indextable.name";
-  private static final Set<String> EMPTY_SET = new HashSet<String>();
-
-
 
   protected final ColumnMapper columnMapper;
 
   private Properties tableProperties;
   private String serdeName;
   private LazySerDeParameters lazySerDeParameters;
-  private AccumuloIndexParameters indexParams;
   private AccumuloRowIdFactory rowIdFactory;
 
   public AccumuloSerDeParameters(Configuration conf, Properties tableProperties, String serdeName)
@@ -84,7 +73,6 @@ public class AccumuloSerDeParameters extends AccumuloConnectionParameters {
     this.serdeName = serdeName;
 
     lazySerDeParameters = new LazySerDeParameters(conf, tableProperties, serdeName);
-    indexParams = new AccumuloIndexParameters(conf);
 
     // The default encoding for this table when not otherwise specified
     String defaultStorage = tableProperties.getProperty(DEFAULT_STORAGE_TYPE);
@@ -147,17 +135,10 @@ public class AccumuloSerDeParameters extends AccumuloConnectionParameters {
     return new DefaultAccumuloRowIdFactory();
   }
 
-  public AccumuloIndexParameters getIndexParams() {
-    return indexParams;
-  }
-
   public LazySerDeParameters getSerDeParameters() {
-
     return lazySerDeParameters;
   }
 
-
-
   public Properties getTableProperties() {
     return tableProperties;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/package-info.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/package-info.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/package-info.java
deleted file mode 100644
index 7311e87..0000000
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/package-info.java
+++ /dev/null
@@ -1,4 +0,0 @@
-/**
- * accumulo serde classes
- */
-package org.apache.hadoop.hive.accumulo.serde;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloDefaultIndexScanner.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloDefaultIndexScanner.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloDefaultIndexScanner.java
deleted file mode 100644
index 7d6cc0e..0000000
--- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloDefaultIndexScanner.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.accumulo;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.mock.MockInstance;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.accumulo.serde.AccumuloIndexParameters;
-import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-public class TestAccumuloDefaultIndexScanner {
-  private static final Logger LOG = LoggerFactory.getLogger(TestAccumuloDefaultIndexScanner.class);
-  private static final Value EMPTY_VALUE = new Value();
-
-  private static void addRow(BatchWriter writer, String rowId, String cf, String cq) throws MutationsRejectedException {
-    Mutation mut = new Mutation(rowId);
-    mut.put(new Text(cf), new Text(cq), EMPTY_VALUE);
-    writer.addMutation(mut);
-  }
-
-  private static void addRow(BatchWriter writer, Integer rowId, String cf, String cq) throws MutationsRejectedException {
-    Mutation mut = new Mutation(AccumuloIndexLexicoder.encodeValue(String.valueOf(rowId).getBytes(), "int", true));
-    mut.put(new Text(cf), new Text(cq), EMPTY_VALUE);
-    writer.addMutation(mut);
-  }
-
-  private static void addRow(BatchWriter writer, boolean rowId, String cf, String cq) throws MutationsRejectedException {
-    Mutation mut = new Mutation(String.valueOf(rowId));
-    mut.put(new Text(cf), new Text(cq), EMPTY_VALUE);
-    writer.addMutation(mut);
-  }
-
-  public static AccumuloDefaultIndexScanner buildMockHandler(int maxMatches) {
-    try {
-      String table = "table";
-      Text emptyText = new Text("");
-      Configuration conf = new Configuration();
-      conf.set(AccumuloIndexParameters.INDEXTABLE_NAME, table);
-      conf.setInt(AccumuloIndexParameters.MAX_INDEX_ROWS, maxMatches);
-      conf.set(AccumuloIndexParameters.INDEXED_COLUMNS, "*");
-      conf.set(serdeConstants.LIST_COLUMNS, "rid,name,age,cars,mgr");
-      conf.set(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowId,name:name,age:age,cars:cars,mgr:mgr");
-      AccumuloDefaultIndexScanner handler = new AccumuloDefaultIndexScanner();
-      handler.init(conf);
-
-      MockInstance inst = new MockInstance("test_instance");
-      Connector conn = inst.getConnector("root", new PasswordToken(""));
-      if (!conn.tableOperations().exists(table)) {
-        conn.tableOperations().create(table);
-        BatchWriterConfig batchConfig = new BatchWriterConfig();
-        BatchWriter writer = conn.createBatchWriter(table, batchConfig);
-        addRow(writer, "fred", "name_name", "row1");
-        addRow(writer, "25", "age_age", "row1");
-        addRow(writer, 5, "cars_cars", "row1");
-        addRow(writer, true, "mgr_mgr", "row1");
-        addRow(writer, "bill", "name_name", "row2");
-        addRow(writer, "20", "age_age", "row2");
-        addRow(writer, 2, "cars_cars", "row2");
-        addRow(writer, false, "mgr_mgr", "row2");
-        addRow(writer, "sally", "name_name", "row3");
-        addRow(writer, "23", "age_age", "row3");
-        addRow(writer, 6, "cars_cars", "row3");
-        addRow(writer, true, "mgr_mgr", "row3");
-        addRow(writer, "rob", "name_name", "row4");
-        addRow(writer, "60", "age_age", "row4");
-        addRow(writer, 1, "cars_cars", "row4");
-        addRow(writer, false, "mgr_mgr", "row4");
-        writer.close();
-      }
-      AccumuloConnectionParameters connectionParams = Mockito
-          .mock(AccumuloConnectionParameters.class);
-      AccumuloStorageHandler storageHandler = Mockito.mock(AccumuloStorageHandler.class);
-
-      Mockito.when(connectionParams.getConnector()).thenReturn(conn);
-      handler.setConnectParams(connectionParams);
-      return handler;
-    } catch (AccumuloSecurityException | AccumuloException | TableExistsException | TableNotFoundException e) {
-      LOG.error(e.getLocalizedMessage(), e);
-    }
-    return null;
-  }
-
-  @Test
-  public void testMatchNone() {
-    AccumuloDefaultIndexScanner handler = buildMockHandler(10);
-    List<Range> ranges = handler.getIndexRowRanges("name", new Range("mike"));
-    assertEquals(0, ranges.size());
-  }
-
-  @Test
-  public void testMatchRange() {
-    AccumuloDefaultIndexScanner handler = buildMockHandler(10);
-    List<Range> ranges = handler.getIndexRowRanges("age", new Range("10", "50"));
-    assertEquals(3, ranges.size());
-    assertTrue("does not contain row1", ranges.contains(new Range("row1")));
-    assertTrue("does not contain row2", ranges.contains(new Range("row2")));
-    assertTrue("does not contain row3", ranges.contains(new Range("row3")));
-  }
-
-  public void testTooManyMatches() {
-    AccumuloDefaultIndexScanner handler = buildMockHandler(2);
-    List<Range> ranges = handler.getIndexRowRanges("age", new Range("10", "50"));
-    assertNull("ranges should be null", ranges);
-  }
-
-  @Test
-  public void testMatchExact() {
-    AccumuloDefaultIndexScanner handler = buildMockHandler(10);
-    List<Range> ranges = handler.getIndexRowRanges("age", new Range("20"));
-    assertEquals(1, ranges.size());
-    assertTrue("does not contain row2", ranges.contains(new Range("row2")));
-  }
-
-  @Test
-  public void testValidIndex() {
-    Configuration conf = new Configuration();
-    conf.set(AccumuloIndexParameters.INDEXED_COLUMNS, "name,age,phone,email");
-    conf.set(AccumuloIndexParameters.INDEXTABLE_NAME, "contact");
-    AccumuloDefaultIndexScanner handler = new AccumuloDefaultIndexScanner();
-    handler.init(conf);
-    assertTrue("name is not identified as an index", handler.isIndexed("name"));
-    assertTrue("age is not identified as an index", handler.isIndexed("age"));
-    assertTrue("phone is not identified as an index", handler.isIndexed("phone"));
-    assertTrue("email is not identified as an index", handler.isIndexed("email"));
-  }
-
-  @Test
-  public void testInvalidIndex() {
-    Configuration conf = new Configuration();
-    conf.set(AccumuloIndexParameters.INDEXED_COLUMNS, "name,age,phone,email");
-    conf.set(AccumuloIndexParameters.INDEXTABLE_NAME, "contact");
-    AccumuloDefaultIndexScanner handler = new AccumuloDefaultIndexScanner();
-    handler.init(conf);
-    assertFalse("mobile is identified as an index", handler.isIndexed("mobile"));
-    assertFalse("mail is identified as an index", handler.isIndexed("mail"));
-  }
-
-
-  @Test
-  public void testMissingTable() {
-    Configuration conf = new Configuration();
-    conf.set(AccumuloIndexParameters.INDEXED_COLUMNS, "name,age,phone,email");
-    AccumuloDefaultIndexScanner handler = new AccumuloDefaultIndexScanner();
-    handler.init(conf);
-    assertFalse("name is identified as an index", handler.isIndexed("name"));
-    assertFalse("age is identified as an index", handler.isIndexed("age"));
-  }
-
-  @Test
-  public void testWildcardIndex() {
-    Configuration conf = new Configuration();
-    conf.set(AccumuloIndexParameters.INDEXED_COLUMNS, "*");
-    conf.set(AccumuloIndexParameters.INDEXTABLE_NAME, "contact");
-    AccumuloDefaultIndexScanner handler = new AccumuloDefaultIndexScanner();
-    handler.init(conf);
-    assertTrue("name is not identified as an index", handler.isIndexed("name"));
-    assertTrue("age is not identified as an index", handler.isIndexed("age"));
-  }
-
-  @Test
-  public void testNullIndex() {
-    Configuration conf = new Configuration();
-    conf.set(AccumuloIndexParameters.INDEXTABLE_NAME, "contact");
-    AccumuloDefaultIndexScanner handler = new AccumuloDefaultIndexScanner();
-    handler.init(conf);
-    assertTrue("name is not identified as an index", handler.isIndexed("name"));
-  }
-
-  @Test
-  public void testEmptyIndex() {
-    Configuration conf = new Configuration();
-    conf.set(AccumuloIndexParameters.INDEXED_COLUMNS, "");
-    conf.set(AccumuloIndexParameters.INDEXTABLE_NAME, "contact");
-    AccumuloDefaultIndexScanner handler = new AccumuloDefaultIndexScanner();
-    handler.init(conf);
-    assertFalse("name is identified as an index", handler.isIndexed("name"));
-  }
-}
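
The deleted test above pins down how column-level indexing was resolved: an explicit accumulo.indexed.columns list, "*" meaning every column, an empty list meaning none. A simplified stand-in for that isIndexed() behavior, inferred from the assertions rather than copied from AccumuloDefaultIndexScanner (which additionally requires the index table to be configured):

    import java.util.HashSet;
    import java.util.Set;

    public class IsIndexedSketch {
      static boolean isIndexed(String indexedColumns, String column) {
        if (indexedColumns == null || "*".equals(indexedColumns.trim())) {
          return true; // unset or wildcard: treat every column as indexed
        }
        Set<String> cols = new HashSet<>();
        for (String c : indexedColumns.split(",")) {
          if (!c.trim().isEmpty()) {
            cols.add(c.trim());
          }
        }
        return cols.contains(column);
      }

      public static void main(String[] args) {
        System.out.println(isIndexed("name,age,phone,email", "mobile")); // false
        System.out.println(isIndexed("*", "name"));                      // true
        System.out.println(isIndexed("", "name"));                       // false
      }
    }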

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloIndexLexicoder.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloIndexLexicoder.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloIndexLexicoder.java
deleted file mode 100644
index b19f10e..0000000
--- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloIndexLexicoder.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.accumulo;
-
-import org.apache.accumulo.core.client.lexicoder.BigIntegerLexicoder;
-import org.apache.accumulo.core.client.lexicoder.DoubleLexicoder;
-import org.apache.accumulo.core.client.lexicoder.IntegerLexicoder;
-import org.apache.accumulo.core.client.lexicoder.LongLexicoder;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.math.BigInteger;
-import java.nio.ByteBuffer;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-
-/**
- *
- */
-public class TestAccumuloIndexLexicoder {
-
-  @Test
-  public void testBooleanString() {
-    byte[] value = Boolean.TRUE.toString().getBytes(UTF_8);
-    assertArrayEquals(AccumuloIndexLexicoder.encodeValue(value, serdeConstants.BOOLEAN_TYPE_NAME,
-        true), value);
-  }
-
-  @Test
-  public void testBooleanBinary() {
-    byte[] value = new byte[] { 1 };
-    assertArrayEquals(AccumuloIndexLexicoder.encodeValue(value, serdeConstants.BOOLEAN_TYPE_NAME,
-        false), Boolean.TRUE.toString().getBytes(UTF_8));
-  }
-
-  @Test
-  public void testIntString() {
-    byte[] value = "10".getBytes(UTF_8);
-    byte[] encoded = new IntegerLexicoder().encode(10);
-
-    byte[] lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.INT_TYPE_NAME, true);
-    assertArrayEquals(lex, encoded);
-
-    lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.SMALLINT_TYPE_NAME, true);
-    assertArrayEquals(lex, encoded);
-
-    lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.TINYINT_TYPE_NAME, true);
-    assertArrayEquals(lex, encoded);
-  }
-
-  @Test
-  public void testIntBinary() {
-    byte[] value = ByteBuffer.allocate(4).putInt(10).array();
-    byte[] encoded = new IntegerLexicoder().encode(10);
-
-    byte[] lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.INT_TYPE_NAME, false);
-    assertArrayEquals(lex, encoded);
-
-    value = ByteBuffer.allocate(2).putShort((short) 10).array();
-    lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.SMALLINT_TYPE_NAME, false);
-    assertArrayEquals(lex, encoded);
-
-    value = ByteBuffer.allocate(1).put((byte)10).array();
-    lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.TINYINT_TYPE_NAME, false);
-    assertArrayEquals(lex, encoded);
-  }
-
-  @Test
-  public void testFloatBinary() {
-    byte[] value = ByteBuffer.allocate(4).putFloat(10.55f).array();
-    byte[] encoded = new DoubleLexicoder().encode((double)10.55f);
-    String val = new String(encoded);
-
-    byte[] lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.FLOAT_TYPE_NAME, false);
-    assertArrayEquals(lex, encoded);
-
-    value = ByteBuffer.allocate(8).putDouble(10.55).array();
-    encoded = new DoubleLexicoder().encode(10.55);
-    lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.DOUBLE_TYPE_NAME, false);
-    assertArrayEquals(lex, encoded);
-  }
-
-  @Test
-  public void testFloatString() {
-    byte[] value = "10.55".getBytes(UTF_8);
-    byte[] encoded = new DoubleLexicoder().encode(10.55);
-
-    byte[] lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.FLOAT_TYPE_NAME, true);
-    assertArrayEquals(lex, encoded);
-
-    lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.DOUBLE_TYPE_NAME, true);
-    assertArrayEquals(lex, encoded);
-  }
-
-  @Test
-  public void testBigIntBinary() {
-    byte[] value = new String("1232322323").getBytes(UTF_8);
-    byte[] encoded = new BigIntegerLexicoder().encode(new BigInteger("1232322323", 10));
-
-    byte[] lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.BIGINT_TYPE_NAME, true);
-    assertArrayEquals(lex, encoded);
-
-    value = new BigInteger( "1232322323", 10 ).toByteArray();
-    encoded = new BigIntegerLexicoder().encode(new BigInteger("1232322323", 10 ));
-    lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.BIGINT_TYPE_NAME, false);
-    assertArrayEquals(lex, encoded);
-  }
-
-  @Test
-  public void testDecimalString() {
-    String strVal = "12323232233434";
-    byte[] value = strVal.getBytes(UTF_8);
-    byte[] encoded = strVal.getBytes(UTF_8);
-
-    byte[] lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.DECIMAL_TYPE_NAME, true);
-    assertArrayEquals(lex, encoded);
-
-
-    lex = AccumuloIndexLexicoder.encodeValue(value, "DECIMAL (10,3)", true);
-    assertArrayEquals(lex, encoded);
-  }
-
-  @Test
-  public void testDecimalBinary() {
-    byte[] value = new BigInteger("12323232233434", 10).toString().getBytes(UTF_8);
-    byte[] encoded = new String(value).getBytes(UTF_8);
-
-    byte[] lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.DECIMAL_TYPE_NAME, false);
-    assertArrayEquals(lex, encoded);
-  }
-
-  @Test
-  public void testDateString() {
-    String date = "2016-02-22";
-    byte[] value = date.getBytes(UTF_8);
-    assertArrayEquals(AccumuloIndexLexicoder.encodeValue(value, serdeConstants.DATE_TYPE_NAME,
-                                                        true), value);
-  }
-
-  @Test
-  public void testDateTimeString() {
-    String timestamp = "2016-02-22 12:12:06.000000005";
-    byte[] value = timestamp.getBytes(UTF_8);
-    assertArrayEquals(AccumuloIndexLexicoder.encodeValue(value, serdeConstants.TIMESTAMP_TYPE_NAME,
-                                                        true), value);
-  }
-
-  @Test
-  public void testString() {
-    String strVal = "The quick brown fox";
-    byte[] value = strVal.getBytes(UTF_8);
-    assertArrayEquals(AccumuloIndexLexicoder.encodeValue(value, serdeConstants.STRING_TYPE_NAME,
-                                                        true), value);
-    assertArrayEquals(AccumuloIndexLexicoder.encodeValue(value, "varChar(20)",
-                                                        true), value);
-    assertArrayEquals(AccumuloIndexLexicoder.encodeValue(value, "CHAR (20)",
-                                                        true), value);
-  }
-}
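
The deleted lexicoder tests above all exercise one property: encoded bytes must sort the same way the original values do, so numeric index rows can be matched with ordinary Range scans. A small standalone check of that property with the same IntegerLexicoder the tests import; the unsigned byte comparison mirrors how Accumulo orders row ids:

    import org.apache.accumulo.core.client.lexicoder.IntegerLexicoder;

    public class LexicoderOrderCheck {
      // Unsigned lexicographic comparison, i.e. how Accumulo orders row ids.
      static int compareUnsigned(byte[] a, byte[] b) {
        int len = Math.min(a.length, b.length);
        for (int i = 0; i < len; i++) {
          int d = (a[i] & 0xff) - (b[i] & 0xff);
          if (d != 0) {
            return d;
          }
        }
        return a.length - b.length;
      }

      public static void main(String[] args) {
        IntegerLexicoder lex = new IntegerLexicoder();
        System.out.println("2".compareTo("10") > 0);                            // true: plain strings misorder numbers
        System.out.println(compareUnsigned(lex.encode(2), lex.encode(10)) < 0); // true: encoded ints keep numeric order
      }
    }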

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloIndexParameters.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloIndexParameters.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloIndexParameters.java
deleted file mode 100644
index 976fd27..0000000
--- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloIndexParameters.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.accumulo;
-
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.accumulo.serde.AccumuloIndexParameters;
-import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters;
-import org.junit.Test;
-
-import java.util.List;
-import java.util.Set;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-public class TestAccumuloIndexParameters {
-
-  public static class MockAccumuloIndexScanner implements AccumuloIndexScanner {
-
-    @Override
-    public void init(Configuration conf) {
-    }
-
-    @Override
-    public boolean isIndexed(String columnName) {
-      return false;
-    }
-
-    @Override
-    public List<Range> getIndexRowRanges(String column, Range indexRange) {
-      return null;
-    }
-  }
-
-  @Test
-  public void testDefaultScanner() {
-    try {
-      AccumuloIndexScanner scanner = new AccumuloIndexParameters(new Configuration()).createScanner();
-      assertTrue(scanner instanceof AccumuloDefaultIndexScanner);
-    } catch (AccumuloIndexScannerException e) {
-      fail("Unexpected exception thrown");
-    }
-  }
-
-  @Test
-  public void testUserHandler() throws AccumuloIndexScannerException {
-    Configuration conf = new Configuration();
-    conf.set(AccumuloIndexParameters.INDEX_SCANNER, MockAccumuloIndexScanner.class.getName());
-    AccumuloIndexScanner scanner = new AccumuloIndexParameters(conf).createScanner();
-    assertTrue(scanner instanceof MockAccumuloIndexScanner);
-  }
-
-  @Test
-  public void testBadHandler() {
-    Configuration conf = new Configuration();
-    conf.set(AccumuloIndexParameters.INDEX_SCANNER, "a.class.does.not.exist.IndexHandler");
-    try {
-      AccumuloIndexScanner scanner = new AccumuloIndexParameters(conf).createScanner();
-    } catch (AccumuloIndexScannerException e) {
-      return;
-    }
-    fail("Failed to throw exception for class not found");
-  }
-
-  @Test
-  public void getIndexColumns() {
-    Configuration conf = new Configuration();
-    conf.set(AccumuloIndexParameters.INDEXED_COLUMNS, "a,b,c");
-    Set<String> cols = new AccumuloIndexParameters(conf).getIndexColumns();
-    assertEquals(3, cols.size());
-    assertTrue("Missing column a", cols.contains("a"));
-    assertTrue("Missing column b", cols.contains("b"));
-    assertTrue("Missing column c", cols.contains("c"));
-  }
-
-  @Test
-  public void getMaxIndexRows() {
-    Configuration conf = new Configuration();
-    conf.setInt(AccumuloIndexParameters.MAX_INDEX_ROWS, 10);
-    int maxRows = new AccumuloIndexParameters(conf).getMaxIndexRows();
-    assertEquals(10, maxRows);
-  }
-
-  @Test
-  public void getAuths() {
-    Configuration conf = new Configuration();
-    conf.set(AccumuloSerDeParameters.AUTHORIZATIONS_KEY, "public,open");
-    Authorizations auths = new AccumuloIndexParameters(conf).getTableAuths();
-    assertEquals(2, auths.size());
-    assertTrue("Missing auth public", auths.contains("public"));
-    assertTrue("Missing auth open", auths.contains("open"));
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloStorageHandler.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloStorageHandler.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloStorageHandler.java
index 8d195ee..0aaa782 100644
--- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloStorageHandler.java
+++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloStorageHandler.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
-import org.apache.hadoop.hive.serde.serdeConstants;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
@@ -60,8 +59,6 @@ public class TestAccumuloStorageHandler {
     Map<String,String> jobProperties = new HashMap<String,String>();
 
     props.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:cq1,cf:cq2,cf:cq3");
-    props.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string:int:string");
-    props.setProperty(serdeConstants.LIST_COLUMNS, "name,age,email");
     props.setProperty(AccumuloSerDeParameters.TABLE_NAME, "table");
     props.setProperty(AccumuloSerDeParameters.VISIBILITY_LABEL_KEY, "foo");
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloPredicateHandler.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloPredicateHandler.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloPredicateHandler.java
index 0bb50e8..88e4530 100644
--- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloPredicateHandler.java
+++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloPredicateHandler.java
@@ -488,7 +488,6 @@ public class TestAccumuloPredicateHandler {
         TypeInfoFactory.intTypeInfo, TypeInfoFactory.stringTypeInfo);
     conf.set(serdeConstants.LIST_COLUMNS, Joiner.on(',').join(columnNames));
     conf.set(serdeConstants.LIST_COLUMN_TYPES, "string,int,string");
-    conf.set(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE, ColumnEncoding.BINARY.getName());
     String columnMappingStr = "cf:f1,cf:f2,:rowID";
     conf.set(AccumuloSerDeParameters.COLUMN_MAPPINGS, columnMappingStr);
     columnMapper = new ColumnMapper(columnMappingStr, ColumnEncoding.STRING.getName(), columnNames,
@@ -759,7 +758,7 @@ public class TestAccumuloPredicateHandler {
     String hiveRowIdColumnName = "rid";
 
     Mockito.when(mockHandler.getRanges(conf, columnMapper)).thenCallRealMethod();
-    Mockito.when(mockHandler.generateRanges(conf, columnMapper, hiveRowIdColumnName, root)).thenReturn(null);
+    Mockito.when(mockHandler.generateRanges(columnMapper, hiveRowIdColumnName, root)).thenReturn(null);
     Mockito.when(mockHandler.getExpression(conf)).thenReturn(root);
 
     // A null result from AccumuloRangeGenerator is all ranges
@@ -777,8 +776,7 @@ public class TestAccumuloPredicateHandler {
     String hiveRowIdColumnName = "rid";
 
     Mockito.when(mockHandler.getRanges(conf, columnMapper)).thenCallRealMethod();
-    Mockito.when(mockHandler.generateRanges(conf, columnMapper, hiveRowIdColumnName, root))
-                  .thenReturn(Collections.emptyList());
+    Mockito.when(mockHandler.generateRanges(columnMapper, hiveRowIdColumnName, root)).thenReturn(Collections.emptyList());
     Mockito.when(mockHandler.getExpression(conf)).thenReturn(root);
 
     // A null result from AccumuloRangeGenerator is all ranges
@@ -797,7 +795,7 @@ public class TestAccumuloPredicateHandler {
     Range r = new Range("a");
 
     Mockito.when(mockHandler.getRanges(conf, columnMapper)).thenCallRealMethod();
-    Mockito.when(mockHandler.generateRanges(conf, columnMapper, hiveRowIdColumnName, root)).thenReturn(r);
+    Mockito.when(mockHandler.generateRanges(columnMapper, hiveRowIdColumnName, root)).thenReturn(r);
     Mockito.when(mockHandler.getExpression(conf)).thenReturn(root);
 
     // A null result from AccumuloRangeGenerator is all ranges
@@ -816,8 +814,7 @@ public class TestAccumuloPredicateHandler {
     Range r1 = new Range("a"), r2 = new Range("z");
 
     Mockito.when(mockHandler.getRanges(conf, columnMapper)).thenCallRealMethod();
-    Mockito.when(mockHandler.generateRanges(conf, columnMapper, hiveRowIdColumnName, root))
-                 .thenReturn(Arrays.asList(r1, r2));
+    Mockito.when(mockHandler.generateRanges(columnMapper, hiveRowIdColumnName, root)).thenReturn(Arrays.asList(r1, r2));
     Mockito.when(mockHandler.getExpression(conf)).thenReturn(root);
 
     // A null result from AccumuloRangeGenerator is all ranges

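The hunks above all revert the same API change: after the revert, generateRanges is stubbed with three arguments and no Configuration parameter. A minimal, hypothetical sketch of the Mockito partial-mock pattern these tests rely on (the class and method names below are illustrative stand-ins, not Hive's real Accumulo handler):

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import java.util.Arrays;
    import java.util.List;

    public class PartialMockSketch {
      // Illustrative stand-in: an expensive public method that delegates to a cheaper collaborator method.
      public static class RangeHandler {
        public List<String> getRanges(String table) {
          return generateRanges(table, "rowId");
        }
        public List<String> generateRanges(String table, String rowIdColumn) {
          throw new UnsupportedOperationException("not exercised in the test");
        }
      }

      public static void main(String[] args) {
        RangeHandler handler = mock(RangeHandler.class);
        // Run the method under test for real, but stub the call it makes on itself.
        when(handler.getRanges("t1")).thenCallRealMethod();
        when(handler.generateRanges("t1", "rowId")).thenReturn(Arrays.asList("a", "z"));
        System.out.println(handler.getRanges("t1")); // prints [a, z]
      }
    }

The real tests above follow the same shape: getRanges is called for real while generateRanges and getExpression are stubbed.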

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java
index 1a8337f..e344e0f 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java
@@ -100,6 +100,22 @@ public class TestOperationLoggingLayout {
       String row = iter.next()[0].toString();
       Assert.assertEquals(true, row.matches("^.*(FATAL|ERROR|WARN|INFO|DEBUG|TRACE).*$"));
     }
+
+    String queryString = "set hive.server2.logging.operation.level=verbose";
+    client.executeStatement(sessionHandle, queryString, null);
+    operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
+    // just check for first few lines, some log lines are multi-line strings which can break format
+    // checks below
+    rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 10,
+        FetchType.LOG);
+    iter = rowSetLog.iterator();
+    // verbose pattern is "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n"
+    while (iter.hasNext()) {
+      String row = iter.next()[0].toString();
+      // just check if the log line starts with date
+      Assert.assertEquals(true,
+          row.matches("^\\d{2}[/](0[1-9]|1[012])[/](0[1-9]|[12][0-9]|3[01]).*$"));
+    }
   }
 
   private SessionHandle setupSession() throws Exception {

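For reference, the date-prefix assertion added above can be exercised on its own. A minimal sketch, using a made-up log line and the exact pattern from the test:

    public class VerboseLogPrefixCheck {
      public static void main(String[] args) {
        // Verbose layout is "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n", so each line should start with yy/MM/dd.
        String row = "17/05/08 20:42:51 INFO ql.Driver: Compiling command";
        boolean startsWithDate =
            row.matches("^\\d{2}[/](0[1-9]|1[012])[/](0[1-9]|[12][0-9]|3[01]).*$");
        System.out.println(startsWithDate); // true
      }
    }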
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/pom.xml
----------------------------------------------------------------------
diff --git a/itests/pom.xml b/itests/pom.xml
index 400075b..e5b54bf 100644
--- a/itests/pom.xml
+++ b/itests/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/qtest-accumulo/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest-accumulo/pom.xml b/itests/qtest-accumulo/pom.xml
index b7ce283..31cee36 100644
--- a/itests/qtest-accumulo/pom.xml
+++ b/itests/qtest-accumulo/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/qtest-spark/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest-spark/pom.xml b/itests/qtest-spark/pom.xml
index a506f7f..f301504 100644
--- a/itests/qtest-spark/pom.xml
+++ b/itests/qtest-spark/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/qtest/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml
index 02664f3..1c3b601 100644
--- a/itests/qtest/pom.xml
+++ b/itests/qtest/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestBeeLineDriver.java
----------------------------------------------------------------------
diff --git a/itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestBeeLineDriver.java b/itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestBeeLineDriver.java
index 4dd17c1e..24eeb9d 100644
--- a/itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestBeeLineDriver.java
+++ b/itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestBeeLineDriver.java
@@ -22,15 +22,15 @@ import java.util.List;
 
 import org.apache.hadoop.hive.cli.control.CliAdapter;
 import org.apache.hadoop.hive.cli.control.CliConfigs;
-import org.apache.hive.beeline.Parallelized;
 import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestRule;
 import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
-@RunWith(Parallelized.class)
+@RunWith(Parameterized.class)
 public class TestBeeLineDriver {
 
   static CliAdapter adapter = new CliConfigs.BeeLineConfig().getCliAdapter();

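The revert above replaces the custom Parallelized runner with JUnit's stock Parameterized runner, which runs the same parameter sets sequentially. A minimal, self-contained sketch of the stock runner (the query file names are illustrative values, not Hive's CliAdapter wiring):

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.Assert;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;
    import org.junit.runners.Parameterized.Parameters;

    @RunWith(Parameterized.class)
    public class ExampleParameterizedTest {
      @Parameters(name = "{0}")
      public static Collection<Object[]> data() {
        // One Object[] per test instance; here each carries a single query file name.
        return Arrays.asList(new Object[][] {{"drop_with_concurrency.q"}, {"escape_comments.q"}});
      }

      private final String qfile;

      public ExampleParameterizedTest(String qfile) {
        this.qfile = qfile;
      }

      @Test
      public void runsOncePerParameter() {
        Assert.assertTrue(qfile.endsWith(".q"));
      }
    }

The deleted Parallelized runner (removed further below) provided a parallel variant of this; the revert falls back to the stock sequential runner.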
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index df947bb..dc02fd4 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -71,7 +71,6 @@ minillap.shared.query.files=insert_into1.q,\
   orc_merge3.q,\
   orc_merge4.q,\
   orc_merge_diff_fs.q,\
-  parallel_colstats.q,\
   unionDistinct_1.q,\
   union_type_chk.q,\
   cte_2.q,\
@@ -353,8 +352,6 @@ minillaplocal.shared.query.files=alter_merge_2_orc.q,\
   vector_reduce_groupby_decimal.q,\
   vector_string_concat.q,\
   vector_struct_in.q,\
-  vector_udf_character_length.q,\
-  vector_udf_octet_length.q,\
   vector_varchar_4.q,\
   vector_varchar_mapjoin1.q,\
   vector_varchar_simple.q,\
@@ -408,7 +405,6 @@ minillaplocal.shared.query.files=alter_merge_2_orc.q,\
 minillap.query.files=acid_bucket_pruning.q,\
   bucket5.q,\
   bucket6.q,\
-  dynamic_semijoin_user_level.q,\
   except_distinct.q,\
   explainuser_2.q,\
   empty_dir_in_table.q,\
@@ -503,7 +499,6 @@ minillaplocal.query.files=acid_globallimit.q,\
   groupby2.q,\
   hybridgrace_hashjoin_1.q,\
   hybridgrace_hashjoin_2.q,\
-  is_distinct_from.q,\
   infer_bucket_sort_bucketed_table.q,\
   input16_cc.q,\
   insert_dir_distcp.q,\
@@ -515,14 +510,12 @@ minillaplocal.query.files=acid_globallimit.q,\
   join_max_hashtable.q,\
   join_nulls.q,\
   join_nullsafe.q,\
-  join_is_not_distinct_from.q,\
   leftsemijoin_mr.q,\
   limit_join_transpose.q,\
   lineage2.q,\
   lineage3.q,\
   list_bucket_dml_10.q,\
   llap_partitioned.q,\
-  llap_vector_nohybridgrace.q,\
   load_dyn_part5.q,\
   lvj_mapjoin.q,\
   mapjoin_decimal.q,\
@@ -574,7 +567,6 @@ minillaplocal.query.files=acid_globallimit.q,\
   schema_evol_text_vecrow_table.q,\
   selectDistinctStar.q,\
   semijoin.q,\
-  semijoin_hint.q,\
   smb_cache.q,\
   special_character_in_tabnames_1.q,\
   sqlmerge.q,\
@@ -619,26 +611,11 @@ minillaplocal.query.files=acid_globallimit.q,\
   vector_auto_smb_mapjoin_14.q,\
   vector_decimal_2.q,\
   vector_decimal_udf.q,\
-  vector_groupby_cube1.q,\
-  vector_groupby_grouping_id1.q,\
-  vector_groupby_grouping_id2.q,\
-  vector_groupby_grouping_id3.q,\
-  vector_groupby_grouping_sets1.q,\
-  vector_groupby_grouping_sets2.q,\
-  vector_groupby_grouping_sets3.q,\
-  vector_groupby_grouping_sets4.q,\
-  vector_groupby_grouping_sets5.q,\
-  vector_groupby_grouping_sets6.q,\
-  vector_groupby_grouping_sets_grouping.q,\
-  vector_groupby_grouping_sets_limit.q,\
-  vector_groupby_grouping_window.q,\
-  vector_groupby_rollup1.q,\
   vector_join30.q,\
   vector_join_filters.q,\
   vector_leftsemi_mapjoin.q,\
   vector_number_compare_projection.q,\
   vector_partitioned_date_time.q,\
-  vector_ptf_part_simple.q,\
   vector_udf1.q,\
   vectorization_short_regress.q,\
   vectorized_dynamic_partition_pruning.q,\
@@ -748,22 +725,10 @@ encrypted.query.files=encryption_join_unencrypted_tbl.q,\
   encryption_drop_view.q \
   encryption_drop_partition.q \
   encryption_with_trash.q \
-  encryption_ctas.q \
-  encryption_auto_purge_tables.q \
-  encryption_drop_table_in_encrypted_db.q
+  encryption_ctas.q
 
 beeline.positive.include=drop_with_concurrency.q,\
-  escape_comments.q,\
-  smb_mapjoin_1.q,\
-  smb_mapjoin_10.q,\
-  smb_mapjoin_11.q,\
-  smb_mapjoin_12.q,\
-  smb_mapjoin_13.q,\
-  smb_mapjoin_16.q,\
-  smb_mapjoin_2.q,\
-  smb_mapjoin_3.q,\
-  smb_mapjoin_7.q,\
-  select_dummy_source.q
+  escape_comments.q
 
 minimr.query.negative.files=cluster_tasklog_retrieval.q,\
   file_with_header_footer_negative.q,\
@@ -1371,7 +1336,6 @@ spark.query.files=add_part_multiple.q, \
 spark.only.query.files=spark_combine_equivalent_work.q,\
   spark_dynamic_partition_pruning.q,\
   spark_dynamic_partition_pruning_2.q,\
-  spark_explainuser_1.q,\
   spark_vectorized_dynamic_partition_pruning.q,\
   spark_use_file_size_for_mapjoin.q,\
   spark_use_op_stats.q
@@ -1446,6 +1410,4 @@ spark.query.negative.files=groupby2_map_skew_multi_distinct.q,\
   groupby2_multi_distinct.q,\
   groupby3_map_skew_multi_distinct.q,\
   groupby3_multi_distinct.q,\
-  groupby_grouping_sets7.q,\
-  spark_job_max_tasks.q
-
+  groupby_grouping_sets7.q

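The testconfiguration.properties entries above are ordinary java.util.Properties: a trailing backslash continues the logical line, and each value is a comma-separated list of .q files. A minimal, hypothetical sketch of reading one of those lists (not the actual Hive test harness code):

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;
    import java.util.Properties;

    public class QueryFileListSketch {
      public static void main(String[] args) throws IOException {
        Properties props = new Properties();
        // Properties.load() joins backslash-continued lines into a single value.
        try (FileInputStream in =
                 new FileInputStream("itests/src/test/resources/testconfiguration.properties")) {
          props.load(in);
        }
        String raw = props.getProperty("beeline.positive.include", "");
        List<String> qfiles = Arrays.asList(raw.split("\\s*,\\s*"));
        System.out.println(qfiles); // e.g. [drop_with_concurrency.q, escape_comments.q]
      }
    }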
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/test-serde/pom.xml
----------------------------------------------------------------------
diff --git a/itests/test-serde/pom.xml b/itests/test-serde/pom.xml
index bf5f5d2..df0ce8a 100644
--- a/itests/test-serde/pom.xml
+++ b/itests/test-serde/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/pom.xml
----------------------------------------------------------------------
diff --git a/itests/util/pom.xml b/itests/util/pom.xml
index a44a55c..3740af8 100644
--- a/itests/util/pom.xml
+++ b/itests/util/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java
index 02abe53..9c97c31 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hive.cli.control.AbstractCliConfig.MetastoreType;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveVariableSource;
 import org.apache.hadoop.hive.conf.VariableSubstitution;
-import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.hadoop.hive.ql.QTestUtil;
 import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
 import org.junit.After;
@@ -140,14 +139,12 @@ public abstract class AbstractCoreBlobstoreCliDriver extends CliAdapter {
       if ((ecode == 0) ^ expectSuccess) {
         qt.failed(ecode, fname, debugHint);
       }
-      QTestProcessExecResult result = qt.checkCliDriverResults(fname);
-      if (result.getReturnCode() != 0) {
-        String message = Strings.isNullOrEmpty(result.getCapturedOutput()) ?
-            debugHint : "\r\n" + result.getCapturedOutput();
-        qt.failedDiff(result.getReturnCode(), fname, message);
+      ecode = qt.checkCliDriverResults(fname);
+      if (ecode != 0) {
+        qt.failedDiff(ecode, fname, debugHint);
       }
     }
-    catch (Exception e) {
+    catch (Throwable e) {
       qt.failed(e, fname, debugHint);
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
index 1457db0..67064b8 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
@@ -287,7 +287,6 @@ public class CliConfigs {
 
         excludesFrom(testConfigProps, "minimr.query.negative.files");
         excludeQuery("authorization_uri_import.q");
-        excludeQuery("spark_job_max_tasks.q");
 
         setResultsDir("ql/src/test/results/clientnegative");
         setLogDir("itests/qtest/target/qfile-results/clientnegative");

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java
index 73e5632..3e4b373 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java
@@ -18,10 +18,8 @@
 package org.apache.hadoop.hive.cli.control;
 
 import static org.junit.Assert.assertTrue;
-
 import org.apache.hadoop.hive.accumulo.AccumuloQTestUtil;
 import org.apache.hadoop.hive.accumulo.AccumuloTestSetup;
-import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -94,13 +92,13 @@ public class CoreAccumuloCliDriver extends CliAdapter {
         qt.failed(ecode, fname, null);
       }
 
-      QTestProcessExecResult result = qt.checkCliDriverResults(fname);
-      if (result.getReturnCode() != 0) {
-        qt.failedDiff(result.getReturnCode(), fname, result.getCapturedOutput());
+      ecode = qt.checkCliDriverResults(fname);
+      if (ecode != 0) {
+        qt.failedDiff(ecode, fname, null);
       }
       qt.clearPostTestEffects();
 
-    } catch (Exception e) {
+    } catch (Throwable e) {
       qt.failed(e, fname, null);
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
index 8c7057c..acc02eb 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
@@ -17,23 +17,21 @@
  */
 package org.apache.hadoop.hive.cli.control;
 
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import com.google.common.base.Strings;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.hadoop.hive.ql.hooks.PreExecutePrinter;
-import org.apache.hive.beeline.QFile;
-import org.apache.hive.beeline.QFile.QFileBuilder;
-import org.apache.hive.beeline.QFileBeeLineClient;
-import org.apache.hive.beeline.QFileBeeLineClient.QFileClientBuilder;
+import org.apache.hive.beeline.qfile.QFile;
+import org.apache.hive.beeline.qfile.QFile.QFileBuilder;
+import org.apache.hive.beeline.qfile.QFileBeeLineClient;
+import org.apache.hive.beeline.qfile.QFileBeeLineClient.QFileClientBuilder;
 import org.apache.hive.jdbc.miniHS2.MiniHS2;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
 import java.io.File;
 import java.io.IOException;
-import java.sql.SQLException;
 import java.util.HashMap;
 
 public class CoreBeeLineDriver extends CliAdapter {
@@ -46,7 +44,6 @@ public class CoreBeeLineDriver extends CliAdapter {
   private final File testDataDirectory;
   private final File testScriptDirectory;
   private boolean overwrite = false;
-  private boolean rewriteSourceTables = true;
   private MiniHS2 miniHS2;
   private QFileClientBuilder clientBuilder;
   private QFileBuilder fileBuilder;
@@ -71,10 +68,6 @@ public class CoreBeeLineDriver extends CliAdapter {
     if (testOutputOverwrite != null && "true".equalsIgnoreCase(testOutputOverwrite)) {
       overwrite = true;
     }
-    String testRewriteSourceTables = System.getProperty("test.rewrite.source.tables");
-    if (testRewriteSourceTables != null && "false".equalsIgnoreCase(testRewriteSourceTables)) {
-      rewriteSourceTables = false;
-    }
 
     HiveConf hiveConf = new HiveConf();
     // We do not need Zookeeper at the moment
@@ -99,17 +92,19 @@ public class CoreBeeLineDriver extends CliAdapter {
         .setPassword("password");
 
     fileBuilder = new QFileBuilder()
+        .setHiveRootDirectory(hiveRootDirectory)
         .setLogDirectory(logDirectory)
         .setQueryDirectory(queryDirectory)
         .setResultsDirectory(resultsDirectory)
-        .setRewriteSourceTables(rewriteSourceTables);
+        .setScratchDirectoryString(hiveConf.getVar(HiveConf.ConfVars.SCRATCHDIR))
+        .setWarehouseDirectoryString(hiveConf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE));
 
     runInfraScript(initScript, new File(logDirectory, "init.beeline"),
         new File(logDirectory, "init.raw"));
   }
 
   protected void runInfraScript(File script, File beeLineOutput, File log)
-      throws IOException, SQLException {
+      throws IOException {
     try (QFileBeeLineClient beeLineClient = clientBuilder.getClient(beeLineOutput)) {
       beeLineClient.execute(
           new String[]{
@@ -119,9 +114,6 @@ public class CoreBeeLineDriver extends CliAdapter {
             "!run " + script,
           },
           log);
-    } catch (Exception e) {
-      throw new SQLException("Error running infra script: " + script
-          + "\nCheck the following logs for details:\n - " + beeLineOutput + "\n - " + log, e);
     }
   }
 
@@ -142,41 +134,28 @@ public class CoreBeeLineDriver extends CliAdapter {
     try (QFileBeeLineClient beeLineClient = clientBuilder.getClient(qFile.getLogFile())) {
       long startTime = System.currentTimeMillis();
       System.err.println(">>> STARTED " + qFile.getName());
+      assertTrue("QFile execution failed, see logs for details", beeLineClient.execute(qFile));
 
-      beeLineClient.execute(qFile);
-
-      long queryEndTime = System.currentTimeMillis();
-      System.err.println(">>> EXECUTED " + qFile.getName() + ": " + (queryEndTime - startTime)
-          + "ms");
+      long endTime = System.currentTimeMillis();
+      System.err.println(">>> EXECUTED " + qFile.getName() + ":" + (endTime - startTime) / 1000
+          + "s");
 
       qFile.filterOutput();
       long filterEndTime = System.currentTimeMillis();
-      System.err.println(">>> FILTERED " + qFile.getName() + ": " + (filterEndTime - queryEndTime)
-          + "ms");
+      System.err.println(">>> FILTERED " + qFile.getName() + ":" + (filterEndTime - endTime) / 1000
+          + "s");
 
       if (!overwrite) {
-        QTestProcessExecResult result = qFile.compareResults();
-
-        long compareEndTime = System.currentTimeMillis();
-        System.err.println(">>> COMPARED " + qFile.getName() + ": "
-            + (compareEndTime - filterEndTime) + "ms");
-        if (result.getReturnCode() == 0) {
+        if (qFile.compareResults()) {
           System.err.println(">>> PASSED " + qFile.getName());
         } else {
           System.err.println(">>> FAILED " + qFile.getName());
-          String messageText = "Client result comparison failed with error code = "
-              + result.getReturnCode() + " while executing fname=" + qFile.getName() + "\n";
-          String messageBody = Strings.isNullOrEmpty(result.getCapturedOutput()) ?
-              qFile.getDebugHint() : result.getCapturedOutput();
-          fail(messageText + messageBody);
+          fail("Failed diff");
         }
       } else {
         qFile.overwriteResults();
         System.err.println(">>> PASSED " + qFile.getName());
       }
-    } catch (Exception e) {
-      throw new Exception("Exception running or analyzing the results of the query file: " + qFile
-          + "\n" + qFile.getDebugHint(), e);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
index d59b650..a735346 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
@@ -23,9 +23,7 @@ import static org.junit.Assert.fail;
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.base.Stopwatch;
-import com.google.common.base.Strings;
 import org.apache.hadoop.hive.cli.control.AbstractCliConfig.MetastoreType;
-import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.hadoop.hive.ql.QTestUtil;
 import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
 import org.apache.hadoop.hive.util.ElapsedTimeLoggingWrapper;
@@ -177,15 +175,13 @@ public class CoreCliDriver extends CliAdapter {
         failed = true;
         qt.failed(ecode, fname, debugHint);
       }
-      QTestProcessExecResult result = qt.checkCliDriverResults(fname);
-      if (result.getReturnCode() != 0) {
+      ecode = qt.checkCliDriverResults(fname);
+      if (ecode != 0) {
         failed = true;
-        String message = Strings.isNullOrEmpty(result.getCapturedOutput()) ?
-            debugHint : "\r\n" + result.getCapturedOutput();
-        qt.failedDiff(result.getReturnCode(), fname, message);
+        qt.failedDiff(ecode, fname, debugHint);
       }
     }
-    catch (Exception e) {
+    catch (Throwable e) {
       failed = true;
       qt.failed(e, fname, debugHint);
     } finally {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java
index bff81dd..71a02bc 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java
@@ -25,8 +25,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import com.google.common.base.Strings;
-import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.hadoop.hive.ql.QTestUtil;
 import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
 import org.junit.After;
@@ -145,14 +143,12 @@ public class CoreCompareCliDriver extends CliAdapter{
         }
       }
 
-      QTestProcessExecResult result = qt.checkCompareCliDriverResults(fname, outputs);
-      if (result.getReturnCode() != 0) {
-        String message = Strings.isNullOrEmpty(result.getCapturedOutput()) ?
-            debugHint : "\r\n" + result.getCapturedOutput();
-        qt.failedDiff(result.getReturnCode(), fname, message);
+      ecode = qt.checkCompareCliDriverResults(fname, outputs);
+      if (ecode != 0) {
+        qt.failedDiff(ecode, fname, debugHint);
       }
     }
-    catch (Exception e) {
+    catch (Throwable e) {
       qt.failed(e, fname, debugHint);
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java
index aa0b071..956a42d 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java
@@ -23,7 +23,6 @@ import static org.junit.Assert.fail;
 
 import org.apache.hadoop.hive.hbase.HBaseQTestUtil;
 import org.apache.hadoop.hive.hbase.HBaseTestSetup;
-import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -121,12 +120,12 @@ public class CoreHBaseCliDriver extends CliAdapter {
         qt.failed(ecode, fname, null);
       }
 
-      QTestProcessExecResult result = qt.checkCliDriverResults(fname);
-      if (result.getReturnCode() != 0) {
-        qt.failedDiff(result.getReturnCode(), fname, result.getCapturedOutput());
+      ecode = qt.checkCliDriverResults(fname);
+      if (ecode != 0) {
+        qt.failedDiff(ecode, fname, null);
       }
 
-    } catch (Exception e) {
+    } catch (Throwable e) {
       qt.failed(e, fname, null);
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java
index 8320a80..6225180 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java
@@ -23,7 +23,6 @@ import static org.junit.Assert.fail;
 
 import org.apache.hadoop.hive.hbase.HBaseQTestUtil;
 import org.apache.hadoop.hive.hbase.HBaseTestSetup;
-import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -102,13 +101,13 @@ public class CoreHBaseNegativeCliDriver extends CliAdapter {
         qt.failed(fname, null);
       }
 
-      QTestProcessExecResult result = qt.checkCliDriverResults(fname);
-      if (result.getReturnCode() != 0) {
-        qt.failedDiff(result.getReturnCode(), fname, result.getCapturedOutput());
+      ecode = qt.checkCliDriverResults(fname);
+      if (ecode != 0) {
+        qt.failedDiff(ecode, fname, null);
       }
       qt.clearPostTestEffects();
 
-    } catch (Exception e) {
+    } catch (Throwable e) {
       qt.failed(e, fname, null);
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java
index 438a61e..65b2ce7 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hive.cli.control;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import com.google.common.base.Strings;
-import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.hadoop.hive.ql.QTestUtil;
 import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
 import org.junit.After;
@@ -125,14 +123,12 @@ public class CoreNegativeCliDriver extends CliAdapter{
         qt.failed(fname, debugHint);
       }
 
-      QTestProcessExecResult result = qt.checkCliDriverResults(fname);
-      if (result.getReturnCode() != 0) {
-        String message = Strings.isNullOrEmpty(result.getCapturedOutput()) ?
-            debugHint : "\r\n" + result.getCapturedOutput();
-        qt.failedDiff(result.getReturnCode(), fname, message);
+      ecode = qt.checkCliDriverResults(fname);
+      if (ecode != 0) {
+        qt.failedDiff(ecode, fname, debugHint);
       }
     }
-    catch (Exception e) {
+    catch (Throwable e) {
       qt.failed(e, fname, debugHint);
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java
index 26b9ce1..8620cde 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java
@@ -22,8 +22,6 @@ package org.apache.hadoop.hive.cli.control;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import com.google.common.base.Strings;
-import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.hadoop.hive.ql.QTestUtil;
 import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
 import org.junit.After;
@@ -123,13 +121,11 @@ public class CorePerfCliDriver extends CliAdapter{
       if (ecode != 0) {
         qt.failed(ecode, fname, debugHint);
       }
-      QTestProcessExecResult result = qt.checkCliDriverResults(fname);
-      if (result.getReturnCode() != 0) {
-        String message = Strings.isNullOrEmpty(result.getCapturedOutput()) ?
-            debugHint : "\r\n" + result.getCapturedOutput();
-        qt.failedDiff(result.getReturnCode(), fname, message);
+      ecode = qt.checkCliDriverResults(fname);
+      if (ecode != 0) {
+        qt.failedDiff(ecode, fname, debugHint);
       }
-    } catch (Exception e) {
+    } catch (Throwable e) {
       qt.failed(e, fname, debugHint);
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestProcessExecResult.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestProcessExecResult.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestProcessExecResult.java
index 75cce14..6acc7a0 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestProcessExecResult.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestProcessExecResult.java
@@ -18,8 +18,12 @@
 
 package org.apache.hadoop.hive.ql;
 
+
+import com.google.common.base.Strings;
+import org.apache.commons.lang3.StringUtils;
+
 /**
- * Standard output and return code of a process executed during the qtests.
+ * Standard output and return code of a process executed during the qtests
  */
 public class QTestProcessExecResult {
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index b897ffa..e32ee74 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -39,10 +39,6 @@ import java.io.PrintStream;
 import java.io.Serializable;
 import java.io.StringWriter;
 import java.net.URL;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.FileSystems;
-import java.nio.file.Files;
-import java.nio.file.StandardOpenOption;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -55,7 +51,6 @@ import java.util.Collection;
 import java.util.Comparator;
 import java.util.Deque;
 import java.util.EnumSet;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -64,22 +59,17 @@ import java.util.Properties;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
-import java.util.stream.Stream;
 
 import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
 
-import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.io.output.ByteArrayOutputStream;
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang3.tuple.Pair;
 import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileStatus;
@@ -128,6 +118,7 @@ import org.apache.hadoop.hive.ql.processors.HiveCommand;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.shims.HadoopShims;
 import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.util.Shell;
 import org.apache.hive.common.util.StreamPrinter;
 import org.apache.logging.log4j.util.Strings;
 import org.apache.tools.ant.BuildException;
@@ -176,7 +167,7 @@ public class QTestUtil {
   private final Set<String> qNoSessionReuseQuerySet;
   private final Set<String> qJavaVersionSpecificOutput;
   private static final String SORT_SUFFIX = ".sorted";
-  private final Set<String> srcTables;
+  private final HashSet<String> srcTables;
   private final Set<String> srcUDFs;
   private final MiniClusterType clusterType;
   private final FsType fsType;
@@ -209,7 +200,7 @@ public class QTestUtil {
   }
   private HBaseTestingUtility utility;
 
-  public static Set<String> getSrcTables() {
+  HashSet<String> getSrcTables() {
     HashSet<String> srcTables = new HashSet<String>();
     // FIXME: moved default value to here...for now
     // i think this features is never really used from the command line
@@ -1404,8 +1395,7 @@ public class QTestUtil {
 
         int rc = response.getResponseCode();
         if (rc != 0) {
-          SessionState.getConsole().printError(response.toString(), response.getException() != null ?
-                  Throwables.getStackTraceAsString(response.getException()) : "");
+          SessionState.get().out.println(response);
         }
 
         return rc;
@@ -1413,7 +1403,7 @@ public class QTestUtil {
         throw new RuntimeException("Could not get CommandProcessor for command: " + commandName);
       }
     } catch (Exception e) {
-      throw new RuntimeException("Could not execute test command", e);
+      throw new RuntimeException("Could not execute test command: " + e.getMessage());
     }
   }
 
@@ -1476,6 +1466,10 @@ public class QTestUtil {
     // Create an instance of hive in order to create the tables
     testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
     db = Hive.get(conf);
+    // Create dest4 to replace dest4_sequencefile
+    LinkedList<String> cols = new LinkedList<String>();
+    cols.add("key");
+    cols.add("value");
 
     // Move all data from dest4_sequencefile to dest4
     drv
@@ -1486,7 +1480,7 @@ public class QTestUtil {
         true, true);
   }
 
-  public QTestProcessExecResult checkNegativeResults(String tname, Exception e) throws Exception {
+  public int checkNegativeResults(String tname, Exception e) throws Exception {
 
     String outFileExtension = getOutFileExtension(tname);
 
@@ -1509,17 +1503,16 @@ public class QTestUtil {
     outfd.write(e.getMessage());
     outfd.close();
 
-    QTestProcessExecResult result = executeDiffCommand(outf.getPath(), expf, false,
+    int exitVal = executeDiffCommand(outf.getPath(), expf, false,
                                      qSortSet.contains(qf.getName()));
-    if (overWrite) {
-      overwriteResults(outf.getPath(), expf);
-      return QTestProcessExecResult.createWithoutOutput(0);
+    if (exitVal != 0 && overWrite) {
+      exitVal = overwriteResults(outf.getPath(), expf);
     }
 
-    return result;
+    return exitVal;
   }
 
-  public QTestProcessExecResult checkParseResults(String tname, ASTNode tree) throws Exception {
+  public int checkParseResults(String tname, ASTNode tree) throws Exception {
 
     if (tree != null) {
       String outFileExtension = getOutFileExtension(tname);
@@ -1535,11 +1528,10 @@ public class QTestUtil {
       outfd.write(tree.toStringTree());
       outfd.close();
 
-      QTestProcessExecResult exitVal = executeDiffCommand(outf.getPath(), expf, false, false);
+      int exitVal = executeDiffCommand(outf.getPath(), expf, false, false);
 
-      if (overWrite) {
-        overwriteResults(outf.getPath(), expf);
-        return QTestProcessExecResult.createWithoutOutput(0);
+      if (exitVal != 0 && overWrite) {
+        exitVal = overwriteResults(outf.getPath(), expf);
       }
 
       return exitVal;
@@ -1708,7 +1700,7 @@ public class QTestUtil {
   });
 
   /* This list may be modified by specific cli drivers to mask strings that change on every test */
-  private final List<Pair<Pattern, String>> patternsWithMaskComments = new ArrayList<Pair<Pattern, String>>() {{
+  private List<Pair<Pattern, String>> patternsWithMaskComments = new ArrayList<Pair<Pattern, String>>() {{
     add(toPatternPair("(pblob|s3.?|swift|wasb.?).*hive-staging.*","### BLOBSTORE_STAGING_PATH ###"));
   }};
 
@@ -1720,7 +1712,7 @@ public class QTestUtil {
     patternsWithMaskComments.add(toPatternPair(patternStr, maskComment));
   }
 
-  public QTestProcessExecResult checkCliDriverResults(String tname) throws Exception {
+  public int checkCliDriverResults(String tname) throws Exception {
     assert(qMap.containsKey(tname));
 
     String outFileExtension = getOutFileExtension(tname);
@@ -1729,71 +1721,69 @@ public class QTestUtil {
     File f = new File(logDir, tname + outFileExtension);
 
     maskPatterns(planMask, f.getPath());
-    QTestProcessExecResult exitVal = executeDiffCommand(f.getPath(),
+    int exitVal = executeDiffCommand(f.getPath(),
                                      outFileName, false,
                                      qSortSet.contains(tname));
 
-    if (overWrite) {
-      overwriteResults(f.getPath(), outFileName);
-      return QTestProcessExecResult.createWithoutOutput(0);
+    if (exitVal != 0 && overWrite) {
+      exitVal = overwriteResults(f.getPath(), outFileName);
     }
 
     return exitVal;
   }
 
 
-  public QTestProcessExecResult checkCompareCliDriverResults(String tname, List<String> outputs)
-      throws Exception {
+  public int checkCompareCliDriverResults(String tname, List<String> outputs) throws Exception {
     assert outputs.size() > 1;
     maskPatterns(planMask, outputs.get(0));
     for (int i = 1; i < outputs.size(); ++i) {
       maskPatterns(planMask, outputs.get(i));
-      QTestProcessExecResult result = executeDiffCommand(
+      int ecode = executeDiffCommand(
           outputs.get(i - 1), outputs.get(i), false, qSortSet.contains(tname));
-      if (result.getReturnCode() != 0) {
+      if (ecode != 0) {
         System.out.println("Files don't match: " + outputs.get(i - 1) + " and " + outputs.get(i));
-        return result;
+        return ecode;
       }
     }
-    return QTestProcessExecResult.createWithoutOutput(0);
+    return 0;
   }
 
-  private static void overwriteResults(String inFileName, String outFileName) throws Exception {
+  private static int overwriteResults(String inFileName, String outFileName) throws Exception {
     // This method can be replaced with Files.copy(source, target, REPLACE_EXISTING)
     // once Hive uses JAVA 7.
     System.out.println("Overwriting results " + inFileName + " to " + outFileName);
-    int result = executeCmd(new String[]{
+    return executeCmd(new String[] {
         "cp",
         getQuotedString(inFileName),
         getQuotedString(outFileName)
-    }).getReturnCode();
-    if (result != 0)
-      throw new IllegalStateException("Unexpected error while overwriting " +
-          inFileName + " with " + outFileName);
+      });
   }
 
-  private static QTestProcessExecResult executeDiffCommand(String inFileName,
+  private static int executeDiffCommand(String inFileName,
       String outFileName,
       boolean ignoreWhiteSpace,
       boolean sortResults
       ) throws Exception {
 
-    QTestProcessExecResult result;
+    int result = 0;
 
     if (sortResults) {
       // sort will try to open the output file in write mode on windows. We need to
       // close it first.
       SessionState ss = SessionState.get();
       if (ss != null && ss.out != null && ss.out != System.out) {
-        ss.out.close();
+  ss.out.close();
       }
 
       String inSorted = inFileName + SORT_SUFFIX;
       String outSorted = outFileName + SORT_SUFFIX;
 
-      sortFiles(inFileName, inSorted);
-      sortFiles(outFileName, outSorted);
-
+      result = sortFiles(inFileName, inSorted);
+      result |= sortFiles(outFileName, outSorted);
+      if (result != 0) {
+        System.err.println("ERROR: Could not sort files before comparing");
+        return result;
+      }
       inFileName = inSorted;
       outFileName = outSorted;
     }
@@ -1823,47 +1813,40 @@ public class QTestUtil {
     return result;
   }
 
-  private static void sortFiles(String in, String out) throws Exception {
-    int result = executeCmd(new String[]{
+  private static int sortFiles(String in, String out) throws Exception {
+    return executeCmd(new String[] {
         "sort",
         getQuotedString(in),
-    }, out, null).getReturnCode();
-    if (result != 0)
-      throw new IllegalStateException("Unexpected error while sorting " + in);
+      }, out, null);
   }
 
-  private static QTestProcessExecResult executeCmd(Collection<String> args) throws Exception {
+  private static int executeCmd(Collection<String> args) throws Exception {
     return executeCmd(args, null, null);
   }
 
-  private static QTestProcessExecResult executeCmd(String[] args) throws Exception {
+  private static int executeCmd(String[] args) throws Exception {
     return executeCmd(args, null, null);
   }
 
-  private static QTestProcessExecResult executeCmd(Collection<String> args, String outFile,
-                                            String errFile) throws Exception {
+  private static int executeCmd(Collection<String> args, String outFile, String errFile) throws Exception {
     String[] cmdArray = args.toArray(new String[args.size()]);
     return executeCmd(cmdArray, outFile, errFile);
   }
 
-  private static QTestProcessExecResult executeCmd(String[] args, String outFile,
-                                            String errFile) throws Exception {
+  private static int executeCmd(String[] args, String outFile, String errFile) throws Exception {
     System.out.println("Running: " + org.apache.commons.lang.StringUtils.join(args, ' '));
 
     PrintStream out = outFile == null ?
       SessionState.getConsole().getChildOutStream() :
-      new PrintStream(new FileOutputStream(outFile), true, "UTF-8");
+      new PrintStream(new FileOutputStream(outFile), true);
     PrintStream err = errFile == null ?
       SessionState.getConsole().getChildErrStream() :
-      new PrintStream(new FileOutputStream(errFile), true, "UTF-8");
+      new PrintStream(new FileOutputStream(errFile), true);
 
     Process executor = Runtime.getRuntime().exec(args);
 
-    ByteArrayOutputStream bos = new ByteArrayOutputStream();
-    PrintStream str = new PrintStream(bos, true, "UTF-8");
-
     StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, err);
-    StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, out, str);
+    StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, out);
 
     outPrinter.start();
     errPrinter.start();
@@ -1881,8 +1864,7 @@ public class QTestUtil {
       err.close();
     }
 
-    return QTestProcessExecResult.
-        create(result, new String(bos.toByteArray(), StandardCharsets.UTF_8));
+    return result;
   }
 
   private static String getQuotedString(String str){
@@ -2058,18 +2040,11 @@ public class QTestUtil {
       qt[i].clearTestSideEffects();
       qt[i].cliInit(qfiles[i].getName(), false);
       qt[i].executeClient(qfiles[i].getName());
-      QTestProcessExecResult result = qt[i].checkCliDriverResults(qfiles[i].getName());
-      if (result.getReturnCode() != 0) {
+      int ecode = qt[i].checkCliDriverResults(qfiles[i].getName());
+      if (ecode != 0) {
         failed = true;
-        StringBuilder builder = new StringBuilder();
-        builder.append("Test ")
-            .append(qfiles[i].getName())
-            .append(" results check failed with error code ")
-            .append(result.getReturnCode());
-        if (Strings.isNotEmpty(result.getCapturedOutput())) {
-          builder.append(" and diff value ").append(result.getCapturedOutput());
-        }
-        System.err.println(builder.toString());
+        System.err.println("Test " + qfiles[i].getName()
+            + " results check failed with error code " + ecode);
         outputTestFailureHelpMessage();
       }
       qt[i].clearPostTestEffects();
@@ -2116,18 +2091,11 @@ public class QTestUtil {
 
     for (int i = 0; i < qfiles.length; i++) {
       qtThread[i].join();
-      QTestProcessExecResult result = qt[i].checkCliDriverResults(qfiles[i].getName());
-      if (result.getReturnCode() != 0) {
+      int ecode = qt[i].checkCliDriverResults(qfiles[i].getName());
+      if (ecode != 0) {
         failed = true;
-        StringBuilder builder = new StringBuilder();
-        builder.append("Test ")
-            .append(qfiles[i].getName())
-            .append(" results check failed with error code ")
-            .append(result.getReturnCode());
-        if (Strings.isNotEmpty(result.getCapturedOutput())) {
-          builder.append(" and diff value ").append(result.getCapturedOutput());
-        }
-        System.err.println(builder.toString());
+        System.err.println("Test " + qfiles[i].getName()
+            + " results check failed with error code " + ecode);
         outputTestFailureHelpMessage();
       }
     }
@@ -2225,15 +2193,16 @@ public class QTestUtil {
 
   public void failedDiff(int ecode, String fname, String debugHint) {
     String message =
-        "Client Execution succeeded but contained differences " +
-            "(error code = " + ecode + ") after executing " +
+        "Client Execution results failed with error code = " + ecode + " while executing fname=" +
             fname + (debugHint != null ? (" " + debugHint) : "");
     LOG.error(message);
     Assert.fail(message);
   }
 
-  public void failed(Exception e, String fname, String debugHint) {
+  public void failed(Throwable e, String fname, String debugHint) {
     String command = SessionState.get() != null ? SessionState.get().getLastCommand() : null;
+    System.err.println("Exception: " + e.getMessage());
+    e.printStackTrace();
     System.err.println("Failed query: " + fname);
     System.err.flush();
     Assert.fail("Unexpected exception " +
@@ -2273,6 +2242,9 @@ public class QTestUtil {
       }
       br.close();
     } catch (Exception e) {
+      System.err.println("Exception: " + e.getMessage());
+      e.printStackTrace();
+      System.err.flush();
       Assert.fail("Unexpected exception " + org.apache.hadoop.util.StringUtils.stringifyException(e));
     }
   }
@@ -2296,9 +2268,7 @@ public class QTestUtil {
       String mdbPath =   AbstractCliConfig.HIVE_ROOT + "/data/files/tpcds-perf/metastore_export/";
 
       // Setup the table column stats
-      BufferedReader br = new BufferedReader(
-          new FileReader(
-              new File(AbstractCliConfig.HIVE_ROOT + "/metastore/scripts/upgrade/derby/022-HIVE-11107.derby.sql")));
+      BufferedReader br = new BufferedReader(new FileReader(new File(AbstractCliConfig.HIVE_ROOT + "/metastore/scripts/upgrade/derby/022-HIVE-11107.derby.sql")));
       String command;
 
       s.execute("DROP TABLE APP.TABLE_PARAMS");
@@ -2324,13 +2294,15 @@ public class QTestUtil {
       }
       br.close();
 
-      java.nio.file.Path tabColStatsCsv = FileSystems.getDefault().getPath(mdbPath, "csv" ,"TAB_COL_STATS.txt.bz2");
-      java.nio.file.Path tabParamsCsv = FileSystems.getDefault().getPath(mdbPath, "csv", "TABLE_PARAMS.txt.bz2");
+      File tabColStatsCsv = new File(mdbPath+"csv/TAB_COL_STATS.txt");
+      File tabParamsCsv = new File(mdbPath+"csv/TABLE_PARAMS.txt");
 
       // Set up the foreign key constraints properly in the TAB_COL_STATS data
       String tmpBaseDir =  System.getProperty(TEST_TMP_DIR_PROPERTY);
-      java.nio.file.Path tmpFileLoc1 = FileSystems.getDefault().getPath(tmpBaseDir, "TAB_COL_STATS.txt");
-      java.nio.file.Path tmpFileLoc2 = FileSystems.getDefault().getPath(tmpBaseDir, "TABLE_PARAMS.txt");
+      File tmpFileLoc1 = new File(tmpBaseDir+"/TAB_COL_STATS.txt");
+      File tmpFileLoc2 = new File(tmpBaseDir+"/TABLE_PARAMS.txt");
+      FileUtils.copyFile(tabColStatsCsv, tmpFileLoc1);
+      FileUtils.copyFile(tabParamsCsv, tmpFileLoc2);
 
       class MyComp implements Comparator<String> {
         @Override
@@ -2342,7 +2314,7 @@ public class QTestUtil {
         }
       }
 
-      final SortedMap<String, Integer> tableNameToID = new TreeMap<String, Integer>(new MyComp());
+      SortedMap<String, Integer> tableNameToID = new TreeMap<String, Integer>(new MyComp());
 
      rs = s.executeQuery("SELECT * FROM APP.TBLS");
       while(rs.next()) {
@@ -2354,73 +2326,29 @@ public class QTestUtil {
           LOG.debug("Resultset : " +  tblName + " | " + tblId);
         }
       }
-
-      final Map<String, Map<String, String>> data = new HashMap<>();
-      rs = s.executeQuery("select TBLS.TBL_NAME, a.COLUMN_NAME, a.TYPE_NAME from  "
-          + "(select COLUMN_NAME, TYPE_NAME, SDS.SD_ID from APP.COLUMNS_V2 join APP.SDS on SDS.CD_ID = COLUMNS_V2.CD_ID) a"
-          + " join APP.TBLS on  TBLS.SD_ID = a.SD_ID");
-      while (rs.next()) {
-        String tblName = rs.getString(1);
-        String colName = rs.getString(2);
-        String typeName = rs.getString(3);
-        Map<String, String> cols = data.get(tblName);
-        if (null == cols) {
-          cols = new HashMap<>();
+      for (Map.Entry<String, Integer> entry : tableNameToID.entrySet()) {
+        String toReplace1 = ",_" + entry.getKey() + "_" ;
+        String replacementString1 = ","+entry.getValue();
+        String toReplace2 = "_" + entry.getKey() + "_@" ;
+        String replacementString2 = ""+entry.getValue()+"@";
+        try {
+          String content1 = FileUtils.readFileToString(tmpFileLoc1, "UTF-8");
+          content1 = content1.replaceAll(toReplace1, replacementString1);
+          FileUtils.writeStringToFile(tmpFileLoc1, content1, "UTF-8");
+          String content2 = FileUtils.readFileToString(tmpFileLoc2, "UTF-8");
+          content2 = content2.replaceAll(toReplace2, replacementString2);
+          FileUtils.writeStringToFile(tmpFileLoc2, content2, "UTF-8");
+        } catch (IOException e) {
+          LOG.info("Generating file failed", e);
         }
-        cols.put(colName, typeName);
-        data.put(tblName, cols);
       }
 
-      BufferedReader reader = new BufferedReader(new InputStreamReader(
-        new BZip2CompressorInputStream(Files.newInputStream(tabColStatsCsv, StandardOpenOption.READ))));
-
-      Stream<String> replaced = reader.lines().parallel().map(str-> {
-        String[] splits = str.split(",");
-        String tblName = splits[0];
-        String colName = splits[1];
-        Integer tblID = tableNameToID.get(tblName);
-        StringBuilder sb = new StringBuilder("default@"+tblName + "@" + colName + "@" + data.get(tblName).get(colName)+"@");
-        for (int i = 2; i < splits.length; i++) {
-          sb.append(splits[i]+"@");
-        }
-        return sb.append(tblID).toString();
-        });
-
-      Files.write(tmpFileLoc1, (Iterable<String>)replaced::iterator);
-      replaced.close();
-      reader.close();
-
-      BufferedReader reader2 = new BufferedReader(new InputStreamReader(
-          new BZip2CompressorInputStream(Files.newInputStream(tabParamsCsv, StandardOpenOption.READ))));
-      final Map<String,String> colStats = new ConcurrentHashMap<>();
-      Stream<String> replacedStream = reader2.lines().parallel().map(str-> {
-        String[] splits = str.split("_@");
-        String tblName = splits[0];
-        Integer tblId = tableNameToID.get(tblName);
-        Map<String,String> cols = data.get(tblName);
-        StringBuilder sb = new StringBuilder();
-        sb.append("{\"COLUMN_STATS\":{");
-        for (String colName : cols.keySet()) {
-          sb.append("\""+colName+"\":\"true\",");
-        }
-        sb.append("},\"BASIC_STATS\":\"true\"}");
-        colStats.put(tblId.toString(), sb.toString());
-
-        return  tblId.toString() + "@" + splits[1];
-      });
-
-      Files.write(tmpFileLoc2, (Iterable<String>)replacedStream::iterator);
-      Files.write(tmpFileLoc2, (Iterable<String>)colStats.entrySet().stream()
-        .map(map->map.getKey()+"@COLUMN_STATS_ACCURATE@"+map.getValue())::iterator, StandardOpenOption.APPEND);
-
-      replacedStream.close();
-      reader2.close();
       // Load the column stats and table params with 30 TB scale
-      String importStatement1 =  "CALL SYSCS_UTIL.SYSCS_IMPORT_TABLE(null, '" + "TAB_COL_STATS" +
-        "', '" + tmpFileLoc1.toAbsolutePath().toString() +
-        "', '@', null, 'UTF-8', 1)";
-      String importStatement2 =  "CALL SYSCS_UTIL.SYSCS_IMPORT_TABLE(null, '" + "TABLE_PARAMS" +
-        "', '" + tmpFileLoc2.toAbsolutePath().toString() +
+      String importStatement1 =  "CALL SYSCS_UTIL.SYSCS_IMPORT_TABLE_LOBS_FROM_EXTFILE(null, '" + "TAB_COL_STATS" +
+        "', '" + tmpFileLoc1.getAbsolutePath() +
+        "', ',', null, 'UTF-8', 1)";
+      String importStatement2 =  "CALL SYSCS_UTIL.SYSCS_IMPORT_TABLE_LOBS_FROM_EXTFILE(null, '" + "TABLE_PARAMS" +
+        "', '" + tmpFileLoc2.getAbsolutePath() +
         "', '@', null, 'UTF-8', 1)";
       try {
         PreparedStatement psImport1 = conn.prepareStatement(importStatement1);

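Both sides of this hunk bulk-load the generated stats files through a Derby SYSCS_UTIL import procedure. For context, the sketch below shows how such an import is typically issued over plain JDBC; it is a minimal illustration, not code from the patch, and the embedded-Derby JDBC URL, file path, table name and delimiter are assumed values.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;

    public class DerbyImportSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical embedded metastore database and input file.
        try (Connection conn = DriverManager.getConnection("jdbc:derby:metastore_db;create=true")) {
          // Arguments: schema (null = current), table, input file, column delimiter,
          // character delimiter, encoding, and a non-zero flag for REPLACE semantics.
          String importCall = "CALL SYSCS_UTIL.SYSCS_IMPORT_TABLE(null, 'TAB_COL_STATS', "
              + "'/tmp/tab_col_stats.txt', '@', null, 'UTF-8', 1)";
          try (PreparedStatement ps = conn.prepareStatement(importCall)) {
            ps.execute();
          }
        }
      }
    }
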
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hadoop/hive/ql/parse/CoreParseNegative.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/parse/CoreParseNegative.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/parse/CoreParseNegative.java
index 31f69a3..8dba0bb 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/parse/CoreParseNegative.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/parse/CoreParseNegative.java
@@ -21,12 +21,9 @@ import static org.junit.Assert.fail;
 
 import java.io.Serializable;
 import java.util.List;
-
-import com.google.common.base.Strings;
 import org.apache.hadoop.hive.cli.control.AbstractCliConfig;
 import org.apache.hadoop.hive.cli.control.CliAdapter;
 import org.apache.hadoop.hive.cli.control.CliConfigs;
-import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.hadoop.hive.ql.QTestUtil;
 import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
 import org.apache.hadoop.hive.ql.exec.Task;
@@ -109,20 +106,18 @@ public class CoreParseNegative extends CliAdapter{
       fail("Unexpected success for query: " + fname + debugHint);
     }
     catch (ParseException pe) {
-      QTestProcessExecResult result = qt.checkNegativeResults(fname, pe);
-      if (result.getReturnCode() != 0) {
-        qt.failed(result.getReturnCode(), fname, result.getCapturedOutput() + "\r\n" + debugHint);
+      int ecode = qt.checkNegativeResults(fname, pe);
+      if (ecode != 0) {
+        qt.failed(ecode, fname, debugHint);
       }
     }
     catch (SemanticException se) {
-      QTestProcessExecResult result = qt.checkNegativeResults(fname, se);
-      if (result.getReturnCode() != 0) {
-        String message = Strings.isNullOrEmpty(result.getCapturedOutput()) ?
-            debugHint : "\r\n" + result.getCapturedOutput();
-        qt.failedDiff(result.getReturnCode(), fname, message);
+      int ecode = qt.checkNegativeResults(fname, se);
+      if (ecode != 0) {
+        qt.failedDiff(ecode, fname, debugHint);
       }
     }
-    catch (Exception e) {
+    catch (Throwable e) {
       qt.failed(e, fname, debugHint);
     }
 

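The hunk above reverts to the simpler int-returning contract of checkNegativeResults, but the overall negative-test flow is the same either way: run the query, expect a parse or semantic failure, and compare the error against a golden result. A generic, self-contained sketch of that flow (all names and messages below are hypothetical, not QTestUtil APIs) is:

    public class NegativeTestSketch {

      /** Returns 0 when the observed error matches the expected text, non-zero otherwise. */
      static int checkNegativeResult(String expectedError, Exception observed) {
        String actual = String.valueOf(observed.getMessage()).trim();
        return expectedError.trim().equals(actual) ? 0 : 1;
      }

      public static void main(String[] args) {
        String golden = "line 1:7 cannot recognize input";   // stand-in for a .q.out golden file
        try {
          // Stand-in for running the parser over a deliberately broken query.
          throw new IllegalArgumentException("line 1:7 cannot recognize input");
        } catch (IllegalArgumentException expectedFailure) {
          int ecode = checkNegativeResult(golden, expectedFailure);
          if (ecode != 0) {
            throw new AssertionError("Unexpected error text for bad_query.q, ecode=" + ecode);
          }
          System.out.println("negative test passed");
        }
      }
    }
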
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hive/beeline/Parallelized.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hive/beeline/Parallelized.java b/itests/util/src/main/java/org/apache/hive/beeline/Parallelized.java
deleted file mode 100644
index a819e26..0000000
--- a/itests/util/src/main/java/org/apache/hive/beeline/Parallelized.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hive.beeline;
-
-import org.junit.runners.Parameterized;
-import org.junit.runners.model.RunnerScheduler;
-
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Class to run Parameterized test in parallel.
- * Source: http://hwellmann.blogspot.hu/2009/12/running-parameterized-junit-tests-in.html
- */
-public class Parallelized extends Parameterized {
-  private static class ThreadPoolScheduler implements RunnerScheduler {
-    private ExecutorService executor;
-
-    public ThreadPoolScheduler() {
-      String threads = System.getProperty("junit.parallel.threads");
-      int numThreads = Runtime.getRuntime().availableProcessors();
-      if (threads != null) {
-        numThreads = Integer.parseInt(threads);
-      }
-      executor = Executors.newFixedThreadPool(numThreads);
-    }
-
-    @Override
-    public void finished() {
-      executor.shutdown();
-      try {
-        executor.awaitTermination(10, TimeUnit.MINUTES);
-      } catch (InterruptedException exc) {
-        throw new RuntimeException(exc);
-      }
-    }
-
-    @Override
-    public void schedule(Runnable childStatement) {
-      executor.submit(childStatement);
-    }
-  }
-
-  public Parallelized(Class klass) throws Throwable {
-    super(klass);
-    setScheduler(new ThreadPoolScheduler());
-  }
-}

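The removed runner above is a thin wrapper over JUnit's Parameterized runner that swaps in a thread-pool scheduler sized by -Djunit.parallel.threads. A hypothetical usage sketch (the test class, parameter list and query names are invented for illustration) would look like this:

    import java.util.Arrays;
    import java.util.Collection;

    import org.apache.hive.beeline.Parallelized;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized.Parameters;

    @RunWith(Parallelized.class)
    public class ParallelQueryFileTest {
      private final String queryFile;

      public ParallelQueryFileTest(String queryFile) {
        this.queryFile = queryFile;
      }

      @Parameters(name = "{0}")
      public static Collection<Object[]> queryFiles() {
        return Arrays.asList(new Object[][] {{"q1.q"}, {"q2.q"}, {"q3.q"}});
      }

      @Test
      public void runQueryFile() {
        // Each parameter set runs as its own child statement, submitted to the shared pool.
        System.out.println(Thread.currentThread().getName() + " -> " + queryFile);
      }
    }

Since this revert deletes Parallelized, such a test would have to fall back to the stock Parameterized runner on this branch.
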
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hive/beeline/QFile.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hive/beeline/QFile.java b/itests/util/src/main/java/org/apache/hive/beeline/QFile.java
deleted file mode 100644
index 0bde529..0000000
--- a/itests/util/src/main/java/org/apache/hive/beeline/QFile.java
+++ /dev/null
@@ -1,333 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hive.beeline;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hive.ql.QTestProcessExecResult;
-import org.apache.hadoop.hive.ql.QTestUtil;
-import org.apache.hadoop.util.Shell;
-import org.apache.hive.common.util.StreamPrinter;
-
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.regex.Pattern;
-
-/**
- * Class for representing a Query and the connected files. It provides accessors for the specific
- * input and output files, and provides methods for filtering the output of the runs.
- */
-public final class QFile {
-  private static final Set<String> srcTables = QTestUtil.getSrcTables();
-  private static final String DEBUG_HINT =
-      "The following files can help you identifying the problem:%n"
-      + " - Query file: %1s%n"
-      + " - Raw output file: %2s%n"
-      + " - Filtered output file: %3s%n"
-      + " - Expected output file: %4s%n"
-      + " - Client log file: %5s%n"
-      + " - Client log files before the test: %6s%n"
-      + " - Client log files after the test: %7s%n"
-      + " - Hiveserver2 log file: %8s%n";
-  private static final String USE_COMMAND_WARNING =
-      "The query file %1s contains \"%2s\" command.%n"
-      + "The source table name rewrite is turned on, so this might cause problems when the used "
-      + "database contains tables named any of the following: " + srcTables + "%n"
-      + "To turn off the table name rewrite use -Dtest.rewrite.source.tables=false%n";
-
-  private static final Pattern USE_PATTERN =
-      Pattern.compile("^\\s*use\\s.*", Pattern.CASE_INSENSITIVE);
-
-  private static final String MASK_PATTERN = "#### A masked pattern was here ####\n";
-
-  private String name;
-  private File inputFile;
-  private File rawOutputFile;
-  private File outputFile;
-  private File expectedOutputFile;
-  private File logFile;
-  private File beforeExecuteLogFile;
-  private File afterExecuteLogFile;
-  private static RegexFilterSet staticFilterSet = getStaticFilterSet();
-  private RegexFilterSet specificFilterSet;
-  private boolean rewriteSourceTables;
-
-  private QFile() {}
-
-  public String getName() {
-    return name;
-  }
-
-  public File getInputFile() {
-    return inputFile;
-  }
-
-  public File getRawOutputFile() {
-    return rawOutputFile;
-  }
-
-  public File getOutputFile() {
-    return outputFile;
-  }
-
-  public File getExpectedOutputFile() {
-    return expectedOutputFile;
-  }
-
-  public File getLogFile() {
-    return logFile;
-  }
-
-  public File getBeforeExecuteLogFile() {
-    return beforeExecuteLogFile;
-  }
-
-  public File getAfterExecuteLogFile() {
-    return afterExecuteLogFile;
-  }
-
-  public String getDebugHint() {
-    return String.format(DEBUG_HINT, inputFile, rawOutputFile, outputFile, expectedOutputFile,
-        logFile, beforeExecuteLogFile, afterExecuteLogFile,
-        "./itests/qtest/target/tmp/log/hive.log");
-  }
-
-  /**
-   * Filters the sql commands if necessary.
-   * @param commands The array of the sql commands before filtering
-   * @return The filtered array of the sql command strings
-   * @throws IOException File read error
-   */
-  public String[] filterCommands(String[] commands) throws IOException {
-    if (rewriteSourceTables) {
-      for (int i=0; i<commands.length; i++) {
-        if (USE_PATTERN.matcher(commands[i]).matches()) {
-          System.err.println(String.format(USE_COMMAND_WARNING, inputFile, commands[i]));
-        }
-        commands[i] = replaceTableNames(commands[i]);
-      }
-    }
-    return commands;
-  }
-
-  /**
-   * Replace the default src database TABLE_NAMEs in the queries with default.TABLE_NAME, like
-   * src->default.src, srcpart->default.srcpart, so the queries could be run even if the used
-   * database is query specific. This change is only a best effort; since we do not want to parse
-   * the queries, we cannot be sure that we do not replace other strings which are not
-   * table names, like 'select src from othertable;'. The q files containing these commands should
-   * be excluded. Only replace the tablenames, if rewriteSourceTables is set.
-   * @param source The original query string
-   * @return The query string where the tablenames are replaced
-   */
-  private String replaceTableNames(String source) {
-    for (String table : srcTables) {
-      source = source.replaceAll("(?is)(\\s+)" + table + "([\\s;\\n\\)])", "$1default." + table
-          + "$2");
-    }
-    return source;
-  }
-
-  /**
-   * The result contains the original queries. To revert them to the original form remove the
-   * 'default' from every default.TABLE_NAME, like default.src->src, default.srcpart->srcpart.
-   * @param source The original query output
-   * @return The query output where the tablenames are replaced
-   */
-  private String revertReplaceTableNames(String source) {
-    for (String table : srcTables) {
-      source = source.replaceAll("(?is)(\\s+)default\\." + table + "([\\s;\\n\\)])", "$1" + table
-          + "$2");
-    }
-    return source;
-  }
-
-  public void filterOutput() throws IOException {
-    String rawOutput = FileUtils.readFileToString(rawOutputFile, "UTF-8");
-    if (rewriteSourceTables) {
-      rawOutput = revertReplaceTableNames(rawOutput);
-    }
-    String filteredOutput = staticFilterSet.filter(specificFilterSet.filter(rawOutput));
-    FileUtils.writeStringToFile(outputFile, filteredOutput);
-  }
-
-  public QTestProcessExecResult compareResults() throws IOException, InterruptedException {
-    if (!expectedOutputFile.exists()) {
-      throw new IOException("Expected results file does not exist: " + expectedOutputFile);
-    }
-    return executeDiff();
-  }
-
-  public void overwriteResults() throws IOException {
-    FileUtils.copyFile(outputFile, expectedOutputFile);
-  }
-
-  private QTestProcessExecResult executeDiff() throws IOException, InterruptedException {
-    List<String> diffCommandArgs = new ArrayList<String>();
-    diffCommandArgs.add("diff");
-
-    // Text file comparison
-    diffCommandArgs.add("-a");
-
-    if (Shell.WINDOWS) {
-      // Ignore changes in the amount of white space
-      diffCommandArgs.add("-b");
-
-      // Files created on Windows machines have different line endings
-      // than files created on Unix/Linux. Windows uses carriage return and line feed
-      // ("\r\n") as a line ending, whereas Unix uses just line feed ("\n").
-      // Also StringBuilder.toString(), Stream to String conversions adds extra
-      // spaces at the end of the line.
-      diffCommandArgs.add("--strip-trailing-cr"); // Strip trailing carriage return on input
-      diffCommandArgs.add("-B"); // Ignore changes whose lines are all blank
-    }
-
-    // Add files to compare to the arguments list
-    diffCommandArgs.add(getQuotedString(expectedOutputFile));
-    diffCommandArgs.add(getQuotedString(outputFile));
-
-    System.out.println("Running: " + org.apache.commons.lang.StringUtils.join(diffCommandArgs,
-        ' '));
-    Process executor = Runtime.getRuntime().exec(diffCommandArgs.toArray(
-        new String[diffCommandArgs.size()]));
-
-    ByteArrayOutputStream bos = new ByteArrayOutputStream();
-    PrintStream out = new PrintStream(bos, true, "UTF-8");
-
-    StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, System.err);
-    StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out, out);
-
-    outPrinter.start();
-    errPrinter.start();
-
-    int result = executor.waitFor();
-
-    outPrinter.join();
-    errPrinter.join();
-
-    executor.waitFor();
-
-    return QTestProcessExecResult.create(result, new String(bos.toByteArray(),
-        StandardCharsets.UTF_8));
-  }
-
-  private static String getQuotedString(File file) {
-    return Shell.WINDOWS ? String.format("\"%s\"", file.getAbsolutePath()) : file.getAbsolutePath();
-  }
-
-  private static class Filter {
-    private final Pattern pattern;
-    private final String replacement;
-
-    public Filter(Pattern pattern, String replacement) {
-      this.pattern = pattern;
-      this.replacement = replacement;
-    }
-  }
-
-  private static class RegexFilterSet {
-    private final List<Filter> regexFilters = new ArrayList<Filter>();
-
-    public RegexFilterSet addFilter(String regex, String replacement) {
-      regexFilters.add(new Filter(Pattern.compile(regex), replacement));
-      return this;
-    }
-
-    public String filter(String input) {
-      for (Filter filter : regexFilters) {
-        input = filter.pattern.matcher(input).replaceAll(filter.replacement);
-      }
-      return input;
-    }
-  }
-
-  // These are the filters which are common for every QTest.
-  // Check specificFilterSet for QTest specific ones.
-  private static RegexFilterSet getStaticFilterSet() {
-    // Pattern to remove the timestamp and other infrastructural info from the out file
-    return new RegexFilterSet()
-        .addFilter("Reading log file: .*\n", "")
-        .addFilter("INFO  : ", "")
-        .addFilter(".*/tmp/.*\n", MASK_PATTERN)
-        .addFilter(".*file:.*\n", MASK_PATTERN)
-        .addFilter(".*file\\..*\n", MASK_PATTERN)
-        .addFilter(".*CreateTime.*\n", MASK_PATTERN)
-        .addFilter(".*transient_lastDdlTime.*\n", MASK_PATTERN)
-        .addFilter("(?s)(" + MASK_PATTERN + ")+", MASK_PATTERN);
-  }
-
-  /**
-   * Builder to generate QFile objects. After initializing the builder it is possible to
-   * generate the next QFile object using its name only.
-   */
-  public static class QFileBuilder {
-    private File queryDirectory;
-    private File logDirectory;
-    private File resultsDirectory;
-    private boolean rewriteSourceTables;
-
-    public QFileBuilder() {
-    }
-
-    public QFileBuilder setQueryDirectory(File queryDirectory) {
-      this.queryDirectory = queryDirectory;
-      return this;
-    }
-
-    public QFileBuilder setLogDirectory(File logDirectory) {
-      this.logDirectory = logDirectory;
-      return this;
-    }
-
-    public QFileBuilder setResultsDirectory(File resultsDirectory) {
-      this.resultsDirectory = resultsDirectory;
-      return this;
-    }
-
-    public QFileBuilder setRewriteSourceTables(boolean rewriteSourceTables) {
-      this.rewriteSourceTables = rewriteSourceTables;
-      return this;
-    }
-
-    public QFile getQFile(String name) throws IOException {
-      QFile result = new QFile();
-      result.name = name;
-      result.inputFile = new File(queryDirectory, name + ".q");
-      result.rawOutputFile = new File(logDirectory, name + ".q.out.raw");
-      result.outputFile = new File(logDirectory, name + ".q.out");
-      result.expectedOutputFile = new File(resultsDirectory, name + ".q.out");
-      result.logFile = new File(logDirectory, name + ".q.beeline");
-      result.beforeExecuteLogFile = new File(logDirectory, name + ".q.beforeExecute.log");
-      result.afterExecuteLogFile = new File(logDirectory, name + ".q.afterExecute.log");
-      result.rewriteSourceTables = rewriteSourceTables;
-      result.specificFilterSet = new RegexFilterSet()
-          .addFilter("(PREHOOK|POSTHOOK): (Output|Input): database:" + name + "\n",
-              "$1: $2: database:default\n")
-          .addFilter("(PREHOOK|POSTHOOK): (Output|Input): " + name + "@", "$1: $2: default@")
-          .addFilter("name(:?) " + name + "\\.(.*)\n", "name$1 default.$2\n")
-          .addFilter("/" + name + ".db/", "/");
-      return result;
-    }
-  }
-}

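The replaceTableNames/revertReplaceTableNames pair documented above is a plain regex rewrite. A self-contained sketch of the same pattern (the query string and table name below are examples, not test data) is:

    // Standalone illustration of the table-name rewrite QFile performed with replaceAll.
    public class TableNameRewriteSketch {
      static String replaceTableName(String source, String table) {
        return source.replaceAll("(?is)(\\s+)" + table + "([\\s;\\n\\)])", "$1default." + table + "$2");
      }

      static String revertTableName(String source, String table) {
        return source.replaceAll("(?is)(\\s+)default\\." + table + "([\\s;\\n\\)])", "$1" + table + "$2");
      }

      public static void main(String[] args) {
        String query = "select key from src;";
        String rewritten = replaceTableName(query, "src");
        System.out.println(rewritten);                          // select key from default.src;
        System.out.println(revertTableName(rewritten, "src"));  // select key from src;
      }
    }
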
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hive/beeline/QFileBeeLineClient.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hive/beeline/QFileBeeLineClient.java b/itests/util/src/main/java/org/apache/hive/beeline/QFileBeeLineClient.java
deleted file mode 100644
index f1b53f7..0000000
--- a/itests/util/src/main/java/org/apache/hive/beeline/QFileBeeLineClient.java
+++ /dev/null
@@ -1,156 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hive.beeline;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.sql.SQLException;
-
-/**
- * QFile test client using BeeLine. It can be used to submit a list of command strings, or a QFile.
- */
-public class QFileBeeLineClient implements AutoCloseable {
-  private BeeLine beeLine;
-  private PrintStream beelineOutputStream;
-  private File logFile;
-
-  protected QFileBeeLineClient(String jdbcUrl, String jdbcDriver, String username, String password,
-      File log) throws IOException {
-    logFile = log;
-    beeLine = new BeeLine();
-    beelineOutputStream = new PrintStream(logFile, "UTF-8");
-    beeLine.setOutputStream(beelineOutputStream);
-    beeLine.setErrorStream(beelineOutputStream);
-    beeLine.runCommands(
-        new String[] {
-          "!set verbose true",
-          "!set shownestederrs true",
-          "!set showwarnings true",
-          "!set showelapsedtime false",
-          "!set trimscripts false",
-          "!set maxwidth -1",
-          "!connect " + jdbcUrl + " " + username + " " + password + " " + jdbcDriver
-        });
-  }
-
-  public void execute(String[] commands, File resultFile) throws SQLException {
-    beeLine.runCommands(
-        new String[] {
-          "!record " + resultFile.getAbsolutePath()
-        });
-
-    int lastSuccessfulCommand = beeLine.runCommands(commands);
-    if (commands.length != lastSuccessfulCommand) {
-      throw new SQLException("Error executing SQL command: " + commands[lastSuccessfulCommand]);
-    }
-
-    beeLine.runCommands(new String[] {"!record"});
-  }
-
-  private void beforeExecute(QFile qFile) throws SQLException {
-    execute(
-        new String[] {
-          "!set outputformat tsv2",
-          "!set verbose false",
-          "!set silent true",
-          "!set showheader false",
-          "USE default;",
-          "SHOW TABLES;",
-          "DROP DATABASE IF EXISTS `" + qFile.getName() + "` CASCADE;",
-          "CREATE DATABASE `" + qFile.getName() + "`;",
-          "USE `" + qFile.getName() + "`;",
-          "set hive.in.test.short.logs=true;",
-          "set hive.in.test.remove.logs=false;",
-        },
-        qFile.getBeforeExecuteLogFile());
-    beeLine.setIsTestMode(true);
-  }
-
-  private void afterExecute(QFile qFile) throws SQLException {
-    beeLine.setIsTestMode(false);
-    execute(
-        new String[] {
-          "set hive.in.test.short.logs=false;",
-          "!set verbose true",
-          "!set silent false",
-          "!set showheader true",
-          "!set outputformat table",
-          "USE default;",
-          "DROP DATABASE IF EXISTS `" + qFile.getName() + "` CASCADE;",
-        },
-        qFile.getAfterExecuteLogFile());
-  }
-
-  public void execute(QFile qFile) throws SQLException, IOException {
-    beforeExecute(qFile);
-    String[] commands = beeLine.getCommands(qFile.getInputFile());
-    execute(qFile.filterCommands(commands), qFile.getRawOutputFile());
-    afterExecute(qFile);
-  }
-
-  public void close() {
-    if (beeLine != null) {
-      beeLine.runCommands(new String[] {
-        "!quit"
-      });
-    }
-    if (beelineOutputStream != null) {
-      beelineOutputStream.close();
-    }
-  }
-
-  /**
-   * Builder to generate QFileBeeLineClient objects. After initializing the builder, it can be
-   * used to create new clients without any parameters.
-   */
-  public static class QFileClientBuilder {
-    private String username;
-    private String password;
-    private String jdbcUrl;
-    private String jdbcDriver;
-
-    public QFileClientBuilder() {
-    }
-
-    public QFileClientBuilder setUsername(String username) {
-      this.username = username;
-      return this;
-    }
-
-    public QFileClientBuilder setPassword(String password) {
-      this.password = password;
-      return this;
-    }
-
-    public QFileClientBuilder setJdbcUrl(String jdbcUrl) {
-      this.jdbcUrl = jdbcUrl;
-      return this;
-    }
-
-    public QFileClientBuilder setJdbcDriver(String jdbcDriver) {
-      this.jdbcDriver = jdbcDriver;
-      return this;
-    }
-
-    public QFileBeeLineClient getClient(File logFile) throws IOException {
-      return new QFileBeeLineClient(jdbcUrl, jdbcDriver, username, password, logFile);
-    }
-  }
-}

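Taken together with QFile, the deleted client was driven through its two builders. The sketch below shows hypothetical wiring, as these classes existed before the revert; the JDBC URL, credentials, directories and query name are placeholders, not values from the Hive test harness.

    import java.io.File;

    import org.apache.hive.beeline.QFile;
    import org.apache.hive.beeline.QFileBeeLineClient;

    public class QFileClientSketch {
      public static void main(String[] args) throws Exception {
        QFileBeeLineClient.QFileClientBuilder clientBuilder = new QFileBeeLineClient.QFileClientBuilder()
            .setJdbcUrl("jdbc:hive2://localhost:10000/default")
            .setJdbcDriver("org.apache.hive.jdbc.HiveDriver")
            .setUsername("user")
            .setPassword("password");

        QFile qFile = new QFile.QFileBuilder()
            .setQueryDirectory(new File("queries"))
            .setLogDirectory(new File("target/logs"))
            .setResultsDirectory(new File("results"))
            .setRewriteSourceTables(true)
            .getQFile("join1");

        try (QFileBeeLineClient client = clientBuilder.getClient(qFile.getLogFile())) {
          client.execute(qFile);        // run the .q file through BeeLine
          qFile.filterOutput();         // apply the static and per-test regex filters
          if (qFile.compareResults().getReturnCode() != 0) {
            System.err.println("diff failed, see " + qFile.getDebugHint());
          }
        }
      }
    }
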
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/util/src/main/java/org/apache/hive/beeline/package-info.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hive/beeline/package-info.java b/itests/util/src/main/java/org/apache/hive/beeline/package-info.java
deleted file mode 100644
index e05ac0a..0000000
--- a/itests/util/src/main/java/org/apache/hive/beeline/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package for the BeeLine specific QTest classes.
- */
-package org.apache.hive.beeline;


http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index d8af7a7..bae39ac 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -23,13 +23,13 @@ import com.google.common.collect.Lists;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
-import org.apache.hadoop.hive.metastore.messaging.EventMessage;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
@@ -54,11 +54,8 @@ import org.apache.hive.common.util.HiveStringUtils;
 import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
 
 /**
  * Hive specific implementation of alter
@@ -91,9 +88,6 @@ public class HiveAlterHandler implements AlterHandler {
   public void alterTable(RawStore msdb, Warehouse wh, String dbname,
       String name, Table newt, EnvironmentContext environmentContext,
       HMSHandler handler) throws InvalidOperationException, MetaException {
-    name = name.toLowerCase();
-    dbname = dbname.toLowerCase();
-
     final boolean cascade = environmentContext != null
         && environmentContext.isSetProperties()
         && StatsSetupConst.TRUE.equals(environmentContext.getProperties().get(
@@ -102,11 +96,9 @@ public class HiveAlterHandler implements AlterHandler {
       throw new InvalidOperationException("New table is invalid: " + newt);
     }
 
-    String newTblName = newt.getTableName().toLowerCase();
-    String newDbName = newt.getDbName().toLowerCase();
-
-    if (!MetaStoreUtils.validateName(newTblName, hiveConf)) {
-      throw new InvalidOperationException(newTblName + " is not a valid object name");
+    if (!MetaStoreUtils.validateName(newt.getTableName(), hiveConf)) {
+      throw new InvalidOperationException(newt.getTableName()
+          + " is not a valid object name");
     }
     String validate = MetaStoreUtils.validateTblColumns(newt.getSd().getCols());
     if (validate != null) {
@@ -119,38 +111,36 @@ public class HiveAlterHandler implements AlterHandler {
     FileSystem destFs = null;
 
     boolean success = false;
-    boolean dataWasMoved = false;
+    boolean moveData = false;
+    boolean rename = false;
     Table oldt = null;
+    List<ObjectPair<Partition, String>> altps = new ArrayList<ObjectPair<Partition, String>>();
     List<MetaStoreEventListener> transactionalListeners = null;
     if (handler != null) {
       transactionalListeners = handler.getTransactionalListeners();
     }
 
     try {
-      boolean rename = false;
-      boolean isPartitionedTable = false;
-      List<Partition> parts = null;
+      msdb.openTransaction();
+      name = name.toLowerCase();
+      dbname = dbname.toLowerCase();
 
       // check if table with the new name already exists
-      if (!newTblName.equals(name) || !newDbName.equals(dbname)) {
-        if (msdb.getTable(newDbName, newTblName) != null) {
-          throw new InvalidOperationException("new table " + newDbName
-              + "." + newTblName + " already exists");
+      if (!newt.getTableName().equalsIgnoreCase(name)
+          || !newt.getDbName().equalsIgnoreCase(dbname)) {
+        if (msdb.getTable(newt.getDbName(), newt.getTableName()) != null) {
+          throw new InvalidOperationException("new table " + newt.getDbName()
+              + "." + newt.getTableName() + " already exists");
         }
         rename = true;
       }
 
-      msdb.openTransaction();
       // get old table
       oldt = msdb.getTable(dbname, name);
       if (oldt == null) {
         throw new InvalidOperationException("table " + dbname + "." + name + " doesn't exist");
       }
 
-      if (oldt.getPartitionKeysSize() != 0) {
-        isPartitionedTable = true;
-      }
-
       if (HiveConf.getBoolVar(hiveConf,
             HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES,
             false)) {
@@ -160,14 +150,32 @@ public class HiveAlterHandler implements AlterHandler {
             oldt.getSd().getCols(), newt.getSd().getCols());
       }
 
+      if (cascade) {
+        //Currently only column related changes can be cascaded in alter table
+        if(MetaStoreUtils.isCascadeNeededInAlterTable(oldt, newt)) {
+          List<Partition> parts = msdb.getPartitions(dbname, name, -1);
+          for (Partition part : parts) {
+            List<FieldSchema> oldCols = part.getSd().getCols();
+            part.getSd().setCols(newt.getSd().getCols());
+            String oldPartName = Warehouse.makePartName(oldt.getPartitionKeys(), part.getValues());
+            updatePartColumnStatsForAlterColumns(msdb, part, oldPartName, part.getValues(), oldCols, part);
+            msdb.alterPartition(dbname, name, part.getValues(), part);
+          }
+        } else {
+          LOG.warn("Alter table does not cascade changes to its partitions.");
+        }
+      }
+
       //check that partition keys have not changed, except for virtual views
       //however, allow the partition comments to change
       boolean partKeysPartiallyEqual = checkPartialPartKeysEqual(oldt.getPartitionKeys(),
           newt.getPartitionKeys());
 
       if(!oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString())){
-        if (!partKeysPartiallyEqual) {
-          throw new InvalidOperationException("partition keys can not be changed.");
+        if (oldt.getPartitionKeys().size() != newt.getPartitionKeys().size()
+            || !partKeysPartiallyEqual) {
+          throw new InvalidOperationException(
+              "partition keys can not be changed.");
         }
       }
 
@@ -198,12 +206,13 @@ public class HiveAlterHandler implements AlterHandler {
           srcFs = wh.getFs(srcPath);
 
           // get new location
-          Database db = msdb.getDatabase(newDbName);
+          Database db = msdb.getDatabase(newt.getDbName());
           Path databasePath = constructRenamedPath(wh.getDatabasePath(db), srcPath);
-          destPath = new Path(databasePath, newTblName);
+          destPath = new Path(databasePath, newt.getTableName().toLowerCase());
           destFs = wh.getFs(destPath);
 
           newt.getSd().setLocation(destPath.toString());
+          moveData = true;
 
           // check that destination does not exist otherwise we will be
           // overwriting data
@@ -213,99 +222,58 @@ public class HiveAlterHandler implements AlterHandler {
                 + " is on a different file system than the old location "
                 + srcPath + ". This operation is not supported");
           }
-
           try {
+            srcFs.exists(srcPath); // check that src exists and also checks
+                                   // permissions necessary
             if (destFs.exists(destPath)) {
               throw new InvalidOperationException("New location for this table "
-                  + newDbName + "." + newTblName + " already exists : " + destPath);
-            }
-            // check that src exists and also checks permissions necessary, rename src to dest
-            if (srcFs.exists(srcPath) && srcFs.rename(srcPath, destPath)) {
-              dataWasMoved = true;
+                  + newt.getDbName() + "." + newt.getTableName()
+                  + " already exists : " + destPath);
             }
           } catch (IOException e) {
-            LOG.error("Alter Table operation for " + dbname + "." + name + " failed.", e);
-            throw new InvalidOperationException("Alter Table operation for " + dbname + "." + name +
-                " failed to move data due to: '" + getSimpleMessage(e)
-                + "' See hive log file for details.");
+            throw new InvalidOperationException("Unable to access new location "
+                + destPath + " for table " + newt.getDbName() + "."
+                + newt.getTableName());
           }
-        }
-
-        if (isPartitionedTable) {
           String oldTblLocPath = srcPath.toUri().getPath();
-          String newTblLocPath = dataWasMoved ? destPath.toUri().getPath() : null;
+          String newTblLocPath = destPath.toUri().getPath();
 
           // also the location field in partition
-          parts = msdb.getPartitions(dbname, name, -1);
-          Map<Partition, ColumnStatistics> columnStatsNeedUpdated = new HashMap<Partition, ColumnStatistics>();
+          List<Partition> parts = msdb.getPartitions(dbname, name, -1);
           for (Partition part : parts) {
             String oldPartLoc = part.getSd().getLocation();
-            if (dataWasMoved && oldPartLoc.contains(oldTblLocPath)) {
+            if (oldPartLoc.contains(oldTblLocPath)) {
               URI oldUri = new Path(oldPartLoc).toUri();
               String newPath = oldUri.getPath().replace(oldTblLocPath, newTblLocPath);
               Path newPartLocPath = new Path(oldUri.getScheme(), oldUri.getAuthority(), newPath);
+              altps.add(ObjectPair.create(part, part.getSd().getLocation()));
               part.getSd().setLocation(newPartLocPath.toString());
-            }
-            part.setDbName(newDbName);
-            part.setTableName(newTblName);
-            ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, dbname, name,
-                part.getValues(), part.getSd().getCols(), oldt, part);
-            if (colStats != null) {
-              columnStatsNeedUpdated.put(part, colStats);
-            }
-          }
-          msdb.alterTable(dbname, name, newt);
-          // alterPartition is only for changing the partition location in the table rename
-          if (dataWasMoved) {
-            for (Partition part : parts) {
-              msdb.alterPartition(newDbName, newTblName, part.getValues(), part);
-            }
-          }
-
-          for (Entry<Partition, ColumnStatistics> partColStats : columnStatsNeedUpdated.entrySet()) {
-            ColumnStatistics newPartColStats = partColStats.getValue();
-            newPartColStats.getStatsDesc().setDbName(newDbName);
-            newPartColStats.getStatsDesc().setTableName(newTblName);
-            msdb.updatePartitionColumnStatistics(newPartColStats, partColStats.getKey().getValues());
-          }
-        } else {
-          alterTableUpdateTableColumnStats(msdb, oldt, newt);
-        }
-      } else {
-        // operations other than table rename
-        if (MetaStoreUtils.requireCalStats(hiveConf, null, null, newt, environmentContext) &&
-            !isPartitionedTable) {
-          Database db = msdb.getDatabase(newDbName);
-          // Update table stats. For partitioned table, we update stats in alterPartition()
-          MetaStoreUtils.updateTableStatsFast(db, newt, wh, false, true, environmentContext);
-        }
-
-        if (cascade && isPartitionedTable) {
-          //Currently only column related changes can be cascaded in alter table
-          if(!MetaStoreUtils.areSameColumns(oldt.getSd().getCols(), newt.getSd().getCols())) {
-            parts = msdb.getPartitions(dbname, name, -1);
-            for (Partition part : parts) {
-              List<FieldSchema> oldCols = part.getSd().getCols();
-              part.getSd().setCols(newt.getSd().getCols());
-              ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, dbname, name,
-                  part.getValues(), oldCols, oldt, part);
-              assert(colStats == null);
+              String oldPartName = Warehouse.makePartName(oldt.getPartitionKeys(), part.getValues());
+              try {
+                //existing partition column stats is no longer valid, remove them
+                msdb.deletePartitionColumnStatistics(dbname, name, oldPartName, part.getValues(), null);
+              } catch (InvalidInputException iie) {
+                throw new InvalidOperationException("Unable to update partition stats in table rename." + iie);
+              }
               msdb.alterPartition(dbname, name, part.getValues(), part);
             }
-            msdb.alterTable(dbname, name, newt);
-          } else {
-            LOG.warn("Alter table does not cascade changes to its partitions.");
           }
-        } else {
-          alterTableUpdateTableColumnStats(msdb, oldt, newt);
         }
+      } else if (MetaStoreUtils.requireCalStats(hiveConf, null, null, newt, environmentContext) &&
+        (newt.getPartitionKeysSize() == 0)) {
+          Database db = msdb.getDatabase(newt.getDbName());
+          // Update table stats. For partitioned table, we update stats in
+          // alterPartition()
+          MetaStoreUtils.updateTableStatsFast(db, newt, wh, false, true, environmentContext);
       }
 
-      if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
-        MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                              EventMessage.EventType.ALTER_TABLE,
-                                              new AlterTableEvent(oldt, newt, false, true, handler),
-                                              environmentContext);
+      alterTableUpdateTableColumnStats(msdb, oldt, newt);
+      if (transactionalListeners != null && transactionalListeners.size() > 0) {
+        AlterTableEvent alterTableEvent = new AlterTableEvent(oldt, newt, true, handler);
+        alterTableEvent.setEnvironmentContext(environmentContext);
+        for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+          transactionalListener.onAlterTable(alterTableEvent);
+        }
       }
       // commit the changes
       success = msdb.commitTransaction();
@@ -314,11 +282,6 @@ public class HiveAlterHandler implements AlterHandler {
       throw new InvalidOperationException(
           "Unable to change partition or table."
               + " Check metastore logs for detailed stack." + e.getMessage());
-    } catch (InvalidInputException e) {
-        LOG.debug("Accessing Metastore failed due to invalid input ", e);
-        throw new InvalidOperationException(
-            "Unable to change partition or table."
-                + " Check metastore logs for detailed stack." + e.getMessage());
     } catch (NoSuchObjectException e) {
       LOG.debug("Object not found in metastore ", e);
       throw new InvalidOperationException(
@@ -326,23 +289,52 @@ public class HiveAlterHandler implements AlterHandler {
               + " Check metastore logs for detailed stack." + e.getMessage());
     } finally {
       if (!success) {
-        LOG.error("Failed to alter table " + dbname + "." + name);
         msdb.rollbackTransaction();
-        if (dataWasMoved) {
+      }
+
+      if (success && moveData) {
+        // change the file name in hdfs
+        // check that src exists otherwise there is no need to copy the data
+        // rename the src to destination
+        try {
+          if (srcFs.exists(srcPath) && !srcFs.rename(srcPath, destPath)) {
+            throw new IOException("Renaming " + srcPath + " to " + destPath + " failed");
+          }
+        } catch (IOException e) {
+          LOG.error("Alter Table operation for " + dbname + "." + name + " failed.", e);
+          boolean revertMetaDataTransaction = false;
           try {
-            if (destFs.exists(destPath)) {
-              if (!destFs.rename(destPath, srcPath)) {
-                LOG.error("Failed to restore data from " + destPath + " to " + srcPath
-                    + " in alter table failure. Manual restore is needed.");
-              }
+            msdb.openTransaction();
+            msdb.alterTable(newt.getDbName(), newt.getTableName(), oldt);
+            for (ObjectPair<Partition, String> pair : altps) {
+              Partition part = pair.getFirst();
+              part.getSd().setLocation(pair.getSecond());
+              msdb.alterPartition(newt.getDbName(), name, part.getValues(), part);
+            }
+            revertMetaDataTransaction = msdb.commitTransaction();
+          } catch (Exception e1) {
+            // we should log this for manual rollback by administrator
+            LOG.error("Reverting metadata by HDFS operation failure failed During HDFS operation failed", e1);
+            LOG.error("Table " + Warehouse.getQualifiedName(newt) +
+                " should be renamed to " + Warehouse.getQualifiedName(oldt));
+            LOG.error("Table " + Warehouse.getQualifiedName(newt) +
+                " should have path " + srcPath);
+            for (ObjectPair<Partition, String> pair : altps) {
+              LOG.error("Partition " + Warehouse.getQualifiedName(pair.getFirst()) +
+                  " should have path " + pair.getSecond());
+            }
+            if (!revertMetaDataTransaction) {
+              msdb.rollbackTransaction();
             }
-          } catch (IOException e) {
-            LOG.error("Failed to restore data from " + destPath + " to " + srcPath
-                +  " in alter table failure. Manual restore is needed.");
           }
+          throw new InvalidOperationException("Alter Table operation for " + dbname + "." + name +
+            " failed to move data due to: '" + getSimpleMessage(e) + "' See hive log file for details.");
         }
       }
     }
+    if (!success) {
+      throw new MetaException("Committing the alter table transaction was not successful.");
+    }
   }
 
   /**
@@ -416,19 +408,15 @@ public class HiveAlterHandler implements AlterHandler {
           }
         }
 
-        // PartitionView does not have SD. We do not need update its column stats
-        if (oldPart.getSd() != null) {
-          updateOrGetPartitionColumnStats(msdb, dbname, name, new_part.getValues(),
-              oldPart.getSd().getCols(), tbl, new_part);
-        }
+        updatePartColumnStats(msdb, dbname, name, new_part.getValues(), new_part);
         msdb.alterPartition(dbname, name, new_part.getValues(), new_part);
-        if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
-          MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                EventMessage.EventType.ALTER_PARTITION,
-                                                new AlterPartitionEvent(oldPart, new_part, tbl, false, true, handler),
-                                                environmentContext);
-
-
+        if (transactionalListeners != null && transactionalListeners.size() > 0) {
+          AlterPartitionEvent alterPartitionEvent =
+              new AlterPartitionEvent(oldPart, new_part, tbl, true, handler);
+          alterPartitionEvent.setEnvironmentContext(environmentContext);
+          for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+            transactionalListener.onAlterPartition(alterPartitionEvent);
+          }
         }
         success = msdb.commitTransaction();
       } catch (InvalidObjectException e) {
@@ -483,9 +471,8 @@ public class HiveAlterHandler implements AlterHandler {
         msdb.alterPartition(dbname, name, part_vals, new_part);
       } else {
         try {
-          // if tbl location is available use it
-          // else derive the tbl location from database location
-          destPath = wh.getPartitionPath(msdb.getDatabase(dbname), tbl, new_part.getValues());
+          destPath = new Path(wh.getTablePath(msdb.getDatabase(dbname), name),
+            Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues()));
           destPath = constructRenamedPath(destPath, new Path(new_part.getSd().getLocation()));
         } catch (NoSuchObjectException e) {
           LOG.debug("Didn't find object in metastore ", e);
@@ -541,11 +528,13 @@ public class HiveAlterHandler implements AlterHandler {
         }
       }
 
-      if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
-        MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                              EventMessage.EventType.ALTER_PARTITION,
-                                              new AlterPartitionEvent(oldPart, new_part, tbl, false, true, handler),
-                                              environmentContext);
+      if (transactionalListeners != null && transactionalListeners.size() > 0) {
+        AlterPartitionEvent alterPartitionEvent =
+            new AlterPartitionEvent(oldPart, new_part, tbl, true, handler);
+        alterPartitionEvent.setEnvironmentContext(environmentContext);
+        for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+          transactionalListener.onAlterPartition(alterPartitionEvent);
+        }
       }
 
       success = msdb.commitTransaction();
@@ -560,11 +549,11 @@ public class HiveAlterHandler implements AlterHandler {
           if (srcFs.exists(srcPath)) {
             //if destPath's parent path doesn't exist, we should mkdir it
             Path destParentPath = destPath.getParent();
-            if (!wh.mkdirs(destParentPath)) {
+            if (!wh.mkdirs(destParentPath, true)) {
                 throw new IOException("Unable to create path " + destParentPath);
             }
 
-            wh.renameDir(srcPath, destPath);
+            wh.renameDir(srcPath, destPath, true);
             LOG.info("Partition directory rename from " + srcPath + " to " + destPath + " done.");
           }
         } catch (IOException ex) {
@@ -574,11 +563,13 @@ public class HiveAlterHandler implements AlterHandler {
           try {
             msdb.openTransaction();
             msdb.alterPartition(dbname, name, new_part.getValues(), oldPart);
-            if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
-              MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                    EventMessage.EventType.ALTER_PARTITION,
-                                                    new AlterPartitionEvent(new_part, oldPart, tbl, false, success, handler),
-                                                    environmentContext);
+            if (transactionalListeners != null && transactionalListeners.size() > 0) {
+              AlterPartitionEvent alterPartitionEvent =
+                  new AlterPartitionEvent(new_part, oldPart, tbl, true, handler);
+              alterPartitionEvent.setEnvironmentContext(environmentContext);
+              for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+                transactionalListener.onAlterPartition(alterPartitionEvent);
+              }
             }
 
             revertMetaDataTransaction = msdb.commitTransaction();
@@ -649,12 +640,7 @@ public class HiveAlterHandler implements AlterHandler {
             MetaStoreUtils.updatePartitionStatsFast(tmpPart, wh, false, true, environmentContext);
           }
         }
-
-        // PartitionView does not have SD and we do not need to update its column stats
-        if (oldTmpPart.getSd() != null) {
-          updateOrGetPartitionColumnStats(msdb, dbname, name, oldTmpPart.getValues(),
-              oldTmpPart.getSd().getCols(), tbl, tmpPart);
-        }
+        updatePartColumnStats(msdb, dbname, name, oldTmpPart.getValues(), tmpPart);
       }
 
       msdb.alterPartitions(dbname, name, partValsList, new_parts);
@@ -668,10 +654,12 @@ public class HiveAlterHandler implements AlterHandler {
               "when invoking MetaStoreEventListener for alterPartitions event.");
         }
 
-        if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
-          MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                EventMessage.EventType.ALTER_PARTITION,
-                                                new AlterPartitionEvent(oldPart, newPart, tbl, false, true, handler));
+        if (transactionalListeners != null && transactionalListeners.size() > 0) {
+          AlterPartitionEvent alterPartitionEvent =
+              new AlterPartitionEvent(oldPart, newPart, tbl, true, handler);
+          for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+            transactionalListener.onAlterPartition(alterPartitionEvent);
+          }
         }
       }
 
@@ -724,8 +712,91 @@ public class HiveAlterHandler implements AlterHandler {
         defaultNewPath.toUri().getPath());
   }
 
+  private void updatePartColumnStatsForAlterColumns(RawStore msdb, Partition oldPartition,
+      String oldPartName, List<String> partVals, List<FieldSchema> oldCols, Partition newPart)
+          throws MetaException, InvalidObjectException {
+    String dbName = oldPartition.getDbName();
+    String tableName = oldPartition.getTableName();
+    try {
+      List<String> oldPartNames = Lists.newArrayList(oldPartName);
+      List<String> oldColNames = new ArrayList<String>(oldCols.size());
+      for (FieldSchema oldCol : oldCols) {
+        oldColNames.add(oldCol.getName());
+      }
+      List<FieldSchema> newCols = newPart.getSd().getCols();
+      List<ColumnStatistics> partsColStats = msdb.getPartitionColumnStatistics(dbName, tableName,
+          oldPartNames, oldColNames);
+      assert (partsColStats.size() <= 1);
+      for (ColumnStatistics partColStats : partsColStats) { //actually only at most one loop
+        List<ColumnStatisticsObj> statsObjs = partColStats.getStatsObj();
+        List<String> deletedCols = new ArrayList<String>();
+        for (ColumnStatisticsObj statsObj : statsObjs) {
+          boolean found =false;
+          for (FieldSchema newCol : newCols) {
+            if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
+                && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
+              found = true;
+              break;
+            }
+          }
+          if (!found) {
+            msdb.deletePartitionColumnStatistics(dbName, tableName, oldPartName, partVals,
+                statsObj.getColName());
+            deletedCols.add(statsObj.getColName());
+          }
+        }
+        StatsSetupConst.removeColumnStatsState(newPart.getParameters(), deletedCols);
+      }
+    } catch (NoSuchObjectException nsoe) {
+      LOG.debug("Could not find db entry." + nsoe);
+      //ignore
+    } catch (InvalidInputException iie) {
+      throw new InvalidObjectException
+      ("Invalid input to update partition column stats in alter table change columns" + iie);
+    }
+  }
+
+  private void updatePartColumnStats(RawStore msdb, String dbName, String tableName,
+      List<String> partVals, Partition newPart) throws MetaException, InvalidObjectException {
+    dbName = HiveStringUtils.normalizeIdentifier(dbName);
+    tableName = HiveStringUtils.normalizeIdentifier(tableName);
+    String newDbName = HiveStringUtils.normalizeIdentifier(newPart.getDbName());
+    String newTableName = HiveStringUtils.normalizeIdentifier(newPart.getTableName());
+
+    Table oldTable = msdb.getTable(dbName, tableName);
+    if (oldTable == null) {
+      return;
+    }
+
+    try {
+      String oldPartName = Warehouse.makePartName(oldTable.getPartitionKeys(), partVals);
+      String newPartName = Warehouse.makePartName(oldTable.getPartitionKeys(), newPart.getValues());
+      if (!dbName.equals(newDbName) || !tableName.equals(newTableName)
+          || !oldPartName.equals(newPartName)) {
+        msdb.deletePartitionColumnStatistics(dbName, tableName, oldPartName, partVals, null);
+      } else {
+        Partition oldPartition = msdb.getPartition(dbName, tableName, partVals);
+        if (oldPartition == null) {
+          return;
+        }
+        if (oldPartition.getSd() != null && newPart.getSd() != null) {
+        List<FieldSchema> oldCols = oldPartition.getSd().getCols();
+          if (!MetaStoreUtils.columnsIncluded(oldCols, newPart.getSd().getCols())) {
+            updatePartColumnStatsForAlterColumns(msdb, oldPartition, oldPartName, partVals, oldCols, newPart);
+          }
+        }
+      }
+    } catch (NoSuchObjectException nsoe) {
+      LOG.debug("Could not find db entry." + nsoe);
+      //ignore
+    } catch (InvalidInputException iie) {
+      throw new InvalidObjectException("Invalid input to update partition column stats." + iie);
+    }
+  }
+
   @VisibleForTesting
-  void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable)
+  void alterTableUpdateTableColumnStats(RawStore msdb,
+      Table oldTable, Table newTable)
       throws MetaException, InvalidObjectException {
     String dbName = oldTable.getDbName().toLowerCase();
     String tableName = HiveStringUtils.normalizeIdentifier(oldTable.getTableName());
@@ -802,69 +873,4 @@ public class HiveAlterHandler implements AlterHandler {
       throw new InvalidObjectException("Invalid inputs to update table column stats: " + e);
     }
   }
-
-  private ColumnStatistics updateOrGetPartitionColumnStats(
-      RawStore msdb, String dbname, String tblname, List<String> partVals,
-      List<FieldSchema> oldCols, Table table, Partition part)
-          throws MetaException, InvalidObjectException {
-    ColumnStatistics newPartsColStats = null;
-    try {
-      List<FieldSchema> newCols = part.getSd() == null ?
-          new ArrayList<FieldSchema>() : part.getSd().getCols();
-      String oldPartName = Warehouse.makePartName(table.getPartitionKeys(), partVals);
-      String newPartName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues());
-      boolean rename = !part.getDbName().equals(dbname) || !part.getTableName().equals(tblname)
-          || !oldPartName.equals(newPartName);
-
-      // do not need to update column stats if alter partition is not for rename or changing existing columns
-      if (!rename && MetaStoreUtils.columnsIncluded(oldCols, newCols)) {
-        return newPartsColStats;
-      }
-      List<String> oldColNames = new ArrayList<String>(oldCols.size());
-      for (FieldSchema oldCol : oldCols) {
-        oldColNames.add(oldCol.getName());
-      }
-      List<String> oldPartNames = Lists.newArrayList(oldPartName);
-      List<ColumnStatistics> partsColStats = msdb.getPartitionColumnStatistics(dbname, tblname,
-          oldPartNames, oldColNames);
-      assert (partsColStats.size() <= 1);
-      for (ColumnStatistics partColStats : partsColStats) { //actually only at most one loop
-        List<ColumnStatisticsObj> newStatsObjs = new ArrayList<ColumnStatisticsObj>();
-        List<ColumnStatisticsObj> statsObjs = partColStats.getStatsObj();
-        List<String> deletedCols = new ArrayList<String>();
-        for (ColumnStatisticsObj statsObj : statsObjs) {
-          boolean found =false;
-          for (FieldSchema newCol : newCols) {
-            if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
-                && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
-              found = true;
-              break;
-            }
-          }
-          if (found) {
-            if (rename) {
-              msdb.deletePartitionColumnStatistics(dbname, tblname, partColStats.getStatsDesc().getPartName(),
-                  partVals, statsObj.getColName());
-              newStatsObjs.add(statsObj);
-            }
-          } else {
-            msdb.deletePartitionColumnStatistics(dbname, tblname, partColStats.getStatsDesc().getPartName(),
-                partVals, statsObj.getColName());
-            deletedCols.add(statsObj.getColName());
-          }
-        }
-        StatsSetupConst.removeColumnStatsState(part.getParameters(), deletedCols);
-        if (!newStatsObjs.isEmpty()) {
-          partColStats.setStatsObj(newStatsObjs);
-          newPartsColStats = partColStats;
-        }
-      }
-    } catch (NoSuchObjectException nsoe) {
-      // ignore this exception, actually this exception won't be thrown from getPartitionColumnStatistics
-    } catch (InvalidInputException iie) {
-      throw new InvalidObjectException("Invalid input to delete partition column stats." + iie);
-    }
-
-    return newPartsColStats;
-  }
 }
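
For context on the new partition-column-stats path added above: the "rename" case is detected purely by comparing the normalized db/table names and the partition names built by Warehouse.makePartName; when they differ the old stats row is deleted outright, otherwise stats are only pruned when the new storage descriptor changes or drops columns. Below is a minimal sketch of that check, assuming the hive-metastore classes used above are on the classpath; the PartitionRenameCheck/isRename names are illustrative and are not part of HiveAlterHandler.

    import java.util.List;

    import org.apache.hadoop.hive.common.HiveStringUtils;
    import org.apache.hadoop.hive.metastore.Warehouse;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.Table;

    // Illustrative helper: true when an ALTER PARTITION moves the partition to a new
    // db/table/partition name, i.e. the case where the old column stats are dropped.
    public final class PartitionRenameCheck {
      public static boolean isRename(Table oldTable, String dbName, String tableName,
          List<String> oldPartVals, Partition newPart) throws MetaException {
        String oldDb = HiveStringUtils.normalizeIdentifier(dbName);
        String oldTbl = HiveStringUtils.normalizeIdentifier(tableName);
        String newDb = HiveStringUtils.normalizeIdentifier(newPart.getDbName());
        String newTbl = HiveStringUtils.normalizeIdentifier(newPart.getTableName());
        // Partition names are built from the table's partition keys plus the value lists,
        // e.g. part1=1/part2=1, so comparing them detects a change of partition spec.
        String oldPartName = Warehouse.makePartName(oldTable.getPartitionKeys(), oldPartVals);
        String newPartName = Warehouse.makePartName(oldTable.getPartitionKeys(), newPart.getValues());
        return !oldDb.equals(newDb) || !oldTbl.equals(newTbl) || !oldPartName.equals(newPartName);
      }
    }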


[34/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
new file mode 100644
index 0000000..2ae9cc0
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
@@ -0,0 +1,792 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.security;
+
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.junit.Assert;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hive.cli.CliSessionState;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * This tests the flag 'hive.warehouse.subdir.inherit.perms'.
+ */
+public abstract class FolderPermissionBase {
+  protected static HiveConf conf;
+  protected static Driver driver;
+  protected static String dataFileDir;
+  protected static Path dataFilePath;
+  protected static FileSystem fs;
+
+  protected static Path warehouseDir;
+  protected static Path baseDfsDir;
+
+  protected static final PathFilter hiddenFileFilter = new PathFilter(){
+    public boolean accept(Path p){
+      String name = p.getName();
+      return !name.startsWith("_") && !name.startsWith(".");
+    }
+  };
+
+
+  public abstract void setPermission(String locn, int permIndex) throws Exception;
+
+  public abstract void verifyPermission(String locn, int permIndex) throws Exception;
+
+
+  public void setPermission(String locn) throws Exception {
+    setPermission(locn, 0);
+  }
+
+  public void verifyPermission(String locn) throws Exception {
+    verifyPermission(locn, 0);
+  }
+
+
+  public static void baseSetup() throws Exception {
+    MiniDFSShim dfs = ShimLoader.getHadoopShims().getMiniDfs(conf, 4, true, null);
+    fs = dfs.getFileSystem();
+    baseDfsDir =  new Path(new Path(fs.getUri()), "/base");
+    fs.mkdirs(baseDfsDir);
+    warehouseDir = new Path(baseDfsDir, "warehouse");
+    fs.mkdirs(warehouseDir);
+    conf.setVar(ConfVars.METASTOREWAREHOUSE, warehouseDir.toString());
+
+    // Assuming the tests are run on either the C: or D: drive on Windows.
+    dataFileDir = conf.get("test.data.files").replace('\\', '/')
+        .replace("c:", "").replace("C:", "").replace("D:", "").replace("d:", "");
+    dataFilePath = new Path(dataFileDir, "kv1.txt");
+
+    // Set up scratch directory
+    Path scratchDir = new Path(baseDfsDir, "scratchdir");
+    conf.setVar(HiveConf.ConfVars.SCRATCHDIR, scratchDir.toString());
+
+    //set hive conf vars
+    conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+    conf.setBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS, true);
+    conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
+    int port = MetaStoreUtils.findFreePort();
+    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+
+    SessionState.start(new CliSessionState(conf));
+    driver = new Driver(conf);
+    setupDataTable();
+  }
+
+
+  private static void setupDataTable() throws Exception {
+    CommandProcessorResponse ret = driver.run("DROP TABLE IF EXISTS mysrc");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    ret = driver.run("CREATE TABLE mysrc (key STRING, value STRING) PARTITIONED BY (part1 string, part2 string) STORED AS TEXTFILE");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' INTO TABLE mysrc PARTITION (part1='1',part2='1')");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' INTO TABLE mysrc PARTITION (part1='2',part2='2')");
+    Assert.assertEquals(0,ret.getResponseCode());
+  }
+
+  @Before
+  public void setupBeforeTest() throws Exception {
+    driver.run("USE default");
+  }
+
+  @Test
+  public void testCreateDb() throws Exception {
+    //see if db inherits permission from warehouse directory.
+    String testDb = "mydb";
+    String tableName = "createtable";
+
+    setPermission(warehouseDir.toString());
+    verifyPermission(warehouseDir.toString());
+
+    CommandProcessorResponse ret = driver.run("CREATE DATABASE " + testDb);
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    assertExistence(warehouseDir + "/" + testDb + ".db");
+    verifyPermission(warehouseDir + "/" + testDb + ".db");
+
+    ret = driver.run("USE " + testDb);
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);
+
+    ret = driver.run("insert into table " + tableName + " select key,value from default.mysrc");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    assertExistence(warehouseDir + "/" + testDb + ".db/" + tableName);
+    verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);
+
+    Assert.assertTrue(listStatus(warehouseDir + "/" + testDb + ".db/" + tableName).size() > 0);
+    for (String child : listStatus(warehouseDir + "/" + testDb + ".db/" + tableName)) {
+      verifyPermission(child);
+    }
+
+    ret = driver.run("USE default");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    //cleanup after the test.
+    fs.delete(warehouseDir, true);
+    fs.mkdirs(warehouseDir);
+    Assert.assertEquals(listStatus(warehouseDir.toString()).size(), 0);
+    setupDataTable();
+  }
+
+  @Test
+  public void testCreateTable() throws Exception {
+    String testDb = "mydb2";
+    String tableName = "createtable";
+    CommandProcessorResponse ret = driver.run("CREATE DATABASE " + testDb);
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    assertExistence(warehouseDir + "/" + testDb + ".db");
+    setPermission(warehouseDir + "/" + testDb + ".db");
+    verifyPermission(warehouseDir + "/" + testDb + ".db");
+
+    ret = driver.run("USE " + testDb);
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);
+
+    ret = driver.run("insert into table " + tableName + " select key,value from default.mysrc");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    assertExistence(warehouseDir + "/" + testDb + ".db/" + tableName);
+    verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);
+
+    Assert.assertTrue(listStatus(warehouseDir + "/" + testDb + ".db/" + tableName).size() > 0);
+    for (String child : listStatus(warehouseDir + "/" + testDb + ".db/" + tableName)) {
+      verifyPermission(child);
+    }
+
+    ret = driver.run("USE default");
+    Assert.assertEquals(0,ret.getResponseCode());
+  }
+
+
+  @Test
+  public void testInsertNonPartTable() throws Exception {
+    //case 1 is non-partitioned table.
+    String tableName = "nonpart";
+
+    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    String tableLoc = warehouseDir + "/" + tableName;
+    assertExistence(warehouseDir + "/" + tableName);
+
+    //case1A: insert into non-partitioned table.
+    setPermission(warehouseDir + "/" + tableName);
+    ret = driver.run("insert into table " + tableName + " select key,value from mysrc");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    verifyPermission(warehouseDir + "/" + tableName);
+    Assert.assertTrue(listStatus(tableLoc).size() > 0);
+    for (String child : listStatus(tableLoc)) {
+      verifyPermission(child);
+    }
+
+    //case1B: insert overwrite non-partitioned table
+    setPermission(warehouseDir + "/" + tableName, 1);
+    ret = driver.run("insert overwrite table " + tableName + " select key,value from mysrc");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    verifyPermission(warehouseDir + "/" + tableName, 1);
+    Assert.assertTrue(listStatus(tableLoc).size() > 0);
+    for (String child : listStatus(tableLoc)) {
+      verifyPermission(child, 1);
+    }
+  }
+
+  @Test
+  public void testInsertStaticSinglePartition() throws Exception {
+    String tableName = "singlestaticpart";
+    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string)");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    assertExistence(warehouseDir + "/" + tableName);
+    setPermission(warehouseDir + "/" + tableName);
+
+    //insert into test
+    ret = driver.run("insert into table " + tableName + " partition(part1='1') select key,value from mysrc where part1='1' and part2='1'");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    verifyPermission(warehouseDir + "/" + tableName);
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=1");
+
+    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1").size() > 0);
+    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1")) {
+      verifyPermission(child);
+    }
+
+    //insert overwrite test
+    setPermission(warehouseDir + "/" + tableName, 1);
+    setPermission(warehouseDir + "/" + tableName + "/part1=1", 1);
+    ret = driver.run("insert overwrite table " + tableName + " partition(part1='1') select key,value from mysrc where part1='1' and part2='1'");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    verifyPermission(warehouseDir + "/" + tableName, 1);
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=1", 1);
+
+    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1").size() > 0);
+    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1")) {
+      verifyPermission(child, 1);
+    }
+  }
+
+  @Test
+  public void testInsertStaticDualPartition() throws Exception {
+    String tableName = "dualstaticpart";
+    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    assertExistence(warehouseDir + "/" + tableName);
+    setPermission(warehouseDir + "/" + tableName);
+
+    //insert into test
+    ret = driver.run("insert into table " + tableName + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    verifyPermission(warehouseDir + "/" + tableName);
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=1");
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1");
+
+    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0);
+    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) {
+      verifyPermission(child);
+    }
+
+    //insert overwrite test
+    setPermission(warehouseDir + "/" + tableName, 1);
+    setPermission(warehouseDir + "/" + tableName + "/part1=1", 1);
+    setPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1", 1);
+
+    ret = driver.run("insert overwrite table " + tableName + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    verifyPermission(warehouseDir + "/" + tableName, 1);
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=1", 1);
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1", 1);
+
+    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0);
+    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) {
+      verifyPermission(child, 1);
+    }
+  }
+
+  @Test
+  public void testInsertDualDynamicPartitions() throws Exception {
+    String tableName = "dualdynamicpart";
+
+    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)");
+    Assert.assertEquals(0, ret.getResponseCode());
+    assertExistence(warehouseDir + "/" + tableName);
+
+    //Insert into test, with permission set 0.
+    setPermission(warehouseDir + "/" + tableName, 0);
+    ret = driver.run("insert into table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    verifyDualPartitionTable(warehouseDir + "/" + tableName, 0);
+
+    //Insert overwrite test, with permission set 1. We need to reset the existing partitions to 1 since the
+    //permissions should be inherited from the existing partitions.
+    setDualPartitionTable(warehouseDir + "/" + tableName, 1);
+    ret = driver.run("insert overwrite table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    verifyDualPartitionTable(warehouseDir + "/" + tableName, 1);
+  }
+
+  @Test
+  public void testInsertSingleDynamicPartition() throws Exception {
+    String tableName = "singledynamicpart";
+
+    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string)");
+    Assert.assertEquals(0,ret.getResponseCode());
+    String tableLoc = warehouseDir + "/" + tableName;
+    assertExistence(tableLoc);
+
+    //Insert into test, with permission set 0.
+    setPermission(tableLoc, 0);
+    ret = driver.run("insert into table " + tableName + " partition (part1) select key,value,part1 from mysrc");
+    Assert.assertEquals(0,ret.getResponseCode());
+    verifySinglePartition(tableLoc, 0);
+
+    //Insert overwrite test, with permission set 1. We need to reset the existing partitions to 1 since the
+    //permissions should be inherited from the existing partitions.
+    setSinglePartition(tableLoc, 1);
+    ret = driver.run("insert overwrite table " + tableName + " partition (part1) select key,value,part1 from mysrc");
+    Assert.assertEquals(0,ret.getResponseCode());
+    verifySinglePartition(tableLoc, 1);
+
+    //delete and re-insert using insert overwrite. There are different code paths for insert vs. insert overwrite on new tables.
+    ret = driver.run("DROP TABLE " + tableName);
+    Assert.assertEquals(0, ret.getResponseCode());
+    ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string)");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    assertExistence(warehouseDir + "/" + tableName);
+    setPermission(warehouseDir + "/" + tableName);
+
+    ret = driver.run("insert overwrite table " + tableName + " partition (part1) select key,value,part1 from mysrc");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    verifySinglePartition(tableLoc, 0);
+  }
+
+  @Test
+  public void testPartition() throws Exception {
+    String tableName = "alterpart";
+    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int, part3 int)");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    assertExistence(warehouseDir + "/" + tableName);
+    setPermission(warehouseDir + "/" + tableName);
+
+    ret = driver.run("insert into table " + tableName + " partition(part1='1',part2='1',part3='1') select key,value from mysrc");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    assertExistence(warehouseDir + "/" + tableName);
+    setPermission(warehouseDir + "/" + tableName, 1);
+
+    //alter partition
+    ret = driver.run("alter table " + tableName + " partition (part1='1',part2='1',part3='1') rename to partition (part1='2',part2='2',part3='2')");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=2", 1);
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2", 1);
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2", 1);
+
+    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2").size() > 0);
+    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2")) {
+      verifyPermission(child, 1);
+    }
+
+    String tableName2 = "alterpart2";
+    ret = driver.run("CREATE TABLE " + tableName2 + " (key string, value string) partitioned by (part1 int, part2 int, part3 int)");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    assertExistence(warehouseDir + "/" + tableName2);
+    setPermission(warehouseDir + "/" + tableName2);
+    ret = driver.run("alter table " + tableName2 + " exchange partition (part1='2',part2='2',part3='2') with table " + tableName);
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    //alter exchange cannot change the base table's permission;
+    //it only controls the final partition folder's permission
+    verifyPermission(warehouseDir + "/" + tableName2 + "/part1=2", 0);
+    verifyPermission(warehouseDir + "/" + tableName2 + "/part1=2/part2=2", 0);
+    verifyPermission(warehouseDir + "/" + tableName2 + "/part1=2/part2=2/part3=2", 1);
+  }
+
+  @Test
+  public void testExternalTable() throws Exception {
+    String tableName = "externaltable";
+
+    String myLocation = warehouseDir + "/myfolder";
+    FileSystem fs = FileSystem.get(new URI(myLocation), conf);
+    fs.mkdirs(new Path(myLocation));
+    setPermission(myLocation);
+
+    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) LOCATION '" + myLocation + "'");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    ret = driver.run("insert into table " + tableName + " select key,value from mysrc");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    Assert.assertTrue(listStatus(myLocation).size() > 0);
+    for (String child : listStatus(myLocation)) {
+      verifyPermission(child);
+    }
+  }
+
+  @Test
+  public void testLoadLocal() throws Exception {
+    //case 1 is non-partitioned table.
+    String tableName = "loadlocal";
+
+    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    String tableLoc = warehouseDir + "/" + tableName;
+    assertExistence(warehouseDir + "/" + tableName);
+
+    //case1A: load data local into non-partitioned table.
+    setPermission(warehouseDir + "/" + tableName);
+
+    ret = driver.run("load data local inpath '" + dataFilePath + "' into table " + tableName);
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    Assert.assertTrue(listStatus(tableLoc).size() > 0);
+    for (String child : listStatus(tableLoc)) {
+      verifyPermission(child);
+    }
+
+    //case1B: load data local overwrite into non-partitioned table
+    setPermission(warehouseDir + "/" + tableName, 1);
+    for (String child : listStatus(tableLoc)) {
+      setPermission(child, 1);
+    }
+    ret = driver.run("load data local inpath '" + dataFilePath + "' overwrite into table " + tableName);
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    Assert.assertTrue(listStatus(tableLoc).size() > 0);
+    for (String child : listStatus(tableLoc)) {
+      verifyPermission(child, 1);
+    }
+
+    //case 2 is partitioned table.
+    tableName = "loadlocalpartition";
+
+    ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int)");
+    Assert.assertEquals(0,ret.getResponseCode());
+    tableLoc = warehouseDir + "/" + tableName;
+    assertExistence(tableLoc);
+
+    //case 2A: load data local into partitioned table.
+    setPermission(tableLoc);
+    ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    String partLoc = warehouseDir + "/" + tableName + "/part1=1/part2=1";
+    Assert.assertTrue(listStatus(partLoc).size() > 0);
+    for (String child : listStatus(partLoc)) {
+      verifyPermission(child);
+    }
+
+    //case 2B: load data local overwrite into partitioned table; set the test table/partition folder hierarchy to permission index 1.
+    //A local load overwrite only overwrites the existing partition content, not the permissions.
+    setPermission(tableLoc, 1);
+    setPermission(partLoc, 1);
+    for (String child : listStatus(partLoc)) {
+      setPermission(child, 1);
+    }
+    ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' OVERWRITE INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    Assert.assertTrue(listStatus(tableLoc).size() > 0);
+    for (String child : listStatus(partLoc)) {
+      verifyPermission(child, 1);
+    }
+  }
+
+  @Test
+  public void testLoad() throws Exception {
+    String tableName = "load";
+    String location = "/hdfsPath";
+    fs.copyFromLocalFile(dataFilePath, new Path(location));
+
+    //case 1: load data
+    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
+    Assert.assertEquals(0,ret.getResponseCode());
+    String tableLoc = warehouseDir + "/" + tableName;
+    assertExistence(warehouseDir + "/" + tableName);
+
+    //case1A: load data into non-partitioned table.
+    setPermission(warehouseDir + "/" + tableName);
+
+    ret = driver.run("load data inpath '" + location + "' into table " + tableName);
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    Assert.assertTrue(listStatus(tableLoc).size() > 0);
+    for (String child : listStatus(tableLoc)) {
+      verifyPermission(child);
+    }
+
+    //case1B: load data overwrite into non-partitioned table
+    setPermission(warehouseDir + "/" + tableName, 1);
+    for (String child : listStatus(tableLoc)) {
+      setPermission(child, 1);
+    }
+
+    fs.copyFromLocalFile(dataFilePath, new Path(location));
+    ret = driver.run("load data inpath '" + location + "' overwrite into table " + tableName);
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    Assert.assertTrue(listStatus(tableLoc).size() > 0);
+    for (String child : listStatus(tableLoc)) {
+      verifyPermission(child, 1);
+    }
+
+    //case 2 is partitioned table.
+    tableName = "loadpartition";
+
+    ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int)");
+    Assert.assertEquals(0,ret.getResponseCode());
+    tableLoc = warehouseDir + "/" + tableName;
+    assertExistence(tableLoc);
+
+    //case 2A: load data into partitioned table.
+    setPermission(tableLoc);
+    fs.copyFromLocalFile(dataFilePath, new Path(location));
+    ret = driver.run("LOAD DATA INPATH '" + location + "' INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    String partLoc = warehouseDir + "/" + tableName + "/part1=1/part2=1";
+    Assert.assertTrue(listStatus(partLoc).size() > 0);
+    for (String child : listStatus(partLoc)) {
+      verifyPermission(child);
+    }
+
+    //case 2B: load data overwrite into partitioned table; set the test table/partition folder hierarchy to permission index 1.
+    //A load overwrite only overwrites the existing partition content, not the permissions.
+    setPermission(tableLoc, 1);
+    setPermission(partLoc, 1);
+    Assert.assertTrue(listStatus(partLoc).size() > 0);
+    for (String child : listStatus(partLoc)) {
+      setPermission(child, 1);
+    }
+
+    fs.copyFromLocalFile(dataFilePath, new Path(location));
+    ret = driver.run("LOAD DATA INPATH '" + location + "' OVERWRITE INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    Assert.assertTrue(listStatus(tableLoc).size() > 0);
+    for (String child : listStatus(partLoc)) {
+      verifyPermission(child, 1);
+    }
+  }
+
+  @Test
+  public void testCtas() throws Exception {
+    String testDb = "ctasdb";
+    String tableName = "createtable";
+    CommandProcessorResponse ret = driver.run("CREATE DATABASE " + testDb);
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    assertExistence(warehouseDir + "/" + testDb + ".db");
+    setPermission(warehouseDir + "/" + testDb + ".db");
+    verifyPermission(warehouseDir + "/" + testDb + ".db");
+
+    ret = driver.run("USE " + testDb);
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    ret = driver.run("create table " + tableName + " as select key,value from default.mysrc");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    assertExistence(warehouseDir + "/" + testDb + ".db/" + tableName);
+    verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);
+
+    Assert.assertTrue(listStatus(warehouseDir + "/" + testDb + ".db/" + tableName).size() > 0);
+    for (String child : listStatus(warehouseDir + "/" + testDb + ".db/" + tableName)) {
+      verifyPermission(child);
+    }
+
+    ret = driver.run("USE default");
+    Assert.assertEquals(0,ret.getResponseCode());
+  }
+
+  @Test
+  public void testExim() throws Exception {
+
+    //export the table to external file.
+    String myLocation = warehouseDir + "/exim";
+    FileSystem fs = FileSystem.get(new URI(myLocation), conf);
+    fs.mkdirs(new Path(myLocation));
+    setPermission(myLocation);
+    myLocation = myLocation + "/temp";
+
+    CommandProcessorResponse ret = driver.run("export table mysrc to '" + myLocation + "'");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    //check if exported data has inherited the permissions.
+    assertExistence(myLocation);
+    verifyPermission(myLocation);
+
+    assertExistence(myLocation + "/part1=1/part2=1");
+    verifyPermission(myLocation + "/part1=1/part2=1");
+    Assert.assertTrue(listStatus(myLocation + "/part1=1/part2=1").size() > 0);
+    for (String child : listStatus(myLocation + "/part1=1/part2=1")) {
+      verifyPermission(child);
+    }
+
+    assertExistence(myLocation + "/part1=2/part2=2");
+    verifyPermission(myLocation + "/part1=2/part2=2");
+    Assert.assertTrue(listStatus(myLocation + "/part1=2/part2=2").size() > 0);
+    for (String child : listStatus(myLocation + "/part1=2/part2=2")) {
+      verifyPermission(child);
+    }
+
+    //import the table back into another database
+    String testDb = "eximdb";
+    ret = driver.run("CREATE DATABASE " + testDb);
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    //use a different permission for this import location, to verify that it is really set (permIndex=1)
+    assertExistence(warehouseDir + "/" + testDb + ".db");
+    setPermission(warehouseDir + "/" + testDb + ".db", 1);
+
+    ret = driver.run("USE " + testDb);
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    ret = driver.run("import from '" + myLocation + "'");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    //check permissions of the imported table; it should inherit from the destination database directory
+    assertExistence(warehouseDir + "/" + testDb + ".db/mysrc");
+    verifyPermission(warehouseDir + "/" + testDb + ".db/mysrc", 1);
+
+    myLocation = warehouseDir + "/" + testDb + ".db/mysrc";
+    assertExistence(myLocation);
+    verifyPermission(myLocation, 1);
+
+    assertExistence(myLocation + "/part1=1/part2=1");
+    verifyPermission(myLocation + "/part1=1/part2=1", 1);
+    Assert.assertTrue(listStatus(myLocation + "/part1=1/part2=1").size() > 0);
+    for (String child : listStatus(myLocation + "/part1=1/part2=1")) {
+      verifyPermission(child, 1);
+    }
+
+    assertExistence(myLocation + "/part1=2/part2=2");
+    verifyPermission(myLocation + "/part1=2/part2=2", 1);
+    Assert.assertTrue(listStatus(myLocation + "/part1=2/part2=2").size() > 0);
+    for (String child : listStatus(myLocation + "/part1=2/part2=2")) {
+      verifyPermission(child, 1);
+    }
+  }
+
+  /**
+   * Tests that the table's permissions do not change after truncation.
+   * @throws Exception
+   */
+  @Test
+  public void testTruncateTable() throws Exception {
+    String tableName = "truncatetable";
+    String partition = warehouseDir + "/" + tableName + "/part1=1";
+
+    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key STRING, value STRING) PARTITIONED BY (part1 INT)");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    setPermission(warehouseDir + "/" + tableName);
+
+    ret = driver.run("insert into table " + tableName + " partition(part1='1') select key,value from mysrc where part1='1' and part2='1'");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    assertExistence(warehouseDir + "/" + tableName);
+
+    verifyPermission(warehouseDir + "/" + tableName);
+    verifyPermission(partition);
+
+    ret = driver.run("TRUNCATE TABLE " + tableName);
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    assertExistence(warehouseDir + "/" + tableName);
+    verifyPermission(warehouseDir + "/" + tableName);
+
+    ret = driver.run("insert into table " + tableName + " partition(part1='1') select key,value from mysrc where part1='1' and part2='1'");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    verifyPermission(warehouseDir + "/" + tableName);
+
+    assertExistence(partition);
+    verifyPermission(partition);
+
+    // Also test the partition folder if the partition is truncated
+    ret = driver.run("TRUNCATE TABLE " + tableName + " partition(part1='1')");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    assertExistence(partition);
+    verifyPermission(partition);
+  }
+
+  private void setSinglePartition(String tableLoc, int index) throws Exception {
+    setPermission(tableLoc + "/part1=1", index);
+    setPermission(tableLoc + "/part1=2", index);
+  }
+
+  private void verifySinglePartition(String tableLoc, int index) throws Exception {
+    verifyPermission(tableLoc + "/part1=1", index);
+    verifyPermission(tableLoc + "/part1=2", index);
+
+    Assert.assertTrue(listStatus(tableLoc + "/part1=1").size() > 0);
+    for (String child : listStatus(tableLoc + "/part1=1")) {
+      verifyPermission(child, index);
+    }
+
+    Assert.assertTrue(listStatus(tableLoc + "/part1=2").size() > 0);
+    for (String child : listStatus(tableLoc + "/part1=2")) {
+      verifyPermission(child, index);
+    }
+  }
+
+  private void setDualPartitionTable(String baseTablePath, int index) throws Exception {
+    setPermission(baseTablePath, index);
+    setPermission(baseTablePath + "/part1=1", index);
+    setPermission(baseTablePath + "/part1=1/part2=1", index);
+
+    setPermission(baseTablePath + "/part1=2", index);
+    setPermission(baseTablePath + "/part1=2/part2=2", index);
+  }
+
+  private void verifyDualPartitionTable(String baseTablePath, int index) throws Exception {
+    verifyPermission(baseTablePath, index);
+    verifyPermission(baseTablePath + "/part1=1", index);
+    verifyPermission(baseTablePath + "/part1=1/part2=1", index);
+
+    verifyPermission(baseTablePath + "/part1=2", index);
+    verifyPermission(baseTablePath + "/part1=2/part2=2", index);
+
+    Assert.assertTrue(listStatus(baseTablePath + "/part1=1/part2=1").size() > 0);
+    for (String child : listStatus(baseTablePath + "/part1=1/part2=1")) {
+      verifyPermission(child, index);
+    }
+
+    Assert.assertTrue(listStatus(baseTablePath + "/part1=2/part2=2").size() > 0);
+    for (String child : listStatus(baseTablePath + "/part1=2/part2=2")) {
+      verifyPermission(child, index);
+    }
+  }
+
+  private void assertExistence(String locn) throws Exception {
+    Assert.assertTrue(fs.exists(new Path(locn)));
+  }
+
+  private List<String> listStatus(String locn) throws Exception {
+    List<String> results = new ArrayList<String>();
+    FileStatus[] listStatus = fs.listStatus(new Path(locn), hiddenFileFilter);
+    for (FileStatus status : listStatus) {
+      results.add(status.getPath().toString());
+    }
+    return results;
+  }
+}
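
A note on what "inherit" means in these assertions: the base class only compares raw FsPermission values between a parent directory that was explicitly chmod'ed and the files/directories Hive creates underneath it. A minimal standalone sketch of that comparison, assuming hadoop-common is on the classpath; the InheritanceCheck/inheritsPermission names are illustrative and not part of the test base class.

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    // Illustrative check of what "subdir inherits perms" means at the HDFS level:
    // after Hive creates a child dir or file under parent, both carry the same FsPermission.
    public final class InheritanceCheck {
      public static boolean inheritsPermission(FileSystem fs, Path parent, Path child)
          throws IOException {
        FsPermission parentPerm = fs.getFileStatus(parent).getPermission();
        FsPermission childPerm = fs.getFileStatus(child).getPermission();
        return parentPerm.equals(childPerm);
      }
    }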

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java
new file mode 100644
index 0000000..6cc2d18
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.security;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestFolderPermissions extends FolderPermissionBase {
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    conf = new HiveConf(TestFolderPermissions.class);
+    conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict");
+    baseSetup();
+  }
+
+  public FsPermission[] expected = new FsPermission[] {
+     FsPermission.createImmutable((short) 0777),
+     FsPermission.createImmutable((short) 0766)
+  };
+
+  @Override
+  public void setPermission(String locn, int permIndex) throws Exception {
+    fs.setPermission(new Path(locn), expected[permIndex]);
+  }
+
+  @Override
+  public void verifyPermission(String locn, int permIndex) throws Exception {
+    FsPermission actual =  fs.getFileStatus(new Path(locn)).getPermission();
+    Assert.assertEquals(expected[permIndex], actual);
+  }
+}
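
The two expected permissions in this subclass are plain octal modes; for readers less used to FsPermission, the shorts map to the usual symbolic strings. A small, purely illustrative sketch, assuming hadoop-common is on the classpath:

    import org.apache.hadoop.fs.permission.FsPermission;

    public class PermissionModes {
      public static void main(String[] args) {
        // 0777 -> rwxrwxrwx, 0766 -> rwxrw-rw-; FsPermission.toString() prints the
        // symbolic form that the verify methods effectively compare against.
        System.out.println(FsPermission.createImmutable((short) 0777)); // rwxrwxrwx
        System.out.println(FsPermission.createImmutable((short) 0766)); // rwxrw-rw-
      }
    }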

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java
new file mode 100644
index 0000000..bb65ee7
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java
@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.security;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim;
+import org.apache.hadoop.hive.shims.Utils;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test cases focusing on drop table permission checks
+ */
+public class TestStorageBasedMetastoreAuthorizationDrops extends StorageBasedMetastoreTestBase {
+
+  protected static MiniDFSShim dfs = null;
+
+  @Override
+  protected HiveConf createHiveConf() throws Exception {
+    // Hadoop FS ACLs do not work with LocalFileSystem, so set up MiniDFS.
+    HiveConf conf = super.createHiveConf();
+
+    String currentUserName = Utils.getUGI().getShortUserName();
+    conf.set("hadoop.proxyuser." + currentUserName + ".groups", "*");
+    conf.set("hadoop.proxyuser." + currentUserName + ".hosts", "*");
+    dfs = ShimLoader.getHadoopShims().getMiniDfs(conf, 4, true, null);
+    FileSystem fs = dfs.getFileSystem();
+
+    Path warehouseDir = new Path(new Path(fs.getUri()), "/warehouse");
+    fs.mkdirs(warehouseDir);
+    conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehouseDir.toString());
+    conf.setBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS, true);
+
+    // Set up scratch directory
+    Path scratchDir = new Path(new Path(fs.getUri()), "/scratchdir");
+    conf.setVar(HiveConf.ConfVars.SCRATCHDIR, scratchDir.toString());
+
+    return conf;
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    super.tearDown();
+
+    if (dfs != null) {
+      dfs.shutdown();
+      dfs = null;
+    }
+  }
+
+  @Test
+  public void testDropDatabase() throws Exception {
+    dropDatabaseByOtherUser("-rwxrwxrwx", 0);
+    dropDatabaseByOtherUser("-rwxrwxrwt", 1);
+  }
+
+  /**
+   * Creates a db and tries to drop it as the 'other' user
+   * @param perm - permission for warehouse dir
+   * @param expectedRet - expected return code for drop by other user
+   * @throws Exception
+   */
+  public void dropDatabaseByOtherUser(String perm, int expectedRet) throws Exception {
+    String dbName = getTestDbName();
+    setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), perm);
+
+    CommandProcessorResponse resp = driver.run("create database " + dbName);
+    Assert.assertEquals(0, resp.getResponseCode());
+    Database db = msc.getDatabase(dbName);
+    validateCreateDb(db, dbName);
+
+    InjectableDummyAuthenticator.injectMode(true);
+
+
+    resp = driver.run("drop database " + dbName);
+    Assert.assertEquals(expectedRet, resp.getResponseCode());
+
+  }
+
+  @Test
+  public void testDropTable() throws Exception {
+    dropTableByOtherUser("-rwxrwxrwx", 0);
+    dropTableByOtherUser("-rwxrwxrwt", 1);
+  }
+
+  /**
+   * @param perm dir permission for database dir
+   * @param expectedRet expected return code on drop table
+   * @throws Exception
+   */
+  public void dropTableByOtherUser(String perm, int expectedRet) throws Exception {
+    String dbName = getTestDbName();
+    String tblName = getTestTableName();
+    setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), "-rwxrwxrwx");
+
+    CommandProcessorResponse resp = driver.run("create database " + dbName);
+    Assert.assertEquals(0, resp.getResponseCode());
+    Database db = msc.getDatabase(dbName);
+    validateCreateDb(db, dbName);
+
+    setPermissions(db.getLocationUri(), perm);
+
+    String dbDotTable = dbName + "." + tblName;
+    resp = driver.run("create table " + dbDotTable + "(i int)");
+    Assert.assertEquals(0, resp.getResponseCode());
+
+
+    InjectableDummyAuthenticator.injectMode(true);
+    resp = driver.run("drop table " + dbDotTable);
+    Assert.assertEquals(expectedRet, resp.getResponseCode());
+  }
+
+  /**
+   * Drop view should not be blocked by SBA. View will not have any location to drop.
+   * @throws Exception
+   */
+  @Test
+  public void testDropView() throws Exception {
+    String dbName = getTestDbName();
+    String tblName = getTestTableName();
+    String viewName = "view" + tblName;
+    setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), "-rwxrwxrwx");
+
+    CommandProcessorResponse resp = driver.run("create database " + dbName);
+    Assert.assertEquals(0, resp.getResponseCode());
+    Database db = msc.getDatabase(dbName);
+    validateCreateDb(db, dbName);
+
+    setPermissions(db.getLocationUri(), "-rwxrwxrwt");
+
+    String dbDotTable = dbName + "." + tblName;
+    resp = driver.run("create table " + dbDotTable + "(i int)");
+    Assert.assertEquals(0, resp.getResponseCode());
+
+    String dbDotView = dbName + "." + viewName;
+    resp = driver.run("create view " + dbDotView + " as select * from " +  dbDotTable);
+    Assert.assertEquals(0, resp.getResponseCode());
+
+    resp = driver.run("drop view " + dbDotView);
+    Assert.assertEquals(0, resp.getResponseCode());
+
+    resp = driver.run("drop table " + dbDotTable);
+    Assert.assertEquals(0, resp.getResponseCode());
+  }
+
+  @Test
+  public void testDropPartition() throws Exception {
+    dropPartitionByOtherUser("-rwxrwxrwx", 0);
+    dropPartitionByOtherUser("-rwxrwxrwt", 1);
+  }
+
+  /**
+   * @param perm permissions for table dir
+   * @param expectedRet expected return code
+   * @throws Exception
+   */
+  public void dropPartitionByOtherUser(String perm, int expectedRet) throws Exception {
+    String dbName = getTestDbName();
+    String tblName = getTestTableName();
+    setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), "-rwxrwxrwx");
+
+    CommandProcessorResponse resp = driver.run("create database " + dbName);
+    Assert.assertEquals(0, resp.getResponseCode());
+    Database db = msc.getDatabase(dbName);
+    validateCreateDb(db, dbName);
+    setPermissions(db.getLocationUri(), "-rwxrwxrwx");
+
+    String dbDotTable = dbName + "." + tblName;
+    resp = driver.run("create table " + dbDotTable + "(i int) partitioned by (b string)");
+    Assert.assertEquals(0, resp.getResponseCode());
+    Table tab = msc.getTable(dbName, tblName);
+    setPermissions(tab.getSd().getLocation(), perm);
+
+    resp = driver.run("alter table " + dbDotTable + " add partition (b='2011')");
+    Assert.assertEquals(0, resp.getResponseCode());
+
+    InjectableDummyAuthenticator.injectMode(true);
+    resp = driver.run("alter table " + dbDotTable + " drop partition (b='2011')");
+    Assert.assertEquals(expectedRet, resp.getResponseCode());
+  }
+
+}
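
The permission strings passed to setPermissions in these drop tests differ only in the trailing character: the 't' in "-rwxrwxrwt" is the sticky bit, which restricts deletion inside the directory to its owner, so drops issued by the injected 'other' user are expected to fail (return code 1), while "-rwxrwxrwx" lets them succeed. A minimal sketch of how those strings parse, assuming hadoop-common is on the classpath; StickyBitDemo is just an illustration, not part of the test.

    import org.apache.hadoop.fs.permission.FsPermission;

    public class StickyBitDemo {
      public static void main(String[] args) {
        // FsPermission.valueOf parses ls-style strings like the ones used above;
        // the trailing 't' sets the sticky bit on the directory.
        FsPermission open = FsPermission.valueOf("-rwxrwxrwx");
        FsPermission sticky = FsPermission.valueOf("-rwxrwxrwt");
        System.out.println(open.getStickyBit());   // false
        System.out.println(sticky.getStickyBit()); // true
      }
    }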

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationReads.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationReads.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationReads.java
new file mode 100644
index 0000000..ea631d2
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationReads.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.security;
+
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.CommandNeedRetryException;
+import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test cases focusing on read table permission checks
+ */
+public class TestStorageBasedMetastoreAuthorizationReads extends StorageBasedMetastoreTestBase {
+
+  @Test
+  public void testReadTableSuccess() throws Exception {
+    readTableByOtherUser("-rwxrwxrwx", true);
+  }
+
+  @Test
+  public void testReadTableSuccessWithReadOnly() throws Exception {
+    readTableByOtherUser("-r--r--r--", true);
+  }
+
+  @Test
+  public void testReadTableFailure() throws Exception {
+    readTableByOtherUser("-rwxrwx---", false);
+  }
+
+  /**
+   * @param perm dir permission for table dir
+   * @param isSuccess whether the command is expected to succeed
+   * @throws Exception
+   */
+  private void readTableByOtherUser(String perm, boolean isSuccess) throws Exception {
+    String dbName = getTestDbName();
+    String tblName = getTestTableName();
+    setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), "-rwxrwxrwx");
+
+    CommandProcessorResponse resp = driver.run("create database " + dbName);
+    Assert.assertEquals(0, resp.getResponseCode());
+    Database db = msc.getDatabase(dbName);
+    validateCreateDb(db, dbName);
+
+    setPermissions(db.getLocationUri(), "-rwxrwxrwx");
+
+    String dbDotTable = dbName + "." + tblName;
+    resp = driver.run("create table " + dbDotTable + "(i int) partitioned by (`date` string)");
+    Assert.assertEquals(0, resp.getResponseCode());
+    Table tab = msc.getTable(dbName, tblName);
+    setPermissions(tab.getSd().getLocation(), perm);
+
+    InjectableDummyAuthenticator.injectMode(true);
+
+    testCmd(driver, "DESCRIBE  " + dbDotTable, isSuccess);
+    testCmd(driver, "DESCRIBE EXTENDED  " + dbDotTable, isSuccess);
+    testCmd(driver, "SHOW PARTITIONS  " + dbDotTable, isSuccess);
+    testCmd(driver, "SHOW COLUMNS IN " + tblName + " IN " + dbName, isSuccess);
+    testCmd(driver, "use " + dbName, true);
+    testCmd(driver, "SHOW TABLE EXTENDED LIKE " + tblName, isSuccess);
+
+  }
+
+  @Test
+  public void testReadDbSuccess() throws Exception {
+    readDbByOtherUser("-rwxrwxrwx", true);
+  }
+
+  @Test
+  public void testReadDbFailure() throws Exception {
+    readDbByOtherUser("-rwxrwx---", false);
+  }
+
+
+  /**
+   * @param perm dir permission for database dir
+   * @param isSuccess whether the command is expected to succeed
+   * @throws Exception
+   */
+  private void readDbByOtherUser(String perm, boolean isSuccess) throws Exception {
+    String dbName = getTestDbName();
+    setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), perm);
+
+    CommandProcessorResponse resp = driver.run("create database " + dbName);
+    Assert.assertEquals(0, resp.getResponseCode());
+    Database db = msc.getDatabase(dbName);
+    validateCreateDb(db, dbName);
+    setPermissions(db.getLocationUri(), perm);
+
+    InjectableDummyAuthenticator.injectMode(true);
+
+    testCmd(driver, "DESCRIBE DATABASE " + dbName, isSuccess);
+    testCmd(driver, "DESCRIBE DATABASE EXTENDED " + dbName, isSuccess);
+    testCmd(driver, "SHOW TABLES IN " + dbName, isSuccess);
+    driver.run("use " + dbName);
+    testCmd(driver, "SHOW TABLES ", isSuccess);
+
+  }
+
+  private void testCmd(Driver driver, String cmd, boolean isSuccess)
+      throws CommandNeedRetryException {
+    CommandProcessorResponse resp = driver.run(cmd);
+    Assert.assertEquals(isSuccess, resp.getResponseCode() == 0);
+  }
+
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
index e0c05bd..66ed8ca 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
@@ -659,17 +659,17 @@ public class TestCompactor {
       Path resultFile = null;
       for (int i = 0; i < names.length; i++) {
         names[i] = stat[i].getPath().getName();
-        if (names[i].equals("delta_0000003_0000006")) {
+        if (names[i].equals("delta_0000001_0000004")) {
           resultFile = stat[i].getPath();
         }
       }
       Arrays.sort(names);
-      String[] expected = new String[]{"delta_0000003_0000004",
-          "delta_0000003_0000006", "delta_0000005_0000006", "delta_0000007_0000008"};
+      String[] expected = new String[]{"delta_0000001_0000002",
+          "delta_0000001_0000004", "delta_0000003_0000004", "delta_0000005_0000006"};
       if (!Arrays.deepEquals(expected, names)) {
         Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names));
       }
-      checkExpectedTxnsPresent(null, new Path[]{resultFile},columnNamesProperty, columnTypesProperty,  0, 3L, 6L);
+      checkExpectedTxnsPresent(null, new Path[]{resultFile},columnNamesProperty, columnTypesProperty,  0, 1L, 4L);
 
     } finally {
       connection.close();
@@ -718,11 +718,11 @@ public class TestCompactor {
       FileStatus[] stat =
           fs.listStatus(new Path(table.getSd().getLocation()), AcidUtils.baseFileFilter);
       if (1 != stat.length) {
-        Assert.fail("Expecting 1 file \"base_0000006\" and found " + stat.length + " files " + Arrays.toString(stat));
+        Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat));
       }
       String name = stat[0].getPath().getName();
-      Assert.assertEquals(name, "base_0000006");
-      checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 3L, 6L);
+      Assert.assertEquals(name, "base_0000004");
+      checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 1L, 4L);
     } finally {
       connection.close();
     }
@@ -778,17 +778,17 @@ public class TestCompactor {
       Path resultDelta = null;
       for (int i = 0; i < names.length; i++) {
         names[i] = stat[i].getPath().getName();
-        if (names[i].equals("delta_0000003_0000006")) {
+        if (names[i].equals("delta_0000001_0000004")) {
           resultDelta = stat[i].getPath();
         }
       }
       Arrays.sort(names);
-      String[] expected = new String[]{"delta_0000003_0000004",
-          "delta_0000003_0000006", "delta_0000005_0000006"};
+      String[] expected = new String[]{"delta_0000001_0000002",
+          "delta_0000001_0000004", "delta_0000003_0000004"};
       if (!Arrays.deepEquals(expected, names)) {
         Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names));
       }
-      checkExpectedTxnsPresent(null, new Path[]{resultDelta}, columnNamesProperty, columnTypesProperty, 0, 3L, 6L);
+      checkExpectedTxnsPresent(null, new Path[]{resultDelta}, columnNamesProperty, columnTypesProperty, 0, 1L, 4L);
     } finally {
       connection.close();
     }
@@ -844,13 +844,13 @@ public class TestCompactor {
         Assert.fail("majorCompactAfterAbort FileStatus[] stat " + Arrays.toString(stat));
       }
       if (1 != stat.length) {
-        Assert.fail("Expecting 1 file \"base_0000006\" and found " + stat.length + " files " + Arrays.toString(stat));
+        Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat));
       }
       String name = stat[0].getPath().getName();
-      if (!name.equals("base_0000006")) {
-        Assert.fail("majorCompactAfterAbort name " + name + " not equals to base_0000006");
+      if (!name.equals("base_0000004")) {
+        Assert.fail("majorCompactAfterAbort name " + name + " not equals to base_0000004");
       }
-      checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 3L, 6L);
+      checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 1L, 4L);
     } finally {
       connection.close();
     }
@@ -899,11 +899,11 @@ public class TestCompactor {
       FileStatus[] stat =
           fs.listStatus(new Path(table.getSd().getLocation()), AcidUtils.baseFileFilter);
       if (1 != stat.length) {
-        Assert.fail("Expecting 1 file \"base_0000006\" and found " + stat.length + " files " + Arrays.toString(stat));
+        Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat));
       }
       String name = stat[0].getPath().getName();
-      Assert.assertEquals(name, "base_0000006");
-      checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 3L, 6L);
+      Assert.assertEquals(name, "base_0000004");
+      checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 1L, 4L);
     } finally {
       connection.close();
     }
@@ -923,18 +923,18 @@ public class TestCompactor {
         " STORED AS ORC  TBLPROPERTIES ('transactional'='true',"
         + "'transactional_properties'='default')", driver);
 
-    // Insert some data -> this will generate only insert deltas and no delete deltas: delta_3_3
+    // Insert some data -> this will generate only insert deltas and no delete deltas: delta_1_1
     executeStatementOnDriver("INSERT INTO " + tblName +"(a,b) VALUES(1, 'foo')", driver);
 
-    // Insert some data -> this will again generate only insert deltas and no delete deltas: delta_4_4
+    // Insert some data -> this will again generate only insert deltas and no delete deltas: delta_2_2
     executeStatementOnDriver("INSERT INTO " + tblName +"(a,b) VALUES(2, 'bar')", driver);
 
-    // Delete some data -> this will generate only delete deltas and no insert deltas: delete_delta_5_5
+    // Delete some data -> this will generate only delete deltas and no insert deltas: delete_delta_3_3
     executeStatementOnDriver("DELETE FROM " + tblName +" WHERE a = 2", driver);
 
     // Now, compact -> Compaction produces a single range for both delta and delete delta
-    // That is, both delta and delete_deltas would be compacted into delta_3_5 and delete_delta_3_5
-    // even though there are only two delta_3_3, delta_4_4 and one delete_delta_5_5.
+    // That is, both delta and delete_deltas would be compacted into delta_1_3 and delete_delta_1_3
+    // even though there are only two delta_1_1, delta_2_2 and one delete_delta_3_3.
     TxnStore txnHandler = TxnUtils.getTxnStore(conf);
     txnHandler.compact(new CompactionRequest(dbName, tblName, CompactionType.MINOR));
     Worker t = new Worker();
@@ -957,16 +957,16 @@ public class TestCompactor {
     Path minorCompactedDelta = null;
     for (int i = 0; i < deltas.length; i++) {
       deltas[i] = stat[i].getPath().getName();
-      if (deltas[i].equals("delta_0000003_0000005")) {
+      if (deltas[i].equals("delta_0000001_0000003")) {
         minorCompactedDelta = stat[i].getPath();
       }
     }
     Arrays.sort(deltas);
-    String[] expectedDeltas = new String[]{"delta_0000003_0000003_0000", "delta_0000003_0000005", "delta_0000004_0000004_0000"};
+    String[] expectedDeltas = new String[]{"delta_0000001_0000001_0000", "delta_0000001_0000003", "delta_0000002_0000002_0000"};
     if (!Arrays.deepEquals(expectedDeltas, deltas)) {
       Assert.fail("Expected: " + Arrays.toString(expectedDeltas) + ", found: " + Arrays.toString(deltas));
     }
-    checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, 0, 3L, 4L);
+    checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, 0, 1L, 2L);
 
     // Verify that we have got correct set of delete_deltas.
     FileStatus[] deleteDeltaStat =
@@ -975,16 +975,16 @@ public class TestCompactor {
     Path minorCompactedDeleteDelta = null;
     for (int i = 0; i < deleteDeltas.length; i++) {
       deleteDeltas[i] = deleteDeltaStat[i].getPath().getName();
-      if (deleteDeltas[i].equals("delete_delta_0000003_0000005")) {
+      if (deleteDeltas[i].equals("delete_delta_0000001_0000003")) {
         minorCompactedDeleteDelta = deleteDeltaStat[i].getPath();
       }
     }
     Arrays.sort(deleteDeltas);
-    String[] expectedDeleteDeltas = new String[]{"delete_delta_0000003_0000005", "delete_delta_0000005_0000005_0000"};
+    String[] expectedDeleteDeltas = new String[]{"delete_delta_0000001_0000003", "delete_delta_0000003_0000003_0000"};
     if (!Arrays.deepEquals(expectedDeleteDeltas, deleteDeltas)) {
       Assert.fail("Expected: " + Arrays.toString(expectedDeleteDeltas) + ", found: " + Arrays.toString(deleteDeltas));
     }
-    checkExpectedTxnsPresent(null, new Path[]{minorCompactedDeleteDelta}, columnNamesProperty, columnTypesProperty, 0, 4L, 4L);
+    checkExpectedTxnsPresent(null, new Path[]{minorCompactedDeleteDelta}, columnNamesProperty, columnTypesProperty, 0, 2L, 2L);
   }
 
   @Test
@@ -1034,16 +1034,16 @@ public class TestCompactor {
     Path minorCompactedDelta = null;
     for (int i = 0; i < deltas.length; i++) {
       deltas[i] = stat[i].getPath().getName();
-      if (deltas[i].equals("delta_0000003_0000004")) {
+      if (deltas[i].equals("delta_0000001_0000002")) {
         minorCompactedDelta = stat[i].getPath();
       }
     }
     Arrays.sort(deltas);
-    String[] expectedDeltas = new String[]{"delta_0000003_0000003_0000", "delta_0000003_0000004", "delta_0000004_0000004_0000"};
+    String[] expectedDeltas = new String[]{"delta_0000001_0000001_0000", "delta_0000001_0000002", "delta_0000002_0000002_0000"};
     if (!Arrays.deepEquals(expectedDeltas, deltas)) {
       Assert.fail("Expected: " + Arrays.toString(expectedDeltas) + ", found: " + Arrays.toString(deltas));
     }
-    checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, 0, 3L, 4L);
+    checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, 0, 1L, 2L);
 
     // Verify that we have got correct set of delete_deltas.
     FileStatus[] deleteDeltaStat =
@@ -1052,12 +1052,12 @@ public class TestCompactor {
     Path minorCompactedDeleteDelta = null;
     for (int i = 0; i < deleteDeltas.length; i++) {
       deleteDeltas[i] = deleteDeltaStat[i].getPath().getName();
-      if (deleteDeltas[i].equals("delete_delta_0000003_0000004")) {
+      if (deleteDeltas[i].equals("delete_delta_0000001_0000002")) {
         minorCompactedDeleteDelta = deleteDeltaStat[i].getPath();
       }
     }
     Arrays.sort(deleteDeltas);
-    String[] expectedDeleteDeltas = new String[]{"delete_delta_0000003_0000004"};
+    String[] expectedDeleteDeltas = new String[]{"delete_delta_0000001_0000002"};
     if (!Arrays.deepEquals(expectedDeleteDeltas, deleteDeltas)) {
       Assert.fail("Expected: " + Arrays.toString(expectedDeleteDeltas) + ", found: " + Arrays.toString(deleteDeltas));
     }
@@ -1111,17 +1111,17 @@ public class TestCompactor {
       Path resultFile = null;
       for (int i = 0; i < names.length; i++) {
         names[i] = stat[i].getPath().getName();
-        if (names[i].equals("delta_0000003_0000006")) {
+        if (names[i].equals("delta_0000001_0000004")) {
           resultFile = stat[i].getPath();
         }
       }
       Arrays.sort(names);
-      String[] expected = new String[]{"delta_0000003_0000004",
-          "delta_0000003_0000006", "delta_0000005_0000006", "delta_0000007_0000008"};
+      String[] expected = new String[]{"delta_0000001_0000002",
+          "delta_0000001_0000004", "delta_0000003_0000004", "delta_0000005_0000006"};
       if (!Arrays.deepEquals(expected, names)) {
         Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names));
       }
-      checkExpectedTxnsPresent(null, new Path[]{resultFile},columnNamesProperty, columnTypesProperty,  0, 3L, 6L);
+      checkExpectedTxnsPresent(null, new Path[]{resultFile},columnNamesProperty, columnTypesProperty,  0, 1L, 4L);
 
       // Verify that we have got correct set of delete_deltas also
       FileStatus[] deleteDeltaStat =
@@ -1130,12 +1130,12 @@ public class TestCompactor {
       Path minorCompactedDeleteDelta = null;
       for (int i = 0; i < deleteDeltas.length; i++) {
         deleteDeltas[i] = deleteDeltaStat[i].getPath().getName();
-        if (deleteDeltas[i].equals("delete_delta_0000003_0000006")) {
+        if (deleteDeltas[i].equals("delete_delta_0000001_0000004")) {
           minorCompactedDeleteDelta = deleteDeltaStat[i].getPath();
         }
       }
       Arrays.sort(deleteDeltas);
-      String[] expectedDeleteDeltas = new String[]{"delete_delta_0000003_0000006"};
+      String[] expectedDeleteDeltas = new String[]{"delete_delta_0000001_0000004"};
       if (!Arrays.deepEquals(expectedDeleteDeltas, deleteDeltas)) {
         Assert.fail("Expected: " + Arrays.toString(expectedDeleteDeltas) + ", found: " + Arrays.toString(deleteDeltas));
       }
@@ -1332,16 +1332,6 @@ public class TestCompactor {
       public boolean isValidBase(long txnid) {
         return true;
       }
-
-      @Override
-      public boolean isTxnAborted(long txnid) {
-        return true;
-      }
-
-      @Override
-      public RangeResponse isTxnRangeAborted(long minTxnId, long maxTxnId) {
-        return RangeResponse.ALL;
-      }
     };
 
     OrcInputFormat aif = new OrcInputFormat();

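The renumbered expectations in the TestCompactor hunks above all follow the same ACID directory naming the assertions spell out: a delta (or delete_delta) directory is named after the transaction range it covers, a base directory after the highest transaction it contains, and per-statement deltas carry an extra statement-id suffix (e.g. delta_0000001_0000001_0000). The following is a minimal sketch of that convention, assuming the seven-digit zero padding visible in the expected names; real Hive derives these names through AcidUtils rather than a helper like this.

public class AcidDirNamingSketch {
  // Illustrative only: mirrors the directory names asserted in the compaction
  // tests above. Transaction ids are zero-padded to seven digits.
  static String delta(long minTxn, long maxTxn) {
    return String.format("delta_%07d_%07d", minTxn, maxTxn);
  }

  static String deleteDelta(long minTxn, long maxTxn) {
    return String.format("delete_delta_%07d_%07d", minTxn, maxTxn);
  }

  static String base(long maxTxn) {
    return String.format("base_%07d", maxTxn);
  }

  public static void main(String[] args) {
    // A minor compaction of delta_0000001_0000002 and delta_0000003_0000004
    // covers the whole range 1..4 in a single directory:
    System.out.println(delta(1, 4));        // delta_0000001_0000004
    System.out.println(deleteDelta(1, 4));  // delete_delta_0000001_0000004
    // A major compaction rolls everything up to the highest transaction into a base:
    System.out.println(base(4));            // base_0000004
  }
}
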
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
index 75f46ec..650c4b7 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
@@ -1069,17 +1069,4 @@ public class TestBeeLineWithArgs {
       this.shouldMatch = shouldMatch;
     }
   }
-
-  /**
-   * Test that Beeline can handle \\ characters within a string literal. Either at the beginning, middle, or end of the
-   * literal.
-   */
-  @Test
-  public void testBackslashInLiteral() throws Throwable {
-    String SCRIPT_TEXT = "select 'hello\\\\', '\\\\hello', 'hel\\\\lo', '\\\\' as literal;";
-    final String EXPECTED_PATTERN = "hello\\\\\t\\\\hello\thel\\\\lo\t\\\\";
-    List<String> argList = getBaseArgs(miniHS2.getBaseJdbcURL());
-    argList.add("--outputformat=tsv2");
-    testScriptFile(SCRIPT_TEXT, argList, EXPECTED_PATTERN, true);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java
index 604c234..22630b9 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java
@@ -36,7 +36,6 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaException;
 import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo;
-import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hive.beeline.HiveSchemaHelper.NestedScriptParser;
 import org.apache.hive.beeline.HiveSchemaHelper.PostgresCommandParser;
 
@@ -58,10 +57,6 @@ public class TestSchemaTool extends TestCase {
     hiveConf = new HiveConf(this.getClass());
     schemaTool = new HiveSchemaTool(
         System.getProperty("test.tmp.dir", "target/tmp"), hiveConf, "derby");
-    schemaTool.setUserName(
-        schemaTool.getHiveConf().get(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME.varname));
-    schemaTool.setPassWord(ShimLoader.getHadoopShims().getPassword(schemaTool.getHiveConf(),
-          HiveConf.ConfVars.METASTOREPWD.varname));
     System.setProperty("beeLine.system.exit", "true");
     errStream = System.err;
     outStream = System.out;
@@ -125,8 +120,8 @@ public class TestSchemaTool extends TestCase {
     boolean isValid = (boolean)schemaTool.validateSchemaTables(conn);
     assertTrue(isValid);
 
-    // upgrade from 2.0.0 schema and re-validate
-    schemaTool.doUpgrade("2.0.0");
+    // upgrade to 2.2.0 schema and re-validate
+    schemaTool.doUpgrade("2.2.0");
     isValid = (boolean)schemaTool.validateSchemaTables(conn);
     assertTrue(isValid);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index 6e9223a..4a82aa5 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.hive.ql.processors.DfsProcessor;
 import org.apache.hive.common.util.HiveVersionInfo;
 import org.apache.hive.jdbc.Utils.JdbcConnectionParams;
-import org.apache.hive.service.cli.HiveSQLException;
 import org.apache.hive.service.cli.operation.ClassicTableTypeMapping;
 import org.apache.hive.service.cli.operation.ClassicTableTypeMapping.ClassicTableTypes;
 import org.apache.hive.service.cli.operation.HiveTableTypeMapping;
@@ -578,7 +577,7 @@ public class TestJdbcDriver2 {
 
   @Test
   public void testSetOnConnection() throws Exception {
-    Connection connection = getConnection(testDbName + "?conf1=conf2;conf3=conf4#var1=var2;var3=var4");
+    Connection connection = getConnection("test?conf1=conf2;conf3=conf4#var1=var2;var3=var4");
     try {
       verifyConfValue(connection, "conf1", "conf2");
       verifyConfValue(connection, "conf3", "conf4");
@@ -1084,45 +1083,6 @@ public class TestJdbcDriver2 {
   }
 
   @Test
-  public void testShowTablesInDb() throws SQLException {
-    Statement stmt = con.createStatement();
-    assertNotNull("Statement is null", stmt);
-
-    String tableNameInDbUnique = tableName + "_unique";
-    // create a table with a unique name in testDb
-    stmt.execute("drop table if exists " + testDbName + "." + tableNameInDbUnique);
-    stmt.execute("create table " + testDbName + "." + tableNameInDbUnique
-        + " (under_col int comment 'the under column', value string) comment '" + tableComment
-        + "'");
-
-    ResultSet res = stmt.executeQuery("show tables in " + testDbName);
-
-    boolean testTableExists = false;
-    while (res.next()) {
-      assertNotNull("table name is null in result set", res.getString(1));
-      if (tableNameInDbUnique.equalsIgnoreCase(res.getString(1))) {
-        testTableExists = true;
-      }
-    }
-    assertTrue("table name " + tableNameInDbUnique
-        + " not found in SHOW TABLES result set", testTableExists);
-    stmt.execute("drop table if exists " + testDbName + "." + tableNameInDbUnique);
-    stmt.close();
-  }
-
-  @Test
-  public void testInvalidShowTables() throws SQLException {
-    Statement stmt = con.createStatement();
-    assertNotNull("Statement is null", stmt);
-
-    //show tables <dbname> is in invalid show tables syntax. Hive does not return
-    //any tables in this case
-    ResultSet res = stmt.executeQuery("show tables " + testDbName);
-    assertFalse(res.next());
-    stmt.close();
-  }
-
-  @Test
   public void testMetaDataGetTables() throws SQLException {
     getTablesTest(ImmutableSet.of(ClassicTableTypes.TABLE.toString()),
         ClassicTableTypes.VIEW.toString());
@@ -2923,10 +2883,4 @@ public class TestJdbcDriver2 {
     assertEquals(rowCount, dataFileRowCount);
     stmt.execute("drop table " + tblName);
   }
-
-  // Test that opening a JDBC connection to a non-existent database throws a HiveSQLException
-  @Test(expected = HiveSQLException.class)
-  public void testConnectInvalidDatabase() throws SQLException {
-    DriverManager.getConnection("jdbc:hive2:///databasedoesnotexist", "", "");
-  }
 }

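The testSetOnConnection change above relies on the Hive JDBC URL layout in which session configuration properties follow '?' and Hive variables follow '#', each as ';'-separated key=value pairs. A minimal sketch of such a URL, with a hypothetical host, port and database name:

public class HiveJdbcUrlSketch {
  public static void main(String[] args) {
    // Illustrative only; host, port and database are hypothetical.
    // Session confs follow '?', Hive variables follow '#', both as
    // ';'-separated key=value pairs, as exercised by testSetOnConnection above.
    String url = "jdbc:hive2://localhost:10000/testdb"
        + "?conf1=conf2;conf3=conf4"   // set on the session as Hive configuration
        + "#var1=var2;var3=var4";      // set on the session as Hive variables
    System.out.println(url);
    // A connection would then be opened with:
    // Connection con = DriverManager.getConnection(url, user, password);
  }
}
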
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
index fc2cb08..afe23f8 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
@@ -112,14 +112,6 @@ public class TestJdbcWithMiniHS2 {
     stmt.execute("drop database if exists " + testDbName + " cascade");
     stmt.execute("create database " + testDbName);
     stmt.close();
-
-    try {
-      openTestConnections();
-    } catch (Exception e) {
-      System.out.println("Unable to open default connections to MiniHS2: " + e);
-      throw e;
-    }
-
     // tables in test db
     createTestTables(conTestDb, testDbName);
   }
@@ -191,7 +183,6 @@ public class TestJdbcWithMiniHS2 {
     HiveConf conf = new HiveConf();
     startMiniHS2(conf);
     openDefaultConnections();
-    openTestConnections();
   }
 
   private static void startMiniHS2(HiveConf conf) throws Exception {
@@ -217,9 +208,6 @@ public class TestJdbcWithMiniHS2 {
 
   private static void openDefaultConnections() throws Exception {
     conDefault = getConnection();
-  }
-
-  private static void openTestConnections() throws Exception {
     conTestDb = getConnection(testDbName);
   }
 
@@ -978,38 +966,6 @@ public class TestJdbcWithMiniHS2 {
   }
 
   /**
-   * Test for jdbc driver retry on NoHttpResponseException
-   * @throws Exception
-   */
-  @Test
-  public void testHttpRetryOnServerIdleTimeout() throws Exception {
-    // Stop HiveServer2
-    stopMiniHS2();
-    HiveConf conf = new HiveConf();
-    conf.set("hive.server2.transport.mode", "http");
-    // Set server's idle timeout to a very low value
-    conf.set("hive.server2.thrift.http.max.idle.time", "5");
-    startMiniHS2(conf);
-    String userName = System.getProperty("user.name");
-    Connection conn = getConnection(miniHS2.getJdbcURL(testDbName), userName, "password");
-    Statement stmt = conn.createStatement();
-    stmt.execute("select from_unixtime(unix_timestamp())");
-    // Sleep for longer than server's idletimeout and execute a query
-    TimeUnit.SECONDS.sleep(10);
-    try {
-      stmt.execute("select from_unixtime(unix_timestamp())");
-    } catch (Exception e) {
-      fail("Not expecting exception: " + e);
-    } finally {
-      if (conn != null) {
-        conn.close();
-      }
-    }
-    // Restore original state
-    restoreMiniHS2AndConnections();
-  }
-
-  /**
    * Tests that DataNucleus' NucleusContext.classLoaderResolverMap clears cached class objects
    * (& hence doesn't leak classloaders) on closing any session
    *

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java
index d227275..0a53259 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java
@@ -17,12 +17,12 @@
  */
 
 package org.apache.hive.jdbc;
-
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.File;
+import java.net.URLEncoder;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
@@ -35,10 +35,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-
 import org.apache.hive.jdbc.miniHS2.MiniHS2;
-import org.hadoop.hive.jdbc.SSLTestUtils;
-
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -47,13 +44,10 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 public class TestSSL {
-
   private static final Logger LOG = LoggerFactory.getLogger(TestSSL.class);
   private static final String LOCALHOST_KEY_STORE_NAME = "keystore.jks";
   private static final String EXAMPLEDOTCOM_KEY_STORE_NAME = "keystore_exampledotcom.jks";
@@ -61,12 +55,19 @@ public class TestSSL {
   private static final String KEY_STORE_TRUST_STORE_PASSWORD = "HiveJdbc";
   private static final String JAVA_TRUST_STORE_PROP = "javax.net.ssl.trustStore";
   private static final String JAVA_TRUST_STORE_PASS_PROP = "javax.net.ssl.trustStorePassword";
+  private static final String HS2_BINARY_MODE = "binary";
+  private static final String HS2_HTTP_MODE = "http";
+  private static final String HS2_HTTP_ENDPOINT = "cliservice";
+  private static final String HS2_BINARY_AUTH_MODE = "NONE";
 
   private MiniHS2 miniHS2 = null;
   private static HiveConf conf = new HiveConf();
   private Connection hs2Conn = null;
-  private String dataFileDir = SSLTestUtils.getDataFileDir();
+  private String dataFileDir = conf.get("test.data.files");
   private Map<String, String> confOverlay;
+  private final String SSL_CONN_PARAMS = "ssl=true;sslTrustStore="
+      + URLEncoder.encode(dataFileDir + File.separator + TRUST_STORE_NAME) + ";trustStorePassword="
+      + KEY_STORE_TRUST_STORE_PASSWORD;
 
   @BeforeClass
   public static void beforeTest() throws Exception {
@@ -82,6 +83,10 @@ public class TestSSL {
   @Before
   public void setUp() throws Exception {
     DriverManager.setLoginTimeout(0);
+    if (!System.getProperty("test.data.files", "").isEmpty()) {
+      dataFileDir = System.getProperty("test.data.files");
+    }
+    dataFileDir = dataFileDir.replace('\\', '/').replace("c:", "");
     miniHS2 = new MiniHS2.Builder().withConf(conf).cleanupLocalDirOnStartup(false).build();
     confOverlay = new HashMap<String, String>();
   }
@@ -122,9 +127,9 @@ public class TestSSL {
     // we depend on linux openssl exit codes
     Assume.assumeTrue(System.getProperty("os.name").toLowerCase().contains("linux"));
 
-    SSLTestUtils.setSslConfOverlay(confOverlay);
+    setSslConfOverlay(confOverlay);
     // Test in binary mode
-    SSLTestUtils.setBinaryConfOverlay(confOverlay);
+    setBinaryConfOverlay(confOverlay);
     // Start HS2 with SSL
     miniHS2.start(confOverlay);
 
@@ -141,7 +146,7 @@ public class TestSSL {
     miniHS2.stop();
 
     // Test in http mode
-    SSLTestUtils.setHttpConfOverlay(confOverlay);
+    setHttpConfOverlay(confOverlay);
     miniHS2.start(confOverlay);
     // make SSL connection
     try {
@@ -170,13 +175,13 @@ public class TestSSL {
    */
   @Test
   public void testInvalidConfig() throws Exception {
-    SSLTestUtils.clearSslConfOverlay(confOverlay);
+    clearSslConfOverlay(confOverlay);
     // Test in binary mode
-    SSLTestUtils.setBinaryConfOverlay(confOverlay);
+    setBinaryConfOverlay(confOverlay);
     miniHS2.start(confOverlay);
     DriverManager.setLoginTimeout(4);
     try {
-      hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL("default", SSLTestUtils.SSL_CONN_PARAMS),
+      hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL("default", SSL_CONN_PARAMS),
           System.getProperty("user.name"), "bar");
       fail("SSL connection should fail with NON-SSL server");
     } catch (SQLException e) {
@@ -199,10 +204,10 @@ public class TestSSL {
     // Test in http mode with ssl properties specified in url
     System.clearProperty(JAVA_TRUST_STORE_PROP);
     System.clearProperty(JAVA_TRUST_STORE_PASS_PROP);
-    SSLTestUtils.setHttpConfOverlay(confOverlay);
+    setHttpConfOverlay(confOverlay);
     miniHS2.start(confOverlay);
     try {
-      hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL("default", SSLTestUtils.SSL_CONN_PARAMS),
+      hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL("default", SSL_CONN_PARAMS),
           System.getProperty("user.name"), "bar");
       fail("SSL connection should fail with NON-SSL server");
     } catch (SQLException e) {
@@ -218,9 +223,9 @@ public class TestSSL {
    */
   @Test
   public void testConnectionMismatch() throws Exception {
-    SSLTestUtils.setSslConfOverlay(confOverlay);
+    setSslConfOverlay(confOverlay);
     // Test in binary mode
-    SSLTestUtils.setBinaryConfOverlay(confOverlay);
+    setBinaryConfOverlay(confOverlay);
     miniHS2.start(confOverlay);
     // Start HS2 with SSL
     try {
@@ -242,7 +247,7 @@ public class TestSSL {
     miniHS2.stop();
 
     // Test in http mode
-    SSLTestUtils.setHttpConfOverlay(confOverlay);
+    setHttpConfOverlay(confOverlay);
     miniHS2.start(confOverlay);
     try {
       hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL("default", ";ssl=false"),
@@ -261,23 +266,23 @@ public class TestSSL {
    */
   @Test
   public void testSSLConnectionWithURL() throws Exception {
-    SSLTestUtils.setSslConfOverlay(confOverlay);
+    setSslConfOverlay(confOverlay);
     // Test in binary mode
-    SSLTestUtils.setBinaryConfOverlay(confOverlay);
+    setBinaryConfOverlay(confOverlay);
     // Start HS2 with SSL
     miniHS2.start(confOverlay);
 
     // make SSL connection
-    hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL("default", SSLTestUtils.SSL_CONN_PARAMS),
+    hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL("default", SSL_CONN_PARAMS),
         System.getProperty("user.name"), "bar");
     hs2Conn.close();
     miniHS2.stop();
 
     // Test in http mode
-    SSLTestUtils.setHttpConfOverlay(confOverlay);
+    setHttpConfOverlay(confOverlay);
     miniHS2.start(confOverlay);
     // make SSL connection
-    hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL("default", SSLTestUtils.SSL_CONN_PARAMS),
+    hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL("default", SSL_CONN_PARAMS),
         System.getProperty("user.name"), "bar");
     hs2Conn.close();
   }
@@ -288,9 +293,9 @@ public class TestSSL {
    */
   @Test
   public void testSSLConnectionWithProperty() throws Exception {
-    SSLTestUtils.setSslConfOverlay(confOverlay);
+    setSslConfOverlay(confOverlay);
     // Test in binary mode
-    SSLTestUtils.setBinaryConfOverlay(confOverlay);
+    setBinaryConfOverlay(confOverlay);
     // Start HS2 with SSL
     miniHS2.start(confOverlay);
 
@@ -303,10 +308,10 @@ public class TestSSL {
     miniHS2.stop();
 
     // Test in http mode
-    SSLTestUtils.setHttpConfOverlay(confOverlay);
+    setHttpConfOverlay(confOverlay);
     miniHS2.start(confOverlay);
     // make SSL connection
-    hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL("default",SSLTestUtils.SSL_CONN_PARAMS),
+    hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL("default", SSL_CONN_PARAMS),
         System.getProperty("user.name"), "bar");
     hs2Conn.close();
   }
@@ -317,9 +322,9 @@ public class TestSSL {
    */
   @Test
   public void testSSLFetch() throws Exception {
-    SSLTestUtils.setSslConfOverlay(confOverlay);
+    setSslConfOverlay(confOverlay);
     // Test in binary mode
-    SSLTestUtils.setBinaryConfOverlay(confOverlay);
+    setBinaryConfOverlay(confOverlay);
     // Start HS2 with SSL
     miniHS2.start(confOverlay);
 
@@ -327,11 +332,11 @@ public class TestSSL {
     Path dataFilePath = new Path(dataFileDir, "kv1.txt");
 
     // make SSL connection
-    hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL("default", SSLTestUtils.SSL_CONN_PARAMS),
+    hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL("default", SSL_CONN_PARAMS),
         System.getProperty("user.name"), "bar");
 
     // Set up test data
-    SSLTestUtils.setupTestTableWithData(tableName, dataFilePath, hs2Conn);
+    setupTestTableWithData(tableName, dataFilePath, hs2Conn);
 
     Statement stmt = hs2Conn.createStatement();
     ResultSet res = stmt.executeQuery("SELECT * FROM " + tableName);
@@ -352,20 +357,20 @@ public class TestSSL {
    */
   @Test
   public void testSSLFetchHttp() throws Exception {
-    SSLTestUtils.setSslConfOverlay(confOverlay);
+    setSslConfOverlay(confOverlay);
     // Test in http mode
-    SSLTestUtils.setHttpConfOverlay(confOverlay);
+    setHttpConfOverlay(confOverlay);
     miniHS2.start(confOverlay);
 
     String tableName = "sslTab";
     Path dataFilePath = new Path(dataFileDir, "kv1.txt");
 
     // make SSL connection
-    hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL("default", SSLTestUtils.SSL_CONN_PARAMS),
+    hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL("default", SSL_CONN_PARAMS),
         System.getProperty("user.name"), "bar");
 
     // Set up test data
-    SSLTestUtils.setupTestTableWithData(tableName, dataFilePath, hs2Conn);
+    setupTestTableWithData(tableName, dataFilePath, hs2Conn);
     Statement stmt = hs2Conn.createStatement();
     ResultSet res = stmt.executeQuery("SELECT * FROM " + tableName);
     int rowCount = 0;
@@ -388,16 +393,16 @@ public class TestSSL {
   @Test
   public void testConnectionWrongCertCN() throws Exception {
     // This call sets the default ssl params including the correct keystore in the server config
-    SSLTestUtils.setSslConfOverlay(confOverlay);
+    setSslConfOverlay(confOverlay);
     // Replace default keystore with keystore for www.example.com
     confOverlay.put(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH.varname, dataFileDir + File.separator
         + EXAMPLEDOTCOM_KEY_STORE_NAME);
     // Binary (TCP) mode
-    SSLTestUtils.setBinaryConfOverlay(confOverlay);
+    setBinaryConfOverlay(confOverlay);
     miniHS2.start(confOverlay);
     try {
       hs2Conn =
-          DriverManager.getConnection(miniHS2.getJdbcURL("default", SSLTestUtils.SSL_CONN_PARAMS),
+          DriverManager.getConnection(miniHS2.getJdbcURL("default", SSL_CONN_PARAMS),
               System.getProperty("user.name"), "bar");
       fail("SSL connection, with the server providing wrong certifcate (with CN www.example.com, "
           + "instead of localhost), should fail");
@@ -410,11 +415,11 @@ public class TestSSL {
     miniHS2.stop();
 
     // Http mode
-    SSLTestUtils.setHttpConfOverlay(confOverlay);
+    setHttpConfOverlay(confOverlay);
     miniHS2.start(confOverlay);
     try {
       hs2Conn =
-          DriverManager.getConnection(miniHS2.getJdbcURL("default", SSLTestUtils.SSL_CONN_PARAMS),
+          DriverManager.getConnection(miniHS2.getJdbcURL("default", SSL_CONN_PARAMS),
               System.getProperty("user.name"), "bar");
       fail("SSL connection, with the server providing wrong certifcate (with CN www.example.com, "
           + "instead of localhost), should fail");
@@ -434,10 +439,10 @@ public class TestSSL {
    */
   @Test
   public void testMetastoreWithSSL() throws Exception {
-    SSLTestUtils.setMetastoreSslConf(conf);
-    SSLTestUtils.setSslConfOverlay(confOverlay);
+    setMetastoreSslConf(conf);
+    setSslConfOverlay(confOverlay);
     // Test in http mode
-    SSLTestUtils.setHttpConfOverlay(confOverlay);
+    setHttpConfOverlay(confOverlay);
     miniHS2 = new MiniHS2.Builder().withRemoteMetastore().withConf(conf).cleanupLocalDirOnStartup(false).build();
     miniHS2.start(confOverlay);
 
@@ -445,11 +450,11 @@ public class TestSSL {
     Path dataFilePath = new Path(dataFileDir, "kv1.txt");
 
     // make SSL connection
-    hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL("default", SSLTestUtils.SSL_CONN_PARAMS),
+    hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL("default", SSL_CONN_PARAMS),
         System.getProperty("user.name"), "bar");
 
     // Set up test data
-    SSLTestUtils.setupTestTableWithData(tableName, dataFilePath, hs2Conn);
+    setupTestTableWithData(tableName, dataFilePath, hs2Conn);
     Statement stmt = hs2Conn.createStatement();
     ResultSet res = stmt.executeQuery("SELECT * FROM " + tableName);
     int rowCount = 0;
@@ -469,7 +474,7 @@ public class TestSSL {
    */
   @Test
   public void testMetastoreConnectionWrongCertCN() throws Exception {
-    SSLTestUtils.setMetastoreSslConf(conf);
+    setMetastoreSslConf(conf);
     conf.setVar(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH,
         dataFileDir + File.separator +  EXAMPLEDOTCOM_KEY_STORE_NAME);
     miniHS2 = new MiniHS2.Builder().withRemoteMetastore().withConf(conf).cleanupLocalDirOnStartup(false).build();
@@ -481,4 +486,55 @@ public class TestSSL {
 
     miniHS2.stop();
   }
+
+  private void setupTestTableWithData(String tableName, Path dataFilePath,
+      Connection hs2Conn) throws Exception {
+    Statement stmt = hs2Conn.createStatement();
+    stmt.execute("set hive.support.concurrency = false");
+
+    stmt.execute("drop table if exists " + tableName);
+    stmt.execute("create table " + tableName
+        + " (under_col int comment 'the under column', value string)");
+
+    // load data
+    stmt.execute("load data local inpath '"
+        + dataFilePath.toString() + "' into table " + tableName);
+    stmt.close();
+  }
+
+  private void setSslConfOverlay(Map<String, String> confOverlay) {
+    confOverlay.put(ConfVars.HIVE_SERVER2_USE_SSL.varname, "true");
+    confOverlay.put(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH.varname,
+        dataFileDir + File.separator +  LOCALHOST_KEY_STORE_NAME);
+    confOverlay.put(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname,
+        KEY_STORE_TRUST_STORE_PASSWORD);
+  }
+
+  private void setMetastoreSslConf(HiveConf conf) {
+    conf.setBoolVar(ConfVars.HIVE_METASTORE_USE_SSL, true);
+    conf.setVar(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH,
+        dataFileDir + File.separator +  LOCALHOST_KEY_STORE_NAME);
+    conf.setVar(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PASSWORD,
+        KEY_STORE_TRUST_STORE_PASSWORD);
+    conf.setVar(ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PATH,
+        dataFileDir + File.separator +  TRUST_STORE_NAME);
+    conf.setVar(ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD,
+        KEY_STORE_TRUST_STORE_PASSWORD);
+  }
+
+  private void clearSslConfOverlay(Map<String, String> confOverlay) {
+    confOverlay.put(ConfVars.HIVE_SERVER2_USE_SSL.varname, "false");
+  }
+
+  private void setHttpConfOverlay(Map<String, String> confOverlay) {
+    confOverlay.put(ConfVars.HIVE_SERVER2_TRANSPORT_MODE.varname, HS2_HTTP_MODE);
+    confOverlay.put(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PATH.varname, HS2_HTTP_ENDPOINT);
+    confOverlay.put(ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "true");
+  }
+
+  private void setBinaryConfOverlay(Map<String, String> confOverlay) {
+    confOverlay.put(ConfVars.HIVE_SERVER2_TRANSPORT_MODE.varname, HS2_BINARY_MODE);
+    confOverlay.put(ConfVars.HIVE_SERVER2_AUTHENTICATION.varname,  HS2_BINARY_AUTH_MODE);
+    confOverlay.put(ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "true");
+  }
 }


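The SSL_CONN_PARAMS field reintroduced in the TestSSL hunk above is simply a ';'-separated parameter string appended to the JDBC URL. A minimal sketch of how it is put together, with a hypothetical truststore path and password and the same URLEncoder step the test uses:

import java.io.File;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;

public class SslConnParamsSketch {
  public static void main(String[] args) throws UnsupportedEncodingException {
    // Hypothetical values for illustration; the test reads the directory from test.data.files.
    String dataFileDir = "/tmp/hive-test-data";
    String trustStoreName = "truststore.jks";
    String password = "HiveJdbc";
    // The truststore path is URL-encoded before being embedded in the JDBC URL.
    String sslParams = "ssl=true;sslTrustStore="
        + URLEncoder.encode(dataFileDir + File.separator + trustStoreName, "UTF-8")
        + ";trustStorePassword=" + password;
    // Appended to the URL, e.g. jdbc:hive2://host:port/default;<sslParams>
    System.out.println(sslParams);
  }
}
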
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index 3670de1..c8b0d4c 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hive.metastore;
 import static org.apache.commons.lang.StringUtils.join;
 import static org.apache.commons.lang.StringUtils.repeat;
 
-import java.sql.Clob;
 import java.sql.Connection;
 import java.sql.Statement;
 import java.sql.SQLException;
@@ -61,8 +60,6 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.cache.CacheUtils;
-import org.apache.hadoop.hive.metastore.cache.CachedStore;
 import org.apache.hadoop.hive.metastore.model.MConstraint;
 import org.apache.hadoop.hive.metastore.model.MDatabase;
 import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics;
@@ -81,7 +78,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 
 /**
  * This class contains the optimizations for MetaStore that rely on direct SQL access to
@@ -652,7 +648,7 @@ class MetaStoreDirectSql {
     loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc<StorageDescriptor>() {
       @Override
       public void apply(StorageDescriptor t, Object[] fields) {
-        t.putToParameters((String)fields[1], extractSqlClob(fields[2]));
+        t.putToParameters((String)fields[1], (String)fields[2]);
       }});
     // Perform conversion of null map values
     for (StorageDescriptor t : sds.values()) {
@@ -783,7 +779,7 @@ class MetaStoreDirectSql {
       loopJoinOrderedResult(colss, queryText, 0, new ApplyFunc<List<FieldSchema>>() {
         @Override
         public void apply(List<FieldSchema> t, Object[] fields) {
-          t.add(new FieldSchema((String)fields[2], extractSqlClob(fields[3]), (String)fields[1]));
+          t.add(new FieldSchema((String)fields[2], (String)fields[3], (String)fields[1]));
         }});
     }
 
@@ -794,7 +790,7 @@ class MetaStoreDirectSql {
     loopJoinOrderedResult(serdes, queryText, 0, new ApplyFunc<SerDeInfo>() {
       @Override
       public void apply(SerDeInfo t, Object[] fields) {
-        t.putToParameters((String)fields[1], extractSqlClob(fields[2]));
+        t.putToParameters((String)fields[1], (String)fields[2]);
       }});
     // Perform conversion of null map values
     for (SerDeInfo t : serdes.values()) {
@@ -882,21 +878,6 @@ class MetaStoreDirectSql {
     return ((Number) obj).doubleValue();
   }
 
-  private String extractSqlClob(Object value) {
-    if (value == null) return null;
-    try {
-      if (value instanceof Clob) {
-        // we trim the Clob value to a max length an int can hold
-        int maxLength = (((Clob)value).length() < Integer.MAX_VALUE - 2) ? (int)((Clob)value).length() : Integer.MAX_VALUE - 2;
-        return ((Clob)value).getSubString(1L, maxLength);
-      } else {
-        return value.toString();
-      }
-    } catch (SQLException sqle) {
-      return null;
-    }
-  }
-
   private static String trimCommaList(StringBuilder sb) {
     if (sb.length() > 0) {
       sb.setLength(sb.length() - 1);
@@ -1209,7 +1190,7 @@ class MetaStoreDirectSql {
   }
 
   public AggrStats aggrColStatsForPartitions(String dbName, String tableName,
-      List<String> partNames, List<String> colNames, boolean useDensityFunctionForNDVEstimation, double  ndvTuner)
+      List<String> partNames, List<String> colNames, boolean useDensityFunctionForNDVEstimation)
       throws MetaException {
     if (colNames.isEmpty() || partNames.isEmpty()) {
       LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval");
@@ -1244,7 +1225,7 @@ class MetaStoreDirectSql {
           // Read aggregated stats for one column
           colStatsAggrFromDB =
               columnStatisticsObjForPartitions(dbName, tableName, partNames, colNamesForDB,
-                  partsFound, useDensityFunctionForNDVEstimation, ndvTuner);
+                  partsFound, useDensityFunctionForNDVEstimation);
           if (!colStatsAggrFromDB.isEmpty()) {
             ColumnStatisticsObj colStatsAggr = colStatsAggrFromDB.get(0);
             colStatsList.add(colStatsAggr);
@@ -1257,7 +1238,7 @@ class MetaStoreDirectSql {
       partsFound = partsFoundForPartitions(dbName, tableName, partNames, colNames);
       colStatsList =
           columnStatisticsObjForPartitions(dbName, tableName, partNames, colNames, partsFound,
-              useDensityFunctionForNDVEstimation, ndvTuner);
+              useDensityFunctionForNDVEstimation);
     }
     LOG.info("useDensityFunctionForNDVEstimation = " + useDensityFunctionForNDVEstimation
         + "\npartsFound = " + partsFound + "\nColumnStatisticsObj = "
@@ -1320,81 +1301,24 @@ class MetaStoreDirectSql {
 
   private List<ColumnStatisticsObj> columnStatisticsObjForPartitions(final String dbName,
     final String tableName, final List<String> partNames, List<String> colNames, long partsFound,
-    final boolean useDensityFunctionForNDVEstimation, final double  ndvTuner) throws MetaException {
+    final boolean useDensityFunctionForNDVEstimation) throws MetaException {
     final boolean areAllPartsFound = (partsFound == partNames.size());
     return runBatched(colNames, new Batchable<String, ColumnStatisticsObj>() {
       public List<ColumnStatisticsObj> run(final List<String> inputColNames) throws MetaException {
         return runBatched(partNames, new Batchable<String, ColumnStatisticsObj>() {
           public List<ColumnStatisticsObj> run(List<String> inputPartNames) throws MetaException {
             return columnStatisticsObjForPartitionsBatch(dbName, tableName, inputPartNames,
-                inputColNames, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner);
+                inputColNames, areAllPartsFound, useDensityFunctionForNDVEstimation);
           }
         });
       }
     });
   }
 
-  // Get aggregated column stats for a table per partition for all columns in the partition
-  // This is primarily used to populate stats object when using CachedStore (Check CachedStore#prewarm)
-  public Map<String, ColumnStatisticsObj> getAggrColStatsForTablePartitions(String dbName,
-      String tblName, boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException {
-    String queryText = "select \"PARTITION_NAME\", \"COLUMN_NAME\", \"COLUMN_TYPE\", "
-        + "min(\"LONG_LOW_VALUE\"), max(\"LONG_HIGH_VALUE\"), min(\"DOUBLE_LOW_VALUE\"), max(\"DOUBLE_HIGH_VALUE\"), "
-        + "min(cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal)), max(cast(\"BIG_DECIMAL_HIGH_VALUE\" as decimal)), "
-        + "sum(\"NUM_NULLS\"), max(\"NUM_DISTINCTS\"), "
-        + "max(\"AVG_COL_LEN\"), max(\"MAX_COL_LEN\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\"), "
-        // The following data is used to compute a partitioned table's NDV based
-        // on partitions' NDV when useDensityFunctionForNDVEstimation = true. Global NDVs cannot be
-        // accurately derived from partition NDVs, because the domain of column value two partitions
-        // can overlap. If there is no overlap then global NDV is just the sum
-        // of partition NDVs (UpperBound). But if there is some overlay then
-        // global NDV can be anywhere between sum of partition NDVs (no overlap)
-        // and same as one of the partition NDV (domain of column value in all other
-        // partitions is subset of the domain value in one of the partition)
-        // (LowerBound).But under uniform distribution, we can roughly estimate the global
-        // NDV by leveraging the min/max values.
-        // And, we also guarantee that the estimation makes sense by comparing it to the
-        // UpperBound (calculated by "sum(\"NUM_DISTINCTS\")")
-        // and LowerBound (calculated by "max(\"NUM_DISTINCTS\")")
-        + "avg((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" as decimal)),"
-        + "avg((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
-        + "avg((cast(\"BIG_DECIMAL_HIGH_VALUE\" as decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\"),"
-        + "sum(\"NUM_DISTINCTS\") from \"PART_COL_STATS\""
-        + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? group by \"PARTITION_NAME\", \"COLUMN_NAME\", \"COLUMN_TYPE\"";
-    long start = 0;
-    long end = 0;
-    Query query = null;
-    boolean doTrace = LOG.isDebugEnabled();
-    Object qResult = null;
-    ForwardQueryResult fqr = null;
-    start = doTrace ? System.nanoTime() : 0;
-    query = pm.newQuery("javax.jdo.query.SQL", queryText);
-    qResult = executeWithArray(query,
-        prepareParams(dbName, tblName, new ArrayList<String>(), new ArrayList<String>()), queryText);
-    if (qResult == null) {
-      query.closeAll();
-      return Maps.newHashMap();
-    }
-    end = doTrace ? System.nanoTime() : 0;
-    timingTrace(doTrace, queryText, start, end);
-    List<Object[]> list = ensureList(qResult);
-    Map<String, ColumnStatisticsObj> partColStatsMap = new HashMap<String, ColumnStatisticsObj>();
-    for (Object[] row : list) {
-      String partName = (String) row[0];
-      String colName = (String) row[1];
-      partColStatsMap.put(
-          CacheUtils.buildKey(dbName, tblName, CachedStore.partNameToVals(partName), colName),
-          prepareCSObjWithAdjustedNDV(row, 1, useDensityFunctionForNDVEstimation, ndvTuner));
-      Deadline.checkTimeout();
-    }
-    query.closeAll();
-    return partColStatsMap;
-  }
-
   /** Should be called with the list short enough to not trip up Oracle/etc. */
   private List<ColumnStatisticsObj> columnStatisticsObjForPartitionsBatch(String dbName,
       String tableName, List<String> partNames, List<String> colNames, boolean areAllPartsFound,
-      boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException {
+      boolean useDensityFunctionForNDVEstimation) throws MetaException {
     // TODO: all the extrapolation logic should be moved out of this class,
     // only mechanical data retrieval should remain here.
     String commonPrefix = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", "
@@ -1446,7 +1370,7 @@ class MetaStoreDirectSql {
       List<Object[]> list = ensureList(qResult);
       List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>(list.size());
       for (Object[] row : list) {
-        colStats.add(prepareCSObjWithAdjustedNDV(row, 0, useDensityFunctionForNDVEstimation, ndvTuner));
+        colStats.add(prepareCSObjWithAdjustedNDV(row, 0, useDensityFunctionForNDVEstimation));
         Deadline.checkTimeout();
       }
       query.closeAll();
@@ -1505,7 +1429,7 @@ class MetaStoreDirectSql {
         }
         list = ensureList(qResult);
         for (Object[] row : list) {
-          colStats.add(prepareCSObjWithAdjustedNDV(row, 0, useDensityFunctionForNDVEstimation, ndvTuner));
+          colStats.add(prepareCSObjWithAdjustedNDV(row, 0, useDensityFunctionForNDVEstimation));
           Deadline.checkTimeout();
         }
         end = doTrace ? System.nanoTime() : 0;
@@ -1652,7 +1576,7 @@ class MetaStoreDirectSql {
               query.closeAll();
             }
           }
-          colStats.add(prepareCSObjWithAdjustedNDV(row, 0, useDensityFunctionForNDVEstimation, ndvTuner));
+          colStats.add(prepareCSObjWithAdjustedNDV(row, 0, useDensityFunctionForNDVEstimation));
           Deadline.checkTimeout();
         }
       }
@@ -1672,13 +1596,13 @@ class MetaStoreDirectSql {
   }
 
   private ColumnStatisticsObj prepareCSObjWithAdjustedNDV(Object[] row, int i,
-      boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException {
+      boolean useDensityFunctionForNDVEstimation) throws MetaException {
     ColumnStatisticsData data = new ColumnStatisticsData();
     ColumnStatisticsObj cso = new ColumnStatisticsObj((String) row[i++], (String) row[i++], data);
     Object llow = row[i++], lhigh = row[i++], dlow = row[i++], dhigh = row[i++], declow = row[i++], dechigh = row[i++], nulls = row[i++], dist = row[i++], avglen = row[i++], maxlen = row[i++], trues = row[i++], falses = row[i++], avgLong = row[i++], avgDouble = row[i++], avgDecimal = row[i++], sumDist = row[i++];
     StatObjectConverter.fillColumnStatisticsData(cso.getColType(), data, llow, lhigh, dlow, dhigh,
         declow, dechigh, nulls, dist, avglen, maxlen, trues, falses, avgLong, avgDouble,
-        avgDecimal, sumDist, useDensityFunctionForNDVEstimation, ndvTuner);
+        avgDecimal, sumDist, useDensityFunctionForNDVEstimation);
     return cso;
   }
 

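The comment block in the removed getAggrColStatsForTablePartitions query above describes how a partitioned table's global NDV is bounded: at least the largest per-partition NDV (complete overlap of the partitions' value domains) and at most the sum of per-partition NDVs (no overlap), with a density-based estimate used in between when useDensityFunctionForNDVEstimation is on. A minimal standalone sketch of that bounding idea, not the actual StatObjectConverter logic; avgRangePerDistinctValue stands in for the avg((HIGH-LOW)/NUM_DISTINCTS) expression the query computes:

import java.util.Arrays;

public class NdvAggregationSketch {
  // Clamp a density-based estimate between the lower bound (max of per-partition
  // NDVs, full value-domain overlap) and the upper bound (sum of per-partition
  // NDVs, no overlap), as described in the query comment above.
  static long estimateGlobalNdv(long[] partitionNdvs, long globalLow, long globalHigh,
                                double avgRangePerDistinctValue) {
    long lowerBound = Arrays.stream(partitionNdvs).max().orElse(0);
    long upperBound = Arrays.stream(partitionNdvs).sum();
    // Density-based guess under a roughly uniform value distribution.
    long densityEstimate = (long) ((globalHigh - globalLow) / avgRangePerDistinctValue);
    return Math.max(lowerBound, Math.min(upperBound, densityEstimate));
  }

  public static void main(String[] args) {
    // Two partitions with 100 and 150 distinct values over the range [0, 1000]:
    System.out.println(estimateGlobalNdv(new long[]{100, 150}, 0, 1000, 5.0)); // 200
  }
}
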
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java
index 868e5a5..b0defb5 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java
@@ -75,17 +75,19 @@ public abstract class MetaStoreEventListener implements Configurable {
   }
 
   /**
-   * @param tableEvent alter table event
+   * @param add partition event
    * @throws MetaException
    */
-  public void onAlterTable (AlterTableEvent tableEvent) throws MetaException {
-  }
 
   /**
-   * @param partitionEvent add partition event
+   * @param tableEvent alter table event
    * @throws MetaException
    */
-  public void onAddPartition (AddPartitionEvent partitionEvent) throws MetaException {
+  public void onAlterTable (AlterTableEvent tableEvent) throws MetaException {
+  }
+
+  public void onAddPartition (AddPartitionEvent partitionEvent)
+      throws MetaException {
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java
deleted file mode 100644
index 20011cc..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java
+++ /dev/null
@@ -1,224 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Maps;
-import org.apache.hadoop.hive.common.classification.InterfaceAudience.Private;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
-import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
-import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
-import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent;
-import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
-import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.DropFunctionEvent;
-import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
-import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.DropTableEvent;
-import org.apache.hadoop.hive.metastore.events.InsertEvent;
-import org.apache.hadoop.hive.metastore.events.ListenerEvent;
-
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
-
-/**
- * This class is used to notify a list of listeners about specific MetaStore events.
- */
-@Private
-public class MetaStoreListenerNotifier {
-  private interface EventNotifier {
-    void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException;
-  }
-
-  private static Map<EventType, EventNotifier> notificationEvents = Maps.newHashMap(
-      ImmutableMap.<EventType, EventNotifier>builder()
-          .put(EventType.CREATE_DATABASE, new EventNotifier() {
-            @Override
-            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
-              listener.onCreateDatabase((CreateDatabaseEvent)event);
-            }
-          })
-          .put(EventType.DROP_DATABASE, new EventNotifier() {
-            @Override
-            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
-              listener.onDropDatabase((DropDatabaseEvent)event);
-            }
-          })
-          .put(EventType.CREATE_TABLE, new EventNotifier() {
-            @Override
-            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
-              listener.onCreateTable((CreateTableEvent)event);
-            }
-          })
-          .put(EventType.DROP_TABLE, new EventNotifier() {
-            @Override
-            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
-              listener.onDropTable((DropTableEvent)event);
-            }
-          })
-          .put(EventType.ADD_PARTITION, new EventNotifier() {
-            @Override
-            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
-              listener.onAddPartition((AddPartitionEvent)event);
-            }
-          })
-          .put(EventType.DROP_PARTITION, new EventNotifier() {
-            @Override
-            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
-              listener.onDropPartition((DropPartitionEvent)event);
-            }
-          })
-          .put(EventType.ALTER_TABLE, new EventNotifier() {
-            @Override
-            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
-              listener.onAlterTable((AlterTableEvent)event);
-            }
-          })
-          .put(EventType.ALTER_PARTITION, new EventNotifier() {
-            @Override
-            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
-              listener.onAlterPartition((AlterPartitionEvent)event);
-            }
-          })
-          .put(EventType.INSERT, new EventNotifier() {
-            @Override
-            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
-              listener.onInsert((InsertEvent)event);
-            }
-          })
-          .put(EventType.CREATE_FUNCTION, new EventNotifier() {
-            @Override
-            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
-              listener.onCreateFunction((CreateFunctionEvent)event);
-            }
-          })
-          .put(EventType.DROP_FUNCTION, new EventNotifier() {
-            @Override
-            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
-              listener.onDropFunction((DropFunctionEvent)event);
-            }
-          })
-          .put(EventType.CREATE_INDEX, new EventNotifier() {
-            @Override
-            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
-              listener.onAddIndex((AddIndexEvent)event);
-            }
-          })
-          .put(EventType.DROP_INDEX, new EventNotifier() {
-            @Override
-            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
-              listener.onDropIndex((DropIndexEvent)event);
-            }
-          })
-          .put(EventType.ALTER_INDEX, new EventNotifier() {
-            @Override
-            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
-              listener.onAlterIndex((AlterIndexEvent)event);
-            }
-          })
-          .build()
-  );
-
-  /**
-   * Notify a list of listeners about a specific metastore event. Each listener notified might update
-   * the (ListenerEvent) event by setting a parameter key/value pair. These updated parameters will
-   * be returned to the caller.
-   *
-   * @param listeners List of MetaStoreEventListener listeners.
-   * @param eventType Type of the notification event.
-   * @param event The ListenerEvent with information about the event.
-   * @return A list of key/value pair parameters that the listeners set. The returned object will return an empty
-   *         map if no parameters were updated or if no listeners were notified.
-   * @throws MetaException If an error occurred while calling the listeners.
-   */
-  public static Map<String, String> notifyEvent(List<MetaStoreEventListener> listeners,
-                                                EventType eventType,
-                                                ListenerEvent event) throws MetaException {
-
-    Preconditions.checkNotNull(listeners, "Listeners must not be null.");
-    Preconditions.checkNotNull(event, "The event must not be null.");
-
-    for (MetaStoreEventListener listener : listeners) {
-      notificationEvents.get(eventType).notify(listener, event);
-    }
-
-    // Each listener called above might set a different parameter on the event.
-    // This write permission is allowed on the listener side to avoid breaking compatibility if we change the API
-    // method calls.
-    return event.getParameters();
-  }
-
-  /**
-   * Notify a list of listeners about a specific metastore event. Each listener notified might update
-   * the (ListenerEvent) event by setting a parameter key/value pair. These updated parameters will
-   * be returned to the caller.
-   *
-   * @param listeners List of MetaStoreEventListener listeners.
-   * @param eventType Type of the notification event.
-   * @param event The ListenerEvent with information about the event.
-   * @param environmentContext An EnvironmentContext object with parameters sent by the HMS client.
-   * @return A list of key/value pair parameters that the listeners set. The returned object will return an empty
-   *         map if no parameters were updated or if no listeners were notified.
-   * @throws MetaException If an error occurred while calling the listeners.
-   */
-  public static Map<String, String> notifyEvent(List<MetaStoreEventListener> listeners,
-                                                EventType eventType,
-                                                ListenerEvent event,
-                                                EnvironmentContext environmentContext) throws MetaException {
-
-    Preconditions.checkNotNull(event, "The event must not be null.");
-
-    event.setEnvironmentContext(environmentContext);
-    return notifyEvent(listeners, eventType, event);
-  }
-
-  /**
-   * Notify a list of listeners about a specific metastore event. Each listener notified might update
-   * the (ListenerEvent) event by setting a parameter key/value pair. These updated parameters will
-   * be returned to the caller.
-   *
-   * @param listeners List of MetaStoreEventListener listeners.
-   * @param eventType Type of the notification event.
-   * @param event The ListenerEvent with information about the event.
-   * @param environmentContext An EnvironmentContext object with parameters sent by the HMS client.
-   * @param parameters A list of key/value pairs with the new parameters to add.
-   * @return A list of key/value pair parameters that the listeners set. The returned object will return an empty
-   *         map if no parameters were updated or if no listeners were notified.
-   * @throws MetaException If an error occurred while calling the listeners.
-   */
-  public static Map<String, String> notifyEvent(List<MetaStoreEventListener> listeners,
-                                                EventType eventType,
-                                                ListenerEvent event,
-                                                EnvironmentContext environmentContext,
-                                                Map<String, String> parameters) throws MetaException {
-
-    Preconditions.checkNotNull(event, "The event must not be null.");
-
-    event.putParameters(parameters);
-    return notifyEvent(listeners, eventType, event, environmentContext);
-  }
-}
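
The class removed above binds each metastore EventType to a small EventNotifier callback, and notifyEvent() walks the listener list, dispatches through that map, and hands back whatever parameters the listeners set on the event. A minimal, self-contained sketch of the same dispatch-map pattern (the Listener/Event/EventType names below are illustrative stand-ins, not the real metastore types):

    import java.util.EnumMap;
    import java.util.List;
    import java.util.Map;

    // Minimal sketch of the dispatch-map pattern above. Listener, Event and EventType
    // are illustrative stand-ins, not the metastore classes.
    public class NotifierSketch {
      enum EventType { CREATE_TABLE, DROP_TABLE }

      interface Event { Map<String, String> getParameters(); }

      interface Listener {
        void onCreateTable(Event e);
        void onDropTable(Event e);
      }

      interface Notifier { void notify(Listener l, Event e); }

      private static final Map<EventType, Notifier> NOTIFIERS = new EnumMap<>(EventType.class);
      static {
        NOTIFIERS.put(EventType.CREATE_TABLE, (l, e) -> l.onCreateTable(e));
        NOTIFIERS.put(EventType.DROP_TABLE, (l, e) -> l.onDropTable(e));
      }

      // Mirrors notifyEvent(): every listener sees the event, and the parameters the
      // listeners set on it are handed back to the caller.
      static Map<String, String> notifyEvent(List<Listener> listeners, EventType type, Event event) {
        for (Listener listener : listeners) {
          NOTIFIERS.get(type).notify(listener, event);
        }
        return event.getParameters();
      }
    }

Keying the wiring by event type keeps the per-event dispatch in one place instead of a long switch inside notifyEvent().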

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java
index 320902b..9c30ee7 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hive.metastore;
 
 import java.io.BufferedReader;
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.FileReader;
 import java.io.IOException;
@@ -26,19 +27,21 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hive.common.util.HiveVersionInfo;
 
 import com.google.common.collect.ImmutableMap;
 
 
 public class MetaStoreSchemaInfo {
-  private static final String SQL_FILE_EXTENSION = ".sql";
-  private static final String UPGRADE_FILE_PREFIX = "upgrade-";
-  private static final String INIT_FILE_PREFIX = "hive-schema-";
-  private static final String VERSION_UPGRADE_LIST = "upgrade.order";
-  private static final String PRE_UPGRADE_PREFIX = "pre-";
+  private static String SQL_FILE_EXTENSION=".sql";
+  private static String UPGRADE_FILE_PREFIX="upgrade-";
+  private static String INIT_FILE_PREFIX="hive-schema-";
+  private static String VERSION_UPGRADE_LIST = "upgrade.order";
+  private static String PRE_UPGRADE_PREFIX = "pre-";
   private final String dbType;
   private final String hiveSchemaVersions[];
+  private final HiveConf hiveConf;
   private final String hiveHome;
 
   // Some version upgrades often don't change schema. So they are equivalent to
@@ -52,9 +55,10 @@ public class MetaStoreSchemaInfo {
           "1.2.1", "1.2.0"
       );
 
-  public MetaStoreSchemaInfo(String hiveHome, String dbType) throws HiveMetaException {
+  public MetaStoreSchemaInfo(String hiveHome, HiveConf hiveConf, String dbType) throws HiveMetaException {
     this.hiveHome = hiveHome;
     this.dbType = dbType;
+    this.hiveConf = hiveConf;
     // load upgrade order for the given dbType
     List<String> upgradeOrderList = new ArrayList<String>();
     String upgradeListFile = getMetaStoreScriptDir() + File.separator +
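
The constructor shown above loads a database-specific upgrade order; the statement is cut off by the hunk, so the exact file-name handling is not visible here. Below is only a rough sketch, under the assumption that the upgrade.order file is a plain text list with one schema version per line:

    import java.io.BufferedReader;
    import java.io.FileReader;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    // Rough sketch only: assumes the upgrade order file is a plain text file with one
    // schema version per line. The real path construction is outside this hunk.
    public class UpgradeOrderSketch {
      static List<String> loadUpgradeOrder(String upgradeListFile) throws IOException {
        List<String> order = new ArrayList<>();
        try (BufferedReader reader = new BufferedReader(new FileReader(upgradeListFile))) {
          String line;
          while ((line = reader.readLine()) != null) {
            String version = line.trim();
            if (!version.isEmpty()) {
              order.add(version);
            }
          }
        }
        return order;
      }
    }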

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index 3ee7977..6259cda 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -47,7 +47,6 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -87,7 +86,6 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;
-import org.apache.hadoop.security.SaslRpcServer;
 import org.apache.hive.common.util.HiveStringUtils;
 import org.apache.hive.common.util.ReflectionUtil;
 
@@ -640,6 +638,14 @@ public class MetaStoreUtils {
     }
   }
 
+  static boolean isCascadeNeededInAlterTable(Table oldTable, Table newTable) {
+    //currently cascade only supports add/replace columns and
+    //changing column type/position/name/comments
+    List<FieldSchema> oldCols = oldTable.getSd().getCols();
+    List<FieldSchema> newCols = newTable.getSd().getCols();
+    return !areSameColumns(oldCols, newCols);
+  }
+
   static boolean areSameColumns(List<FieldSchema> oldCols, List<FieldSchema> newCols) {
     if (oldCols.size() != newCols.size()) {
       return false;
@@ -690,6 +696,8 @@ public class MetaStoreUtils {
       TypeInfoUtils.getTypeInfoFromTypeString(newType));
   }
 
+  public static final int MAX_MS_TYPENAME_LENGTH = 2000; // 4000/2, for an unlikely unicode case
+
   public static final String TYPE_FROM_DESERIALIZER = "<derived from deserializer>";
   /**
    * validate column type
@@ -700,6 +708,9 @@ public class MetaStoreUtils {
    */
   static public String validateColumnType(String type) {
     if (type.equals(TYPE_FROM_DESERIALIZER)) return null;
+    if (type.length() > MAX_MS_TYPENAME_LENGTH) {
+      return "type name is too long: " + type;
+    }
     int last = 0;
     boolean lastAlphaDigit = isValidTypeChar(type.charAt(last));
     for (int i = 1; i <= type.length(); i++) {
@@ -1758,19 +1769,8 @@ public class MetaStoreUtils {
    * @param conf
    * @return The SASL configuration
    */
-  public static Map<String, String> getMetaStoreSaslProperties(HiveConf conf, boolean useSSL) {
+  public static Map<String, String> getMetaStoreSaslProperties(HiveConf conf) {
     // As of now Hive Meta Store uses the same configuration as Hadoop SASL configuration
-
-    // If SSL is enabled, override the given value of "hadoop.rpc.protection" and set it to "authentication"
-    // This disables any encryption provided by SASL, since SSL already provides it
-    String hadoopRpcProtectionVal = conf.get(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION);
-    String hadoopRpcProtectionAuth = SaslRpcServer.QualityOfProtection.AUTHENTICATION.toString();
-
-    if (useSSL && hadoopRpcProtectionVal != null && !hadoopRpcProtectionVal.equals(hadoopRpcProtectionAuth)) {
-      LOG.warn("Overriding value of " + CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION + " setting it from "
-              + hadoopRpcProtectionVal + " to " + hadoopRpcProtectionAuth + " because SSL is enabled");
-      conf.set(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION, hadoopRpcProtectionAuth);
-    }
     return ShimLoader.getHadoopThriftAuthBridge().getHadoopSaslProperties(conf);
   }
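
The getMetaStoreSaslProperties() hunk above drops an SSL-aware override of hadoop.rpc.protection: per the removed comment, SASL-level encryption is redundant when the connection is already protected by SSL, so the quality of protection is forced down to plain authentication. A condensed sketch of that dropped guard (logging omitted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
    import org.apache.hadoop.security.SaslRpcServer;

    // Condensed from the removed lines: when SSL already protects the connection,
    // force hadoop.rpc.protection down to "authentication" so SASL does not encrypt
    // the traffic a second time. The LOG.warn from the original is omitted.
    public class SaslQopSketch {
      static void downgradeQopForSsl(Configuration conf, boolean useSSL) {
        String current = conf.get(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION);
        String authOnly = SaslRpcServer.QualityOfProtection.AUTHENTICATION.toString();
        if (useSSL && current != null && !current.equals(authOnly)) {
          conf.set(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION, authOnly);
        }
      }
    }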
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index c351ffd..51bc6d0 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -160,13 +160,9 @@ import org.apache.hive.common.util.HiveStringUtils;
 import org.apache.thrift.TException;
 import org.datanucleus.AbstractNucleusContext;
 import org.datanucleus.ClassLoaderResolver;
-import org.datanucleus.ClassLoaderResolverImpl;
 import org.datanucleus.NucleusContext;
-import org.datanucleus.api.jdo.JDOPersistenceManager;
 import org.datanucleus.api.jdo.JDOPersistenceManagerFactory;
 import org.datanucleus.store.rdbms.exceptions.MissingTableException;
-import org.datanucleus.store.scostore.Store;
-import org.datanucleus.util.WeakValueMap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -199,7 +195,6 @@ public class ObjectStore implements RawStore, Configurable {
   private static final Map<String, Class> PINCLASSMAP;
   private static final String HOSTNAME;
   private static final String USER;
-  private static final String JDO_PARAM = ":param";
   static {
     Map<String, Class> map = new HashMap<String, Class>();
     map.put("table", MTable.class);
@@ -239,22 +234,26 @@ public class ObjectStore implements RawStore, Configurable {
   private Pattern partitionValidationPattern;
 
   /**
-   * A Autocloseable wrapper around Query class to pass the Query object to the caller and let the caller release
-   * the resources when the QueryWrapper goes out of scope
+   * A class to pass the Query object to the caller to let the caller release
+   * resources by calling QueryWrapper.query.closeAll() after consuming all the query results.
    */
-  public static class QueryWrapper implements AutoCloseable {
+  public static class QueryWrapper {
     public Query query;
 
     /**
      * Explicitly closes the query object to release the resources
      */
-    @Override
     public void close() {
       if (query != null) {
         query.closeAll();
         query = null;
       }
     }
+
+    @Override
+    protected void finalize() {
+      this.close();
+    }
   }
 
   public ObjectStore() {
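
The hunk above swaps an AutoCloseable QueryWrapper for one that relies on an explicit close() call plus finalize(). For reference, the AutoCloseable variant on the "-" side can be used with try-with-resources, which guarantees Query.closeAll() runs even when the body throws; a minimal sketch (the count query is only an example, not code taken from ObjectStore):

    import javax.jdo.PersistenceManager;
    import javax.jdo.Query;

    // Illustrative use of the AutoCloseable QueryWrapper from the "-" side of the hunk:
    // try-with-resources guarantees Query.closeAll() runs even if the body throws.
    public class QueryWrapperUsageSketch {
      static class QueryWrapper implements AutoCloseable {
        Query query;
        @Override
        public void close() {
          if (query != null) {
            query.closeAll();
            query = null;
          }
        }
      }

      static long countDatabases(PersistenceManager pm) {
        try (QueryWrapper wrapper = new QueryWrapper()) {
          Query query = wrapper.query =
              pm.newQuery("select count(this) from org.apache.hadoop.hive.metastore.model.MDatabase");
          return (Long) query.execute();
        }
      }
    }

Relying on finalize() instead only releases the JDO query resources whenever the garbage collector eventually runs it.
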
@@ -285,9 +284,6 @@ public class ObjectStore implements RawStore, Configurable {
       boolean propsChanged = !propsFromConf.equals(prop);
 
       if (propsChanged) {
-        if (pmf != null){
-          clearOutPmfClassLoaderCache(pmf);
-        }
         pmf = null;
         prop = null;
       }
@@ -752,7 +748,12 @@ public class ObjectStore implements RawStore, Configurable {
       pm.retrieve(mdb);
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     if (mdb == null) {
       throw new NoSuchObjectException("There is no database named " + name);
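
This and many of the following hunks replace a single rollbackAndCleanup(commited, query) call with the same two inlined steps. Judging purely from that inlined form, the helper on the "-" side rolls back the open transaction when the commit flag is false and then closes the query; a sketch of such a helper, with the transaction stubbed out so it stands alone (an inference from the diff, not the verbatim Hive implementation):

    import javax.jdo.Query;

    // Inferred shape of the rollbackAndCleanup(...) helper called on the "-" side,
    // based only on the inlined "+" replacement: roll back if the commit flag is
    // false, then close the query. The transaction is stubbed out so the sketch
    // compiles on its own; in ObjectStore the rollback is an instance method.
    public class RollbackCleanupSketch {
      interface Tx { void rollback(); }

      static void rollbackAndCleanup(boolean committed, Query query, Tx tx) {
        if (!committed) {
          tx.rollback();
        }
        if (query != null) {
          query.closeAll();
        }
      }
    }

Later hunks also call an overload taking a QueryWrapper (rollbackAndCleanup(success, queryWrapper)), which presumably invokes QueryWrapper.close() in place of Query.closeAll().
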
@@ -871,7 +872,10 @@ public class ObjectStore implements RawStore, Configurable {
       }
       success = commitTransaction();
     } finally {
-      rollbackAndCleanup(success, queryWrapper);
+      if (!success) {
+        rollbackTransaction();
+      }
+      queryWrapper.close();
     }
     return success;
   }
@@ -889,20 +893,33 @@ public class ObjectStore implements RawStore, Configurable {
       // Take the pattern and split it on the | to get all the composing
       // patterns
       String[] subpatterns = pattern.trim().split("\\|");
-      StringBuilder filterBuilder = new StringBuilder();
-      List<String> parameterVals = new ArrayList<>(subpatterns.length);
-      appendPatternCondition(filterBuilder, "name", subpatterns, parameterVals);
-      query = pm.newQuery(MDatabase.class, filterBuilder.toString());
+      String queryStr = "select name from org.apache.hadoop.hive.metastore.model.MDatabase where (";
+      boolean first = true;
+      for (String subpattern : subpatterns) {
+        subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*");
+        if (!first) {
+          queryStr = queryStr + " || ";
+        }
+        queryStr = queryStr + " name.matches(\"" + subpattern + "\")";
+        first = false;
+      }
+      queryStr = queryStr + ")";
+      query = pm.newQuery(queryStr);
       query.setResult("name");
       query.setOrdering("name ascending");
-      Collection names = (Collection) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()]));
+      Collection names = (Collection) query.execute();
       databases = new ArrayList<String>();
       for (Iterator i = names.iterator(); i.hasNext();) {
         databases.add((String) i.next());
       }
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return databases;
   }
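
In the getDatabases() hunk above, the "-" side builds the JDOQL filter with implicit :paramN placeholders and binds the pattern pieces through executeWithArray(), while the "+" side concatenates the regex fragments straight into the query string. A simplified sketch of the parameterized form for database-name patterns:

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.List;
    import javax.jdo.PersistenceManager;
    import javax.jdo.Query;

    // Simplified sketch of the parameterized form: the "*"-style pattern pieces become
    // implicit :paramN placeholders bound positionally through executeWithArray(),
    // instead of being concatenated into the filter string.
    public class PatternQuerySketch {
      static Collection<?> matchingDatabaseNames(PersistenceManager pm, String pattern) {
        String[] subpatterns = pattern.trim().split("\\|");
        List<String> params = new ArrayList<>();
        StringBuilder filter = new StringBuilder("(");
        for (int i = 0; i < subpatterns.length; i++) {
          params.add("(?i)" + subpatterns[i].replaceAll("\\*", ".*"));
          if (i > 0) {
            filter.append(" || ");
          }
          filter.append("name.matches(:param").append(params.size()).append(")");
        }
        filter.append(")");
        Query query = pm.newQuery(org.apache.hadoop.hive.metastore.model.MDatabase.class, filter.toString());
        query.setResult("name");
        query.setOrdering("name ascending");
        return (Collection<?>) query.executeWithArray(params.toArray(new String[params.size()]));
      }
    }

Keeping the user-supplied fragments out of the query text avoids escaping problems and accidental JDOQL injection.
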
@@ -922,7 +939,12 @@ public class ObjectStore implements RawStore, Configurable {
       databases = new ArrayList<String>((Collection<String>) query.execute());
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     Collections.sort(databases);
     return databases;
@@ -990,7 +1012,12 @@ public class ObjectStore implements RawStore, Configurable {
       }
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return type;
   }
@@ -1014,7 +1041,12 @@ public class ObjectStore implements RawStore, Configurable {
       success = commitTransaction();
       LOG.debug("type not found " + typeName, e);
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return success;
   }
@@ -1174,9 +1206,6 @@ public class ObjectStore implements RawStore, Configurable {
 
   private List<MConstraint> listAllTableConstraintsWithOptionalConstraintName
     (String dbName, String tableName, String constraintname) {
-    dbName = HiveStringUtils.normalizeIdentifier(dbName);
-    tableName = HiveStringUtils.normalizeIdentifier(tableName);
-    constraintname = constraintname!=null?HiveStringUtils.normalizeIdentifier(constraintname):null;
     List<MConstraint> mConstraints = null;
     List<String> constraintNames = new ArrayList<String>();
     Query query = null;
@@ -1267,28 +1296,40 @@ public class ObjectStore implements RawStore, Configurable {
       dbName = HiveStringUtils.normalizeIdentifier(dbName);
       // Take the pattern and split it on the | to get all the composing
       // patterns
-      List<String> parameterVals = new ArrayList<>();
-      StringBuilder filterBuilder = new StringBuilder();
-      //adds database.name == dbName to the filter
-      appendSimpleCondition(filterBuilder, "database.name", new String[] {dbName}, parameterVals);
-      if(pattern != null) {
-        appendPatternCondition(filterBuilder, "tableName", pattern, parameterVals);
+      String[] subpatterns = pattern.trim().split("\\|");
+      String queryStr =
+          "select tableName from org.apache.hadoop.hive.metastore.model.MTable "
+              + "where database.name == dbName && (";
+      boolean first = true;
+      for (String subpattern : subpatterns) {
+        subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*");
+        if (!first) {
+          queryStr = queryStr + " || ";
+        }
+        queryStr = queryStr + " tableName.matches(\"" + subpattern + "\")";
+        first = false;
       }
-      if(tableType != null) {
-        appendPatternCondition(filterBuilder, "tableType", new String[] {tableType.toString()}, parameterVals);
+      queryStr = queryStr + ")";
+      if (tableType != null) {
+        queryStr = queryStr + " && tableType.matches(\"" + tableType.toString() + "\")";
       }
-
-      query = pm.newQuery(MTable.class, filterBuilder.toString());
+      query = pm.newQuery(queryStr);
+      query.declareParameters("java.lang.String dbName");
       query.setResult("tableName");
       query.setOrdering("tableName ascending");
-      Collection names = (Collection) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()]));
+      Collection names = (Collection) query.execute(dbName);
       tbls = new ArrayList<String>();
       for (Iterator i = names.iterator(); i.hasNext();) {
         tbls.add((String) i.next());
       }
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return tbls;
   }
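
The getTables() hunk above shows the other style used in this file: a single-string JDOQL query with an explicitly declared parameter (query.declareParameters("java.lang.String dbName")) bound at execute() time, while the table-name pattern is still spliced into the string. A sketch of the declared-parameter part on its own, without the pattern handling:

    import java.util.Collection;
    import javax.jdo.PersistenceManager;
    import javax.jdo.Query;

    // Sketch of the declared-parameter style on the "+" side, reduced to just the
    // database filter: dbName is bound at execute() time rather than spliced into
    // the query string.
    public class DeclaredParamSketch {
      static Collection<?> tableNamesInDatabase(PersistenceManager pm, String dbName) {
        Query query = pm.newQuery(
            "select tableName from org.apache.hadoop.hive.metastore.model.MTable "
            + "where database.name == dbName");
        query.declareParameters("java.lang.String dbName");
        query.setResult("tableName");
        query.setOrdering("tableName ascending");
        return (Collection<?>) query.execute(dbName);
      }
    }
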
@@ -1320,7 +1361,12 @@ public class ObjectStore implements RawStore, Configurable {
       result = (Long) query.execute();
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return result.intValue();
   }
@@ -1336,20 +1382,19 @@ public class ObjectStore implements RawStore, Configurable {
       openTransaction();
       // Take the pattern and split it on the | to get all the composing
       // patterns
-      StringBuilder filterBuilder = new StringBuilder();
-      List<String> parameterVals = new ArrayList<>();
+      StringBuilder builder = new StringBuilder();
       if (dbNames != null && !dbNames.equals("*")) {
-        appendPatternCondition(filterBuilder, "database.name", dbNames, parameterVals);
+        appendPatternCondition(builder, "database.name", dbNames);
       }
       if (tableNames != null && !tableNames.equals("*")) {
-        appendPatternCondition(filterBuilder, "tableName", tableNames, parameterVals);
+        appendPatternCondition(builder, "tableName", tableNames);
       }
       if (tableTypes != null && !tableTypes.isEmpty()) {
-        appendSimpleCondition(filterBuilder, "tableType", tableTypes.toArray(new String[0]), parameterVals);
+        appendSimpleCondition(builder, "tableType", tableTypes.toArray(new String[0]));
       }
 
-      query = pm.newQuery(MTable.class, filterBuilder.toString());
-      Collection<MTable> tables = (Collection<MTable>) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()]));
+      query = pm.newQuery(MTable.class, builder.toString());
+      Collection<MTable> tables = (Collection<MTable>) query.execute();
       for (MTable table : tables) {
         TableMeta metaData = new TableMeta(
             table.getDatabase().getName(), table.getTableName(), table.getTableType());
@@ -1358,29 +1403,29 @@ public class ObjectStore implements RawStore, Configurable {
       }
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return metas;
   }
 
-  private StringBuilder appendPatternCondition(StringBuilder filterBuilder, String fieldName,
-      String[] elements, List<String> parameterVals) {
-    return appendCondition(filterBuilder, fieldName, elements, true, parameterVals);
-  }
-
   private StringBuilder appendPatternCondition(StringBuilder builder,
-      String fieldName, String elements, List<String> parameters) {
+      String fieldName, String elements) {
       elements = HiveStringUtils.normalizeIdentifier(elements);
-    return appendCondition(builder, fieldName, elements.split("\\|"), true, parameters);
+    return appendCondition(builder, fieldName, elements.split("\\|"), true);
   }
 
   private StringBuilder appendSimpleCondition(StringBuilder builder,
-      String fieldName, String[] elements, List<String> parameters) {
-    return appendCondition(builder, fieldName, elements, false, parameters);
+      String fieldName, String[] elements) {
+    return appendCondition(builder, fieldName, elements, false);
   }
 
   private StringBuilder appendCondition(StringBuilder builder,
-      String fieldName, String[] elements, boolean pattern, List<String> parameters) {
+      String fieldName, String[] elements, boolean pattern) {
     if (builder.length() > 0) {
       builder.append(" && ");
     }
@@ -1390,15 +1435,14 @@ public class ObjectStore implements RawStore, Configurable {
       if (pattern) {
         element = "(?i)" + element.replaceAll("\\*", ".*");
       }
-      parameters.add(element);
       if (builder.length() > length) {
         builder.append(" || ");
       }
       builder.append(fieldName);
       if (pattern) {
-        builder.append(".matches(").append(JDO_PARAM).append(parameters.size()).append(")");
+        builder.append(".matches(\"").append(element).append("\")");
       } else {
-        builder.append(" == ").append(JDO_PARAM).append(parameters.size());
+        builder.append(" == \"").append(element).append("\"");
       }
     }
     builder.append(" )");
@@ -1444,7 +1488,12 @@ public class ObjectStore implements RawStore, Configurable {
       }
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     nmtbl.mtbl = mtbl;
     return nmtbl;
@@ -1487,10 +1536,15 @@ public class ObjectStore implements RawStore, Configurable {
       }
       committed = commitTransaction();
     } finally {
-      rollbackAndCleanup(committed, query);
+      if (!committed) {
+        rollbackTransaction();
+      }
       if (dbExistsQuery != null) {
         dbExistsQuery.closeAll();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return tables;
   }
@@ -2011,7 +2065,12 @@ public class ObjectStore implements RawStore, Configurable {
         }
       }
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return ret;
   }
@@ -2243,7 +2302,10 @@ public class ObjectStore implements RawStore, Configurable {
       success =  commitTransaction();
       return parts;
     } finally {
-      rollbackAndCleanup(success, queryWrapper);
+      if (!success) {
+        rollbackTransaction();
+      }
+      queryWrapper.close();
     }
   }
 
@@ -2345,7 +2407,6 @@ public class ObjectStore implements RawStore, Configurable {
     for (Iterator i = names.iterator(); i.hasNext();) {
       pns.add((String) i.next());
     }
-
     if (query != null) {
       query.closeAll();
     }
@@ -2440,7 +2501,10 @@ public class ObjectStore implements RawStore, Configurable {
       }
       success = commitTransaction();
     } finally {
-      rollbackAndCleanup(success, queryWrapper);
+      if (!success) {
+        rollbackTransaction();
+      }
+      queryWrapper.close();
     }
     return partitions;
   }
@@ -2462,7 +2526,10 @@ public class ObjectStore implements RawStore, Configurable {
       }
       success = commitTransaction();
     } finally {
-      rollbackAndCleanup(success, queryWrapper);
+      if (!success) {
+        rollbackTransaction();
+      }
+      queryWrapper.close();
     }
     return partitionNames;
   }
@@ -3227,7 +3294,12 @@ public class ObjectStore implements RawStore, Configurable {
       success = commitTransaction();
       LOG.debug("Done retrieving all objects for listTableNamesByFilter");
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return tableNames;
   }
@@ -3273,7 +3345,12 @@ public class ObjectStore implements RawStore, Configurable {
       success = commitTransaction();
       LOG.debug("Done retrieving all objects for listMPartitionNamesByFilter");
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return partNames;
   }
@@ -3494,7 +3571,10 @@ public class ObjectStore implements RawStore, Configurable {
       success = commitTransaction();
       LOG.debug("successfully deleted a CD in removeUnusedColumnDescriptor");
     } finally {
-      rollbackAndCleanup(success, queryWrapper);
+      if (!success) {
+        rollbackTransaction();
+      }
+      queryWrapper.close();
     }
   }
 
@@ -3578,7 +3658,12 @@ public class ObjectStore implements RawStore, Configurable {
       constraintNameIfExists = (String) constraintExistsQuery.execute(name);
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, constraintExistsQuery);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (constraintExistsQuery != null) {
+          constraintExistsQuery.closeAll();
+      }
     }
     return constraintNameIfExists != null && !constraintNameIfExists.isEmpty();
   }
@@ -3826,7 +3911,12 @@ public class ObjectStore implements RawStore, Configurable {
       pm.retrieve(midx);
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return midx;
   }
@@ -3889,7 +3979,12 @@ public class ObjectStore implements RawStore, Configurable {
 
       return indexes;
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -3916,7 +4011,12 @@ public class ObjectStore implements RawStore, Configurable {
       }
       success = commitTransaction();
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return pns;
   }
@@ -4039,7 +4139,12 @@ public class ObjectStore implements RawStore, Configurable {
       pm.retrieve(mRoleMember);
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mRoleMember;
   }
@@ -4108,7 +4213,11 @@ public class ObjectStore implements RawStore, Configurable {
       }
       success = commitTransaction();
     } finally {
-      rollbackAndCleanup(success, queryWrapper);
+      if (!success) {
+        rollbackTransaction();
+      }
+
+      queryWrapper.close();
     }
     return success;
   }
@@ -4178,7 +4287,12 @@ public class ObjectStore implements RawStore, Configurable {
 
       LOG.debug("Done retrieving all objects for listRoles");
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
 
     if (principalType == PrincipalType.USER) {
@@ -4244,6 +4358,7 @@ public class ObjectStore implements RawStore, Configurable {
       mRoleMemebership = (List<MRoleMap>) query.execute(roleName, principalType.toString());
       pm.retrieveAll(mRoleMemebership);
       success = commitTransaction();
+
       LOG.debug("Done retrieving all objects for listMSecurityPrincipalMembershipRole");
     } finally {
       if (!success) {
@@ -4277,7 +4392,12 @@ public class ObjectStore implements RawStore, Configurable {
       pm.retrieve(mrole);
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mrole;
   }
@@ -4299,7 +4419,12 @@ public class ObjectStore implements RawStore, Configurable {
       success = commitTransaction();
       return roleNames;
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -5125,7 +5250,12 @@ public class ObjectStore implements RawStore, Configurable {
 
       LOG.debug("Done retrieving all objects for listRoleMembers");
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mRoleMemeberList;
   }
@@ -5176,7 +5306,12 @@ public class ObjectStore implements RawStore, Configurable {
         userNameDbPriv.addAll(mPrivs);
       }
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return userNameDbPriv;
   }
@@ -5216,7 +5351,12 @@ public class ObjectStore implements RawStore, Configurable {
       commited = commitTransaction();
       return convertGlobal(userNameDbPriv);
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -5259,7 +5399,12 @@ public class ObjectStore implements RawStore, Configurable {
       mSecurityDBList.addAll(mPrivs);
       LOG.debug("Done retrieving all objects for listPrincipalDBGrants");
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mSecurityDBList;
   }
@@ -5382,7 +5527,12 @@ public class ObjectStore implements RawStore, Configurable {
 
       LOG.debug("Done retrieving all objects for listAllTableGrants");
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mSecurityTabList;
   }
@@ -5409,7 +5559,12 @@ public class ObjectStore implements RawStore, Configurable {
 
       LOG.debug("Done retrieving all objects for listTableAllPartitionGrants");
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mSecurityTabPartList;
   }
@@ -5437,7 +5592,12 @@ public class ObjectStore implements RawStore, Configurable {
 
       LOG.debug("Done retrieving all objects for listTableAllColumnGrants");
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mTblColPrivilegeList;
   }
@@ -5466,7 +5626,12 @@ public class ObjectStore implements RawStore, Configurable {
 
       LOG.debug("Done retrieving all objects for listTableAllPartitionColumnGrants");
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mSecurityColList;
   }
@@ -5509,6 +5674,7 @@ public class ObjectStore implements RawStore, Configurable {
   private List<MDBPrivilege> listDatabaseGrants(String dbName, QueryWrapper queryWrapper) {
     dbName = HiveStringUtils.normalizeIdentifier(dbName);
     boolean success = false;
+
     try {
       LOG.debug("Executing listDatabaseGrants");
 
@@ -5616,7 +5782,12 @@ public class ObjectStore implements RawStore, Configurable {
 
       LOG.debug("Done retrieving all objects for listAllTableGrants");
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mSecurityTabPartList;
   }
@@ -5676,7 +5847,12 @@ public class ObjectStore implements RawStore, Configurable {
 
       LOG.debug("Done retrieving all objects for listPrincipalPartitionGrants");
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mSecurityTabPartList;
   }
@@ -5740,7 +5916,12 @@ public class ObjectStore implements RawStore, Configurable {
 
       LOG.debug("Done retrieving all objects for listPrincipalTableColumnGrants");
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mSecurityColList;
   }
@@ -5802,7 +5983,12 @@ public class ObjectStore implements RawStore, Configurable {
 
       LOG.debug("Done retrieving all objects for listPrincipalPartitionColumnGrants");
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mSecurityColList;
   }
@@ -5864,7 +6050,12 @@ public class ObjectStore implements RawStore, Configurable {
       LOG.debug("Done retrieving all objects for listPrincipalPartitionColumnGrantsAll");
       return result;
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -5892,7 +6083,12 @@ public class ObjectStore implements RawStore, Configurable {
       LOG.debug("Done retrieving all objects for listPartitionColumnGrantsAll");
       return result;
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -5967,7 +6163,12 @@ public class ObjectStore implements RawStore, Configurable {
       LOG.debug("Done retrieving all objects for listPrincipalAllTableGrants");
       return result;
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -5990,7 +6191,12 @@ public class ObjectStore implements RawStore, Configurable {
       LOG.debug("Done retrieving all objects for listPrincipalAllTableGrants");
       return result;
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -6030,7 +6236,7 @@ public class ObjectStore implements RawStore, Configurable {
       LOG.debug("Done retrieving all objects for listPrincipalAllPartitionGrants");
     } finally {
       if (!success) {
-       rollbackTransaction();
+        rollbackTransaction();
       }
     }
     return mSecurityTabPartList;
@@ -6062,7 +6268,12 @@ public class ObjectStore implements RawStore, Configurable {
       LOG.debug("Done retrieving all objects for listPrincipalPartitionGrantsAll");
       return result;
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -6088,7 +6299,12 @@ public class ObjectStore implements RawStore, Configurable {
       LOG.debug("Done retrieving all objects for listPrincipalPartitionGrantsAll");
       return result;
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -6166,7 +6382,12 @@ public class ObjectStore implements RawStore, Configurable {
       LOG.debug("Done retrieving all objects for listPrincipalTableColumnGrantsAll");
       return result;
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -6191,7 +6412,12 @@ public class ObjectStore implements RawStore, Configurable {
       LOG.debug("Done retrieving all objects for listPrincipalTableColumnGrantsAll");
       return result;
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -6268,7 +6494,12 @@ public class ObjectStore implements RawStore, Configurable {
       LOG.debug("Done executing isPartitionMarkedForEvent");
       return (partEvents != null && !partEvents.isEmpty()) ? true : false;
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -6322,6 +6553,7 @@ public class ObjectStore implements RawStore, Configurable {
   public Collection<?> executeJDOQLSelect(String queryStr, QueryWrapper queryWrapper) {
     boolean committed = false;
     Collection<?> result = null;
+
     try {
       openTransaction();
       Query query = queryWrapper.query = pm.newQuery(queryStr);
@@ -6362,7 +6594,12 @@ public class ObjectStore implements RawStore, Configurable {
         return -1;
       }
     } finally {
-      rollbackAndCleanup(committed, query);
+      if (!committed) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -6392,7 +6629,12 @@ public class ObjectStore implements RawStore, Configurable {
         return null;
       }
     } finally {
-      rollbackAndCleanup(committed, query);
+      if (!committed) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -6503,7 +6745,12 @@ public class ObjectStore implements RawStore, Configurable {
       }
       return retVal;
     } finally {
-      rollbackAndCleanup(committed, query);
+      if (!committed) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -6591,7 +6838,12 @@ public class ObjectStore implements RawStore, Configurable {
       }
       return retVal;
     } finally {
-      rollbackAndCleanup(committed, query);
+      if (!committed) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -6625,7 +6877,12 @@ public class ObjectStore implements RawStore, Configurable {
       }
       return retVal;
     } finally {
-      rollbackAndCleanup(committed, query);
+      if (!committed) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -6716,7 +6973,12 @@ public class ObjectStore implements RawStore, Configurable {
       }
       return retVal;
     } finally {
-      rollbackAndCleanup(committed, query);
+      if (!committed) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -6793,7 +7055,12 @@ public class ObjectStore implements RawStore, Configurable {
       }
       return retVal;
     } finally {
-      rollbackAndCleanup(committed, query);
+      if (!committed) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -7004,6 +7271,7 @@ public class ObjectStore implements RawStore, Configurable {
     }
 
     boolean committed = false;
+
     try {
       openTransaction();
 
@@ -7050,7 +7318,7 @@ public class ObjectStore implements RawStore, Configurable {
     for (String colName : colNames) {
       boolean foundCol = false;
       for (FieldSchema mCol : colList) {
-        if (mCol.getName().equals(colName)) {
+        if (mCol.getName().equals(colName.trim())) {
           foundCol = true;
           break;
         }
@@ -7162,16 +7430,13 @@ public class ObjectStore implements RawStore, Configurable {
   @Override
   public AggrStats get_aggr_stats_for(String dbName, String tblName,
       final List<String> partNames, final List<String> colNames) throws MetaException, NoSuchObjectException {
-    final boolean useDensityFunctionForNDVEstimation = HiveConf.getBoolVar(getConf(),
-        HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION);
-    final double ndvTuner = HiveConf.getFloatVar(getConf(),
-        HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_TUNER);
+    final boolean  useDensityFunctionForNDVEstimation = HiveConf.getBoolVar(getConf(), HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION);
     return new GetHelper<AggrStats>(dbName, tblName, true, false) {
       @Override
       protected AggrStats getSqlResult(GetHelper<AggrStats> ctx)
           throws MetaException {
         return directSql.aggrColStatsForPartitions(dbName, tblName, partNames,
-            colNames, useDensityFunctionForNDVEstimation, ndvTuner);
+            colNames, useDensityFunctionForNDVEstimation);
       }
       @Override
       protected AggrStats getJdoResult(GetHelper<AggrStats> ctx)
@@ -7189,38 +7454,6 @@ public class ObjectStore implements RawStore, Configurable {
   }
 
   @Override
-  public Map<String, ColumnStatisticsObj> getAggrColStatsForTablePartitions(String dbName,
-      String tableName) throws MetaException, NoSuchObjectException {
-    final boolean useDensityFunctionForNDVEstimation = HiveConf.getBoolVar(getConf(),
-        HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION);
-    final double ndvTuner = HiveConf.getFloatVar(getConf(),
-        HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_TUNER);
-    return new GetHelper<Map<String, ColumnStatisticsObj>>(dbName, tableName, true, false) {
-      @Override
-      protected Map<String, ColumnStatisticsObj> getSqlResult(
-          GetHelper<Map<String, ColumnStatisticsObj>> ctx) throws MetaException {
-        return directSql.getAggrColStatsForTablePartitions(dbName, tblName,
-            useDensityFunctionForNDVEstimation, ndvTuner);
-      }
-
-      @Override
-      protected Map<String, ColumnStatisticsObj> getJdoResult(
-          GetHelper<Map<String, ColumnStatisticsObj>> ctx) throws MetaException,
-          NoSuchObjectException {
-        // This is fast path for query optimizations, if we can find this info
-        // quickly using directSql, do it. No point in failing back to slow path
-        // here.
-        throw new MetaException("Jdo path is not implemented for stats aggr.");
-      }
-
-      @Override
-      protected String describeResult() {
-        return null;
-      }
-    }.run(true);
-  }
-
-  @Override
   public void flushCache() {
     // NOP as there's no caching
   }
@@ -7233,12 +7466,7 @@ public class ObjectStore implements RawStore, Configurable {
     try {
       openTransaction();
       // We are not going to verify SD for each partition. Just verify for the table.
-      // ToDo: we need verify the partition column instead
-      try {
-        validateTableCols(table, colNames);
-      } catch (MetaException me) {
-        LOG.warn("The table does not have the same column definition as its partition.");
-      }
+      validateTableCols(table, colNames);
       Query query = queryWrapper.query = pm.newQuery(MPartitionColumnStatistics.class);
       String paramStr = "java.lang.String t1, java.lang.String t2";
       String filter = "tableName == t1 && dbName == t2 && (";
@@ -7365,7 +7593,12 @@ public class ObjectStore implements RawStore, Configurable {
       rollbackTransaction();
       throw e;
     } finally {
-      rollbackAndCleanup(ret, query);
+      if (!ret) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return ret;
   }
@@ -7435,7 +7668,12 @@ public class ObjectStore implements RawStore, Configurable {
       rollbackTransaction();
       throw e;
     } finally {
-      rollbackAndCleanup(ret, query);
+      if (!ret) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return ret;
   }
@@ -7457,7 +7695,12 @@ public class ObjectStore implements RawStore, Configurable {
       delCnt = query.deletePersistentAll(curTime, expiryTime);
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
       LOG.debug("Done executing cleanupEvents");
     }
     return delCnt;
@@ -7561,7 +7804,12 @@ public class ObjectStore implements RawStore, Configurable {
       return tokenIdents;
     } finally {
       LOG.debug("Done executing getAllTokenIdentifers with status : " + committed);
-      rollbackAndCleanup(committed, query);
+      if (!committed) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -7604,7 +7852,12 @@ public class ObjectStore implements RawStore, Configurable {
       }
       committed = commitTransaction();
     } finally {
-      rollbackAndCleanup(committed, query);
+      if (!committed) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     LOG.debug("Done executing updateMasterKey with status : " + committed);
     if (null == masterKey) {
@@ -7632,7 +7885,12 @@ public class ObjectStore implements RawStore, Configurable {
       }
       success = commitTransaction();
     } finally {
-      rollbackAndCleanup(success, query);
+      if (!success) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     LOG.debug("Done executing removeMasterKey with status : " + success);
     return (null != masterKey) && success;
@@ -7658,7 +7916,12 @@ public class ObjectStore implements RawStore, Configurable {
       return masterKeys;
     } finally {
       LOG.debug("Done executing getMasterKeys with status : " + committed);
-      rollbackAndCleanup(committed, query);
+      if (!committed) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -7770,7 +8033,12 @@ public class ObjectStore implements RawStore, Configurable {
       }
       return mVerTables.get(0);
     } finally {
-      rollbackAndCleanup(committed, query);
+      if (!committed) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -7996,7 +8264,12 @@ public class ObjectStore implements RawStore, Configurable {
       pm.retrieve(mfunc);
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mfunc;
   }
@@ -8044,23 +8317,37 @@ public class ObjectStore implements RawStore, Configurable {
       dbName = HiveStringUtils.normalizeIdentifier(dbName);
       // Take the pattern and split it on the | to get all the composing
       // patterns
-      List<String> parameterVals = new ArrayList<>();
-      StringBuilder filterBuilder = new StringBuilder();
-      appendSimpleCondition(filterBuilder, "database.name", new String[] { dbName }, parameterVals);
-      if(pattern != null) {
-        appendPatternCondition(filterBuilder, "functionName", pattern, parameterVals);
+      String[] subpatterns = pattern.trim().split("\\|");
+      String queryStr =
+          "select functionName from org.apache.hadoop.hive.metastore.model.MFunction "
+              + "where database.name == dbName && (";
+      boolean first = true;
+      for (String subpattern : subpatterns) {
+        subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*");
+        if (!first) {
+          queryStr = queryStr + " || ";
+        }
+        queryStr = queryStr + " functionName.matches(\"" + subpattern + "\")";
+        first = false;
       }
-      query = pm.newQuery(MFunction.class, filterBuilder.toString());
+      queryStr = queryStr + ")";
+      query = pm.newQuery(queryStr);
+      query.declareParameters("java.lang.String dbName");
       query.setResult("functionName");
       query.setOrdering("functionName ascending");
-      Collection names = (Collection) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()]));
+      Collection names = (Collection) query.execute(dbName);
       funcs = new ArrayList<String>();
       for (Iterator i = names.iterator(); i.hasNext();) {
         funcs.add((String) i.next());
       }
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return funcs;
   }
@@ -8069,9 +8356,6 @@ public class ObjectStore implements RawStore, Configurable {
   public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
     boolean commited = false;
     Query query = null;
-
-    NotificationEventResponse result = new NotificationEventResponse();
-    result.setEvents(new ArrayList<NotificationEvent>());
     try {
       openTransaction();
       long lastEvent = rqst.getLastEvent();
@@ -8081,9 +8365,11 @@ public class ObjectStore implements RawStore, Configurable {
       Collection<MNotificationLog> events = (Collection) query.execute(lastEvent);
       commited = commitTransaction();
       if (events == null) {
-        return result;
+        return null;
       }
       Iterator<MNotificationLog> i = events.iterator();
+      NotificationEventResponse result = new NotificationEventResponse();
+      result.setEvents(new ArrayList<NotificationEvent>());
       int maxEvents = rqst.getMaxEvents() > 0 ? rqst.getMaxEvents() : Integer.MAX_VALUE;
       int numEvents = 0;
       while (i.hasNext() && numEvents++ < maxEvents) {
@@ -8091,8 +8377,11 @@ public class ObjectStore implements RawStore, Configurable {
       }
       return result;
     } finally {
+      if (query != null) {
+        query.closeAll();
+      }
       if (!commited) {
-        rollbackAndCleanup(commited, query);
+        rollbackTransaction();
         return null;
       }
     }
@@ -8122,7 +8411,12 @@ public class ObjectStore implements RawStore, Configurable {
       pm.makePersistent(translateThriftToDb(entry));
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -8142,7 +8436,12 @@ public class ObjectStore implements RawStore, Configurable {
       }
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -8161,7 +8460,12 @@ public class ObjectStore implements RawStore, Configurable {
       commited = commitTransaction();
       return new CurrentNotificationEventId(id);
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
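
The large hunk that follows removes the reflection-based clearing of DataNucleus classloader caches (clearOutPmfClassLoaderCache, clearClr, clearFieldMap). The clearFieldMap body itself is not part of this excerpt, so the following is only a guess at its shape: reflectively fetch a private Map-typed field by name, empty it, and report how many entries were dropped.

    import java.lang.reflect.Field;
    import java.util.Map;

    // Guess at the shape of the clearFieldMap(...) helper referenced at the end of the
    // removed block (its body is not in this excerpt). As the removed comment itself
    // warns, poking at DataNucleus internals by field name is fragile across versions.
    public class ClearFieldMapSketch {
      static long clearFieldMap(Object owner, String fieldName) throws Exception {
        Field field = owner.getClass().getDeclaredField(fieldName);
        field.setAccessible(true);
        Object value = field.get(owner);
        if (value instanceof Map) {
          Map<?, ?> map = (Map<?, ?>) value;
          long cleared = map.size();
          map.clear();
          return cleared;
        }
        return 0L;
      }
    }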
 
@@ -8229,99 +8533,20 @@ public class ObjectStore implements RawStore, Configurable {
    */
   public static void unCacheDataNucleusClassLoaders() {
     PersistenceManagerFactory pmf = ObjectStore.getPMF();
-    clearOutPmfClassLoaderCache(pmf);
-  }
-
-  private static void clearOutPmfClassLoaderCache(PersistenceManagerFactory pmf) {
-    if ((pmf == null) || (!(pmf instanceof JDOPersistenceManagerFactory))) {
-      return;
-    }
-    // NOTE : This is hacky, and this section of code is fragile depending on DN code varnames
-    // so it's likely to stop working at some time in the future, especially if we upgrade DN
-    // versions, so we actively need to find a better way to make sure the leak doesn't happen
-    // instead of just clearing out the cache after every call.
-    JDOPersistenceManagerFactory jdoPmf = (JDOPersistenceManagerFactory) pmf;
-    NucleusContext nc = jdoPmf.getNucleusContext();
-    try {
-      Field pmCache = pmf.getClass().getDeclaredField("pmCache");
-      pmCache.setAccessible(true);
-      Set<JDOPersistenceManager> pmSet = (Set<JDOPersistenceManager>)pmCache.get(pmf);
-      for (JDOPersistenceManager pm : pmSet) {
-        org.datanucleus.ExecutionContext ec = (org.datanucleus.ExecutionContext)pm.getExecutionContext();
-        if (ec instanceof org.datanucleus.ExecutionContextThreadedImpl) {
-          ClassLoaderResolver clr = ((org.datanucleus.ExecutionContextThreadedImpl)ec).getClassLoaderResolver();
-          clearClr(clr);
-        }
-      }
-      org.datanucleus.plugin.PluginManager pluginManager = jdoPmf.getNucleusContext().getPluginManager();
-      Field registryField = pluginManager.getClass().getDeclaredField("registry");
-      registryField.setAccessible(true);
-      org.datanucleus.plugin.PluginRegistry registry = (org.datanucleus.plugin.PluginRegistry)registryField.get(pluginManager);
-      if (registry instanceof org.datanucleus.plugin.NonManagedPluginRegistry) {
-        org.datanucleus.plugin.NonManagedPluginRegistry nRegistry = (org.datanucleus.plugin.NonManagedPluginRegistry)registry;
-        Field clrField = nRegistry.getClass().getDeclaredField("clr");
-        clrField.setAccessible(true);
-        ClassLoaderResolver clr = (ClassLoaderResolver)clrField.get(nRegistry);
-        clearClr(clr);
-      }
-      if (nc instanceof org.datanucleus.PersistenceNucleusContextImpl) {
-        org.datanucleus.PersistenceNucleusContextImpl pnc = (org.datanucleus.PersistenceNucleusContextImpl)nc;
-        org.datanucleus.store.types.TypeManagerImpl tm = (org.datanucleus.store.types.TypeManagerImpl)pnc.getTypeManager();
-        Field clrField = tm.getClass().getDeclaredField("clr");
-        clrField.setAccessible(true);
-        ClassLoaderResolver clr = (ClassLoaderResolver)clrField.get(tm);
-        clearClr(clr);
-        Field storeMgrField = pnc.getClass().getDeclaredField("storeMgr");
-        storeMgrField.setAccessible(true);
-        org.datanucleus.store.rdbms.RDBMSStoreManager storeMgr = (org.datanucleus.store.rdbms.RDBMSStoreManager)storeMgrField.get(pnc);
-        Field backingStoreField = storeMgr.getClass().getDeclaredField("backingStoreByMemberName");
-        backingStoreField.setAccessible(true);
-        Map<String, Store> backingStoreByMemberName = (Map<String, Store>)backingStoreField.get(storeMgr);
-        for (Store store : backingStoreByMemberName.values()) {
-          org.datanucleus.store.rdbms.scostore.BaseContainerStore baseStore = (org.datanucleus.store.rdbms.scostore.BaseContainerStore)store;
-          clrField = org.datanucleus.store.rdbms.scostore.BaseContainerStore.class.getDeclaredField("clr");
-          clrField.setAccessible(true);
-          clr = (ClassLoaderResolver)clrField.get(baseStore);
-          clearClr(clr);
-        }
-      }
-      Field classLoaderResolverMap = AbstractNucleusContext.class.getDeclaredField(
-          "classLoaderResolverMap");
-      classLoaderResolverMap.setAccessible(true);
-      Map<String,ClassLoaderResolver> loaderMap =
-          (Map<String, ClassLoaderResolver>) classLoaderResolverMap.get(nc);
-      for (ClassLoaderResolver clr : loaderMap.values()){
-        clearClr(clr);
-      }
-      classLoaderResolverMap.set(nc, new HashMap<String, ClassLoaderResolver>());
-      LOG.debug("Removed cached classloaders from DataNucleus NucleusContext");
-    } catch (Exception e) {
-      LOG.warn("Failed to remove cached classloaders from DataNucleus NucleusContext ", e);
-    }
-  }
-
-  private static void clearClr(ClassLoaderResolver clr) throws Exception {
-    if (clr != null){
-      if (clr instanceof ClassLoaderResolverImpl){
-        ClassLoaderResolverImpl clri = (ClassLoaderResolverImpl) clr;
-        long resourcesCleared = clearFieldMap(clri,"resources");
-        long loadedClassesCleared = clearFieldMap(clri,"loadedClasses");
-        long unloadedClassesCleared = clearFieldMap(clri, "unloadedClasses");
-        LOG.debug("Cleared ClassLoaderResolverImpl: " +
-            resourcesCleared + "," + loadedClassesCleared + "," + unloadedClassesCleared);
+    if ((pmf != null) && (pmf instanceof JDOPersistenceManagerFactory)) {
+      JDOPersistenceManagerFactory jdoPmf = (JDOPersistenceManagerFactory) pmf;
+      NucleusContext nc = jdoPmf.getNucleusContext();
+      try {
+        Field classLoaderResolverMap = AbstractNucleusContext.class.getDeclaredField(
+            "classLoaderResolverMap");
+        classLoaderResolverMap.setAccessible(true);
+        classLoaderResolverMap.set(nc, new HashMap<String, ClassLoaderResolver>());
+        LOG.debug("Removed cached classloaders from DataNucleus NucleusContext");
+      } catch (Exception e) {
+        LOG.warn("Failed to remove cached classloaders from DataNucleus NucleusContext ", e);
       }
     }
   }
-  private static long clearFieldMap(ClassLoaderResolverImpl clri, String mapFieldName) throws Exception {
-    Field mapField = ClassLoaderResolverImpl.class.getDeclaredField(mapFieldName);
-    mapField.setAccessible(true);
-
-    Map<String,Class> map = (Map<String, Class>) mapField.get(clri);
-    long sz = map.size();
-    mapField.set(clri, Collections.synchronizedMap(new WeakValueMap()));
-    return sz;
-  }
-
 
   @Override
   public List<SQLPrimaryKey> getPrimaryKeys(String db_name, String tbl_name) throws MetaException {
@@ -8332,12 +8557,10 @@ public class ObjectStore implements RawStore, Configurable {
     }
   }
 
-  protected List<SQLPrimaryKey> getPrimaryKeysInternal(final String db_name_input,
-    final String tbl_name_input,
+  protected List<SQLPrimaryKey> getPrimaryKeysInternal(final String db_name,
+    final String tbl_name,
     boolean allowSql, boolean allowJdo)
   throws MetaException, NoSuchObjectException {
-    final String db_name = HiveStringUtils.normalizeIdentifier(db_name_input);
-    final String tbl_name = HiveStringUtils.normalizeIdentifier(tbl_name_input);
     return new GetListHelper<SQLPrimaryKey>(db_name, tbl_name, allowSql, allowJdo) {
 
       @Override
@@ -8380,7 +8603,12 @@ public class ObjectStore implements RawStore, Configurable {
       }
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return primaryKeys;
   }
@@ -8405,7 +8633,12 @@ public class ObjectStore implements RawStore, Configurable {
       }
       commited = commitTransaction();
      } finally {
-        rollbackAndCleanup(commited, query);
+       if (!commited) {
+        rollbackTransaction();
+       }
+       if (query != null) {
+        query.closeAll();
+       }
      }
      return ret;
    }
@@ -8421,13 +8654,9 @@ public class ObjectStore implements RawStore, Configurable {
     }
   }
 
-  protected List<SQLForeignKey> getForeignKeysInternal(final String parent_db_name_input,
-    final String parent_tbl_name_input, final String foreign_db_name_input,
-    final String foreign_tbl_name_input, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException {
-    final String parent_db_name = parent_db_name_input;
-    final String parent_tbl_name = parent_tbl_name_input;
-    final String foreign_db_name = foreign_db_name_input;
-    final String foreign_tbl_name = foreign_tbl_name_input;
+  protected List<SQLForeignKey> getForeignKeysInternal(final String parent_db_name,
+    final String parent_tbl_name, final String foreign_db_name, final String foreign_tbl_name,
+    boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException {
     return new GetListHelper<SQLForeignKey>(foreign_db_name, foreign_tbl_name, allowSql, allowJdo) {
 
       @Override
@@ -8528,7 +8757,12 @@ public class ObjectStore implements RawStore, Configurable {
       }
       commited = commitTransaction();
     } finally {
-      rollbackAndCleanup(commited, query);
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return foreignKeys;
   }
@@ -8556,46 +8790,6 @@ public class ObjectStore implements RawStore, Configurable {
     }
   }
 
-  /**
-   * This is a cleanup method which is used to rollback a active transaction
-   * if the success flag is false and close the associated Query object. This method is used
-   * internally and visible for testing purposes only
-   * @param success Rollback the current active transaction if false
-   * @param query Query object which needs to be closed
-   */
-  @VisibleForTesting
-  void rollbackAndCleanup(boolean success, Query query) {
-    try {
-      if (!success) {
-        rollbackTransaction();
-      }
-    } finally {
-      if (query != null) {
-        query.closeAll();
-      }
-    }
-  }
-
-  /**
-   * This is a cleanup method which is used to rollback a active transaction
-   * if the success flag is false and close the associated QueryWrapper object. This method is used
-   * internally and visible for testing purposes only
-   * @param success Rollback the current active transaction if false
-   * @param queryWrapper QueryWrapper object which needs to be closed
-   */
-  @VisibleForTesting
-  void rollbackAndCleanup(boolean success, QueryWrapper queryWrapper) {
-    try {
-      if (!success) {
-        rollbackTransaction();
-      }
-    } finally {
-      if (queryWrapper != null) {
-        queryWrapper.close();
-      }
-    }
-  }
-
   @Override
   public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) {
     boolean success = false;

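The ObjectStore hunks above inline what the removed rollbackAndCleanup(commited, query) helper did: roll back when the transaction never committed, then close the query. Note that the removed helper wrapped the rollback in try/finally so closeAll() still ran even if the rollback threw, while the inlined form restored by the revert runs the two steps one after the other. A compile-only sketch of both shapes, assuming a javax.jdo PersistenceManager and a hypothetical JDOQL string rather than Hive's actual ObjectStore code:

    import javax.jdo.PersistenceManager;
    import javax.jdo.Query;

    final class TxCleanupSketch {

      // Helper shape removed by this revert: the query is closed even if rollback throws.
      static void rollbackAndCleanup(PersistenceManager pm, boolean success, Query query) {
        try {
          if (!success) {
            pm.currentTransaction().rollback();
          }
        } finally {
          if (query != null) {
            query.closeAll();
          }
        }
      }

      // Inline shape restored by this revert: rollback, then close, one after the other.
      static Object runQuery(PersistenceManager pm, String jdoql) {
        boolean commited = false;
        Query query = null;
        try {
          pm.currentTransaction().begin();
          query = pm.newQuery(jdoql);            // hypothetical single-string JDOQL query
          Object result = query.execute();
          pm.currentTransaction().commit();
          commited = true;
          return result;
        } finally {
          if (!commited) {
            pm.currentTransaction().rollback();
          }
          if (query != null) {
            query.closeAll();
          }
        }
      }
    }
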
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
index ded978c..63b696d 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
@@ -590,17 +589,6 @@ public interface RawStore extends Configurable {
     List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException;
 
   /**
-   * Get all partition column statistics for a table
-   * @param dbName
-   * @param tableName
-   * @return Map of partition column statistics
-   * @throws MetaException
-   * @throws NoSuchObjectException
-   */
-  public Map<String, ColumnStatisticsObj> getAggrColStatsForTablePartitions(String dbName,
-      String tableName) throws MetaException, NoSuchObjectException;
-
-  /**
    * Get the next notification event.
    * @param rqst Request containing information on the last processed notification.
    * @return list of notifications, sorted by eventId



http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
index afe1484..af1fa66 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorSelectOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorSparkHashTableSinkOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorSparkPartitionPruningSinkOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
-import org.apache.hadoop.hive.ql.exec.vector.reducesink.VectorReduceSinkCommonOperator;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.optimizer.spark.SparkPartitionPruningSinkDesc;
 import org.apache.hadoop.hive.ql.parse.spark.SparkPartitionPruningSinkOperator;
@@ -144,17 +143,13 @@ public final class OperatorFactory {
 
   public static <T extends OperatorDesc> Operator<T> getVectorOperator(
     Class<? extends Operator<?>> opClass, CompilationOpContext cContext, T conf,
-        VectorizationContext vContext, Operator<? extends OperatorDesc> originalOp) throws HiveException {
+        VectorizationContext vContext) throws HiveException {
     try {
       VectorDesc vectorDesc = ((AbstractOperatorDesc) conf).getVectorDesc();
       vectorDesc.setVectorOp(opClass);
-      Operator<T> op = (Operator<T>) opClass.getDeclaredConstructor(CompilationOpContext.class,
-          VectorizationContext.class, OperatorDesc.class).newInstance(cContext, vContext, conf);
-      op.setOperatorId(originalOp.getOperatorId());
-      if (op instanceof VectorReduceSinkOperator || op instanceof VectorReduceSinkCommonOperator) {
-        ((ReduceSinkDesc) op.getConf()).setOutputOperators(((ReduceSinkDesc) originalOp.getConf())
-            .getOutputOperators());
-      }
+      Operator<T> op = (Operator<T>) opClass.getDeclaredConstructor(
+          CompilationOpContext.class, VectorizationContext.class, OperatorDesc.class)
+          .newInstance(cContext, vContext, conf);
       return op;
     } catch (Exception e) {
       e.printStackTrace();
@@ -163,12 +158,11 @@ public final class OperatorFactory {
   }
 
   public static <T extends OperatorDesc> Operator<T> getVectorOperator(
-      CompilationOpContext cContext, T conf, VectorizationContext vContext,
-      Operator<? extends OperatorDesc> originalOp) throws HiveException {
+      CompilationOpContext cContext, T conf, VectorizationContext vContext) throws HiveException {
     Class<T> descClass = (Class<T>) conf.getClass();
-    Class<? extends Operator<? extends OperatorDesc>> opClass = vectorOpvec.get(descClass);
+    Class<?> opClass = vectorOpvec.get(descClass);
     if (opClass != null) {
-      return getVectorOperator(opClass, cContext, conf, vContext, originalOp);
+      return getVectorOperator(vectorOpvec.get(descClass), cContext, conf, vContext);
     }
     throw new HiveException("No vector operator for descriptor class " + descClass.getName());
   }

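The getVectorOperator change above constructs vector operators reflectively through getDeclaredConstructor(CompilationOpContext.class, VectorizationContext.class, OperatorDesc.class). A stripped-down sketch of that lookup-by-exact-signature pattern, with hypothetical Node/LabeledNode types standing in for the operator classes:

    import java.lang.reflect.Constructor;

    final class ReflectiveFactorySketch {
      // Hypothetical stand-ins for the operator hierarchy.
      interface Node { int id(); }

      static final class LabeledNode implements Node {
        private final String ctx;
        private final int id;
        LabeledNode(String ctx, int id) { this.ctx = ctx; this.id = id; }
        public int id() { return id; }
        public String toString() { return ctx + "#" + id; }
      }

      // Look up the constructor by its exact parameter types, then instantiate with runtime args.
      static <T extends Node> T newNode(Class<T> cls, String ctx, int id) throws Exception {
        Constructor<T> ctor = cls.getDeclaredConstructor(String.class, int.class);
        return ctor.newInstance(ctx, id);
      }

      public static void main(String[] args) throws Exception {
        Node n = newNode(LabeledNode.class, "compile-ctx", 7);
        System.out.println(n);
      }
    }
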
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java
index 5412ef1..3b10bfd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java
@@ -107,7 +107,8 @@ public class ReplCopyTask extends Task<ReplCopyWork> implements Serializable {
       srcFiles.addAll(Arrays.asList(srcs));
       LOG.debug("ReplCopyTask numFiles:" + (srcFiles == null ? "null" : srcFiles.size()));
 
-      if (!FileUtils.mkdir(dstFs, toPath, conf)) {
+      boolean inheritPerms = conf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
+      if (!FileUtils.mkdir(dstFs, toPath, inheritPerms, conf)) {
         console.printError("Cannot make target directory: " + toPath.toString());
         return 2;
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java
index 01a652d..247d589 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java
@@ -33,12 +33,10 @@ import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Properties;
 
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.CopyOnFirstWriteProperties;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.exec.vector.VectorFileSinkOperator;
 import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
@@ -225,7 +223,6 @@ public class SerializationUtilities {
       kryo.register(java.sql.Timestamp.class, new TimestampSerializer());
       kryo.register(Path.class, new PathSerializer());
       kryo.register(Arrays.asList("").getClass(), new ArraysAsListSerializer());
-      kryo.register(CopyOnFirstWriteProperties.class, new CopyOnFirstWritePropertiesSerializer());
 
       ((Kryo.DefaultInstantiatorStrategy) kryo.getInstantiatorStrategy())
           .setFallbackInstantiatorStrategy(
@@ -425,33 +422,6 @@ public class SerializationUtilities {
   }
 
   /**
-   * CopyOnFirstWriteProperties needs a special serializer, since it extends Properties,
-   * which implements Map, so MapSerializer would be used for it by default. Yet it has
-   * the additional 'interned' field that the standard MapSerializer doesn't handle
-   * properly. But FieldSerializer doesn't work for it as well, because the Hashtable
-   * superclass declares most of its fields transient.
-   */
-  private static class CopyOnFirstWritePropertiesSerializer extends
-      com.esotericsoftware.kryo.serializers.MapSerializer {
-
-    @Override
-    public void write(Kryo kryo, Output output, Map map) {
-      super.write(kryo, output, map);
-      CopyOnFirstWriteProperties p = (CopyOnFirstWriteProperties) map;
-      Properties ip = p.getInterned();
-      kryo.writeObjectOrNull(output, ip, Properties.class);
-    }
-
-    @Override
-    public Map read(Kryo kryo, Input input, Class<Map> type) {
-      Map map = super.read(kryo, input, type);
-      Properties ip = kryo.readObjectOrNull(input, Properties.class);
-      ((CopyOnFirstWriteProperties) map).setInterned(ip);
-      return map;
-    }
-  }
-
-  /**
    * Serializes the plan.
    *
    * @param plan The plan, such as QueryPlan, MapredWork, etc.

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
index 65363ed..65227e9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
@@ -347,15 +347,14 @@ public class StatsNoJobTask extends Task<StatsNoJobWork> implements Serializable
     try {
 
       // Wait a while for existing tasks to terminate
-      while (!threadPool.awaitTermination(10, TimeUnit.SECONDS)) {
-        LOG.debug("Waiting for all stats tasks to finish...");
-      }
-      // Cancel currently executing tasks
-      threadPool.shutdownNow();
-
-      // Wait a while for tasks to respond to being cancelled
       if (!threadPool.awaitTermination(100, TimeUnit.SECONDS)) {
-        LOG.debug("Stats collection thread pool did not terminate");
+        // Cancel currently executing tasks
+        threadPool.shutdownNow();
+
+        // Wait a while for tasks to respond to being cancelled
+        if (!threadPool.awaitTermination(100, TimeUnit.SECONDS)) {
+          LOG.debug("Stats collection thread pool did not terminate");
+        }
       }
     } catch (InterruptedException ie) {
 

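The StatsNoJobTask hunk above reverts to the standard two-phase ExecutorService shutdown: wait for submitted work, force-cancel with shutdownNow() if the wait times out, then wait again. A minimal sketch of that idiom, essentially the pattern documented for java.util.concurrent.ExecutorService, with hypothetical timeouts:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    final class PoolShutdownSketch {
      // Two-phase shutdown: stop accepting work, wait, then force-cancel and wait again.
      static void shutdownAndAwait(ExecutorService pool) {
        pool.shutdown();
        try {
          if (!pool.awaitTermination(100, TimeUnit.SECONDS)) {
            pool.shutdownNow();                        // cancel currently executing tasks
            if (!pool.awaitTermination(100, TimeUnit.SECONDS)) {
              System.err.println("thread pool did not terminate");
            }
          }
        } catch (InterruptedException ie) {
          pool.shutdownNow();                          // re-cancel if we were interrupted
          Thread.currentThread().interrupt();          // preserve the interrupt status
        }
      }

      public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        pool.submit(() -> { });
        shutdownAndAwait(pool);
      }
    }
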
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java
index eddc31e..a596e92 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java
@@ -22,6 +22,7 @@ import java.io.Serializable;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.session.OperationLog;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -34,6 +35,7 @@ public class TaskRunner extends Thread {
   protected Task<? extends Serializable> tsk;
   protected TaskResult result;
   protected SessionState ss;
+  private OperationLog operationLog;
   private static AtomicLong taskCounter = new AtomicLong(0);
   private static ThreadLocal<Long> taskRunnerID = new ThreadLocal<Long>() {
     @Override
@@ -72,6 +74,7 @@ public class TaskRunner extends Thread {
   public void run() {
     runner = Thread.currentThread();
     try {
+      OperationLog.setCurrentOperationLog(operationLog);
       SessionState.start(ss);
       runSequential();
     } finally {
@@ -110,4 +113,8 @@ public class TaskRunner extends Thread {
   public static long getTaskRunnerID () {
     return taskRunnerID.get();
   }
+
+  public void setOperationLog(OperationLog operationLog) {
+    this.operationLog = operationLog;
+  }
 }

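The TaskRunner change above hands the caller's OperationLog to the task thread through a setter and re-installs it at the top of run(). A small sketch of that cross-thread hand-off, with a hypothetical ThreadLocal-backed string context in place of OperationLog:

    final class OperationContextSketch {
      // Hypothetical per-operation context, normally visible only to the thread that set it.
      private static final ThreadLocal<String> CURRENT = new ThreadLocal<>();

      static void setCurrent(String ctx) { CURRENT.set(ctx); }
      static String getCurrent() { return CURRENT.get(); }

      static final class Worker extends Thread {
        private String operationCtx;                   // captured on the submitting thread

        void setOperationCtx(String ctx) { this.operationCtx = ctx; }

        @Override
        public void run() {
          setCurrent(operationCtx);                    // re-install in the worker thread
          System.out.println("worker sees: " + getCurrent());
        }
      }

      public static void main(String[] args) throws InterruptedException {
        setCurrent("query-42");
        Worker w = new Worker();
        w.setOperationCtx(getCurrent());               // hand the context across threads
        w.start();
        w.join();
      }
    }
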
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java
index 48ae02f..f3c7c77 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java
@@ -27,7 +27,6 @@ import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.llap.LlapDaemonInfo;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.io.HiveKey;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -106,8 +105,8 @@ public class TopNHash {
     }
 
     final boolean isTez = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez");
-    final boolean isLlap = LlapDaemonInfo.INSTANCE.isLlap();
-    final int numExecutors = isLlap ? LlapDaemonInfo.INSTANCE.getNumExecutors() : 1;
+    final boolean isLlap = isTez && HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_EXECUTION_MODE).equals("llap");
+    final int numExecutors = isLlap ? HiveConf.getIntVar(hconf, HiveConf.ConfVars.LLAP_DAEMON_NUM_EXECUTORS) : 1;
 
     // Used Memory = totalMemory() - freeMemory();
     // Total Free Memory = maxMemory() - Used Memory;

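The comments at the end of the TopNHash hunk describe the heap arithmetic used for sizing: used = totalMemory() - freeMemory() and free = maxMemory() - used. A tiny sketch of that arithmetic with the JDK Runtime API, plus a hypothetical per-executor split like the numExecutors divisor above:

    final class HeapHeadroomSketch {
      public static void main(String[] args) {
        Runtime rt = Runtime.getRuntime();
        long used = rt.totalMemory() - rt.freeMemory();  // currently allocated and in use
        long free = rt.maxMemory() - used;               // headroom before the heap limit
        int numExecutors = 4;                            // hypothetical executors sharing the heap
        long perExecutor = free / numExecutors;
        System.out.println("used=" + used + " free=" + free + " perExecutor=" + perExecutor);
      }
    }
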
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 777c119..5b5ddc3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -76,7 +76,6 @@ import java.util.zip.Deflater;
 import java.util.zip.DeflaterOutputStream;
 import java.util.zip.InflaterInputStream;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.WordUtils;
@@ -110,8 +109,6 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.Context;
-import org.apache.hadoop.hive.ql.Driver.DriverState;
-import org.apache.hadoop.hive.ql.Driver.LockedDriverState;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
@@ -206,6 +203,7 @@ import org.apache.hadoop.mapred.SequenceFileInputFormat;
 import org.apache.hadoop.mapred.SequenceFileOutputFormat;
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.Shell;
 import org.apache.hive.common.util.ACLConfigurationParser;
 import org.apache.hive.common.util.ReflectionUtil;
 import org.slf4j.Logger;
@@ -282,11 +280,11 @@ public final class Utilities {
    * The object in the reducer are composed of these top level fields.
    */
 
-  public static final String HADOOP_LOCAL_FS = "file:///";
+  public static String HADOOP_LOCAL_FS = "file:///";
   public static final String HADOOP_LOCAL_FS_SCHEME = "file";
-  public static final String MAP_PLAN_NAME = "map.xml";
-  public static final String REDUCE_PLAN_NAME = "reduce.xml";
-  public static final String MERGE_PLAN_NAME = "merge.xml";
+  public static String MAP_PLAN_NAME = "map.xml";
+  public static String REDUCE_PLAN_NAME = "reduce.xml";
+  public static String MERGE_PLAN_NAME = "merge.xml";
   public static final String INPUT_NAME = "iocontext.input.name";
   public static final String HAS_MAP_WORK = "has.map.work";
   public static final String HAS_REDUCE_WORK = "has.reduce.work";
@@ -295,11 +293,11 @@ public final class Utilities {
   public static final String HIVE_ADDED_JARS = "hive.added.jars";
   public static final String VECTOR_MODE = "VECTOR_MODE";
   public static final String USE_VECTORIZED_INPUT_FILE_FORMAT = "USE_VECTORIZED_INPUT_FILE_FORMAT";
-  public static final String MAPNAME = "Map ";
-  public static final String REDUCENAME = "Reducer ";
+  public static String MAPNAME = "Map ";
+  public static String REDUCENAME = "Reducer ";
 
   @Deprecated
-  protected static final String DEPRECATED_MAPRED_DFSCLIENT_PARALLELISM_MAX = "mapred.dfsclient.parallelism.max";
+  protected static String DEPRECATED_MAPRED_DFSCLIENT_PARALLELISM_MAX = "mapred.dfsclient.parallelism.max";
 
   /**
    * ReduceField:
@@ -605,7 +603,7 @@ public final class Utilities {
   public static void setMapRedWork(Configuration conf, MapredWork w, Path hiveScratchDir) {
     String useName = conf.get(INPUT_NAME);
     if (useName == null) {
-      useName = "mapreduce:" + hiveScratchDir;
+      useName = "mapreduce";
     }
     conf.set(INPUT_NAME, useName);
     setMapWork(conf, w.getMapWork(), hiveScratchDir, true);
@@ -769,8 +767,8 @@ public final class Utilities {
   // Note: When DDL supports specifying what string to represent null,
   // we should specify "NULL" to represent null in the temp table, and then
   // we can make the following translation deprecated.
-  public static final String nullStringStorage = "\\N";
-  public static final String nullStringOutput = "NULL";
+  public static String nullStringStorage = "\\N";
+  public static String nullStringOutput = "NULL";
 
   public static Random randGen = new Random();
 
@@ -2683,7 +2681,7 @@ public final class Utilities {
     setColumnTypeList(jobConf, rowSchema, excludeVCs);
   }
 
-  public static final String suffix = ".hashtable";
+  public static String suffix = ".hashtable";
 
   public static Path generatePath(Path basePath, String dumpFilePrefix,
       Byte tag, String bigBucketFileName) {
@@ -3164,7 +3162,6 @@ public final class Utilities {
 
     Set<Path> pathsProcessed = new HashSet<Path>();
     List<Path> pathsToAdd = new LinkedList<Path>();
-    LockedDriverState lDrvStat = LockedDriverState.getLockedDriverState();
     // AliasToWork contains all the aliases
     Collection<String> aliasToWork = work.getAliasToWork().keySet();
     if (!skipDummy) {
@@ -3185,9 +3182,6 @@ public final class Utilities {
       boolean hasLogged = false;
       Path path = null;
       for (Map.Entry<Path, ArrayList<String>> e : pathToAliases) {
-        if (lDrvStat != null && lDrvStat.driverState == DriverState.INTERRUPT)
-          throw new IOException("Operation is Canceled. ");
-
         Path file = e.getKey();
         List<String> aliases = e.getValue();
         if (aliases.contains(alias)) {
@@ -3241,8 +3235,6 @@ public final class Utilities {
     List<Path> finalPathsToAdd = new LinkedList<>();
     List<Future<Path>> futures = new LinkedList<>();
     for (final Path path : pathsToAdd) {
-      if (lDrvStat != null && lDrvStat.driverState == DriverState.INTERRUPT)
-        throw new IOException("Operation is Canceled. ");
       if (pool == null) {
         finalPathsToAdd.add(new GetInputPathsCallable(path, job, work, hiveScratchDir, ctx, skipDummy).call());
       } else {
@@ -3252,8 +3244,6 @@ public final class Utilities {
 
     if (pool != null) {
       for (Future<Path> future : futures) {
-        if (lDrvStat != null && lDrvStat.driverState == DriverState.INTERRUPT)
-          throw new IOException("Operation is Canceled. ");
         finalPathsToAdd.add(future.get());
       }
     }

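The Utilities hunks above drop the per-iteration driver-state checks from the input-path logic that threw "Operation is Canceled." when the driver was interrupted. The cooperative-cancellation shape those checks followed, sketched with a hypothetical volatile flag in place of Hive's LockedDriverState:

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;

    final class CancellableScanSketch {
      // Hypothetical cancellation flag standing in for the driver's interrupt state.
      private volatile boolean cancelled;

      void cancel() { cancelled = true; }

      List<String> collectPaths(List<String> candidates) throws IOException {
        for (String path : candidates) {
          if (cancelled) {                             // check before each unit of work
            throw new IOException("Operation is Canceled.");
          }
          // ... resolve or validate the path here ...
        }
        return candidates;
      }

      public static void main(String[] args) throws IOException {
        CancellableScanSketch s = new CancellableScanSketch();
        System.out.println(s.collectPaths(Arrays.asList("/tmp/a", "/tmp/b")));
      }
    }
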
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionError.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionError.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionError.java
deleted file mode 100644
index 4ad4f98..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionError.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.exec.mapjoin;
-
-/**
- * When this Error is thrown, better not retry.
- */
-public class MapJoinMemoryExhaustionError extends Error {
-  private static final long serialVersionUID = 3678353959830506881L;
-  public MapJoinMemoryExhaustionError(String msg) {
-    super(msg);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionException.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionException.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionException.java
new file mode 100644
index 0000000..dbe00b6
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionException.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.exec.mapjoin;
+
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+
+
+public class MapJoinMemoryExhaustionException extends HiveException {
+  private static final long serialVersionUID = 3678353959830506881L;
+  public MapJoinMemoryExhaustionException(String msg) {
+    super(msg);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionHandler.java
index d5e81e1..7fc3226 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionHandler.java
@@ -86,17 +86,17 @@ public class MapJoinMemoryExhaustionHandler {
    *
    * @param tableContainerSize currently table container size
    * @param numRows number of rows processed
-   * @throws MapJoinMemoryExhaustionError
+   * @throws MapJoinMemoryExhaustionException
    */
   public void checkMemoryStatus(long tableContainerSize, long numRows)
-  throws MapJoinMemoryExhaustionError {
+  throws MapJoinMemoryExhaustionException {
     long usedMemory = memoryMXBean.getHeapMemoryUsage().getUsed();
     double percentage = (double) usedMemory / (double) maxHeapSize;
     String msg = Utilities.now() + "\tProcessing rows:\t" + numRows + "\tHashtable size:\t"
         + tableContainerSize + "\tMemory usage:\t" + usedMemory + "\tpercentage:\t" + percentageNumberFormat.format(percentage);
     console.printInfo(msg);
     if(percentage > maxMemoryUsage) {
-      throw new MapJoinMemoryExhaustionError(msg);
+      throw new MapJoinMemoryExhaustionException(msg);
     }
    }
 }

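checkMemoryStatus above compares used heap (from MemoryMXBean) against a configured fraction of the maximum heap and throws once the ratio is exceeded. A minimal sketch of that check, with a hypothetical threshold and a plain Exception in place of the Hive-specific type:

    import java.lang.management.ManagementFactory;
    import java.lang.management.MemoryMXBean;

    final class HeapUsageCheckSketch {
      private final MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
      private final double maxMemoryUsage;             // e.g. 0.9 = 90% of max heap (hypothetical)

      HeapUsageCheckSketch(double maxMemoryUsage) {
        this.maxMemoryUsage = maxMemoryUsage;
      }

      void checkMemoryStatus(long numRows) throws Exception {
        long used = memoryMXBean.getHeapMemoryUsage().getUsed();
        long max = memoryMXBean.getHeapMemoryUsage().getMax();  // -Xmx bound, or -1 if undefined
        double percentage = (double) used / (double) max;
        if (percentage > maxMemoryUsage) {
          throw new Exception("rows=" + numRows + " heap usage " + percentage + " over limit");
        }
      }

      public static void main(String[] args) throws Exception {
        new HeapUsageCheckSketch(0.9).checkMemoryStatus(1_000L);
      }
    }
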
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
index 93a36c6..1945163 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
@@ -32,7 +32,6 @@ import java.util.Properties;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
-import org.apache.hadoop.hive.ql.log.LogDivertAppenderForTest;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -69,7 +68,6 @@ import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveKey;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormatImpl;
 import org.apache.hadoop.hive.ql.io.IOPrepareCache;
-import org.apache.hadoop.hive.ql.log.LogDivertAppender;
 import org.apache.hadoop.hive.ql.log.NullAppender;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.FetchWork;
@@ -118,8 +116,6 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
   protected transient JobConf job;
   public static MemoryMXBean memoryMXBean;
   protected HadoopJobExecHelper jobExecHelper;
-  private transient boolean isShutdown = false;
-  private transient boolean jobKilled = false;
 
   protected static transient final Logger LOG = LoggerFactory.getLogger(ExecDriver.class);
 
@@ -416,7 +412,10 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
 
       if (driverContext.isShutdown()) {
         LOG.warn("Task was cancelled");
-        killJob();
+        if (rj != null) {
+          rj.killJob();
+          rj = null;
+        }
         return 5;
       }
 
@@ -449,7 +448,7 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
 
         if (rj != null) {
           if (returnVal != 0) {
-            killJob();
+            rj.killJob();
           }
           jobID = rj.getID().toString();
         }
@@ -633,8 +632,6 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
   private static void setupChildLog4j(Configuration conf) {
     try {
       LogUtils.initHiveExecLog4j();
-      LogDivertAppender.registerRoutingAppender(conf);
-      LogDivertAppenderForTest.registerRoutingAppenderIfInTest(conf);
     } catch (LogInitializationException e) {
       System.err.println(e.getMessage());
     }
@@ -706,8 +703,6 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
     }
     System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(), queryId);
 
-    LogUtils.registerLoggingContext(conf);
-
     if (noLog) {
       // If started from main(), and noLog is on, we should not output
       // any logs. To turn the log on, please set -Dtest.silent=false
@@ -858,37 +853,22 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
     ss.getHiveHistory().logPlanProgress(queryPlan);
   }
 
-  public boolean isTaskShutdown() {
-    return isShutdown;
-  }
-
   @Override
   public void shutdown() {
     super.shutdown();
-    killJob();
-    isShutdown = true;
-  }
-
-  @Override
-  public String getExternalHandle() {
-    return this.jobID;
-  }
-
-  private void killJob() {
-    boolean needToKillJob = false;
-    synchronized(this) {
-      if (rj != null && !jobKilled) {
-        jobKilled = true;
-        needToKillJob = true;
-      }
-    }
-    if (needToKillJob) {
+    if (rj != null) {
       try {
         rj.killJob();
       } catch (Exception e) {
         LOG.warn("failed to kill job " + rj.getID(), e);
       }
+      rj = null;
     }
   }
+
+  @Override
+  public String getExternalHandle() {
+    return this.jobID;
+  }
 }
 

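The ExecDriver hunks above replace the guarded killJob() (a synchronized jobKilled flag that made the kill idempotent and kept the lock away from the kill call itself) with a simple null-check on the running job. A compact sketch of the flag-guarded variant, with a hypothetical job handle:

    final class KillOnceSketch {
      // Hypothetical handle for a submitted job.
      interface JobHandle { void kill() throws Exception; String id(); }

      private JobHandle runningJob;                    // set when the job is submitted
      private boolean jobKilled = false;

      void setRunningJob(JobHandle job) { this.runningJob = job; }

      // Decide under the lock, act outside it, so kill() never runs twice
      // and the lock is not held across a potentially slow call.
      void killJob() {
        boolean needToKill = false;
        synchronized (this) {
          if (runningJob != null && !jobKilled) {
            jobKilled = true;
            needToKill = true;
          }
        }
        if (needToKill) {
          try {
            runningJob.kill();
          } catch (Exception e) {
            System.err.println("failed to kill job " + runningJob.id() + ": " + e);
          }
        }
      }

      public static void main(String[] args) {
        KillOnceSketch s = new KillOnceSketch();
        s.setRunningJob(new JobHandle() {
          public void kill() { System.out.println("killed"); }
          public String id() { return "job_1"; }
        });
        s.killJob();
        s.killJob();                                   // second call is a no-op thanks to the flag
      }
    }
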
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
index c5d4f9a..591ea97 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
@@ -60,7 +60,7 @@ import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.exec.mapjoin.MapJoinMemoryExhaustionError;
+import org.apache.hadoop.hive.ql.exec.mapjoin.MapJoinMemoryExhaustionException;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -69,6 +69,7 @@ import org.apache.hadoop.hive.ql.plan.FetchWork;
 import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
+import org.apache.hadoop.hive.ql.session.OperationLog;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
@@ -326,8 +327,18 @@ public class MapredLocalTask extends Task<MapredLocalWork> implements Serializab
 
       CachingPrintStream errPrintStream = new CachingPrintStream(System.err);
 
-      StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out);
-      StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, errPrintStream);
+      StreamPrinter outPrinter;
+      StreamPrinter errPrinter;
+      OperationLog operationLog = OperationLog.getCurrentOperationLog();
+      if (operationLog != null) {
+        outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out,
+            operationLog.getPrintStream());
+        errPrinter = new StreamPrinter(executor.getErrorStream(), null, errPrintStream,
+            operationLog.getPrintStream());
+      } else {
+        outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out);
+        errPrinter = new StreamPrinter(executor.getErrorStream(), null, errPrintStream);
+      }
 
       outPrinter.start();
       errPrinter.start();
@@ -384,7 +395,7 @@ public class MapredLocalTask extends Task<MapredLocalWork> implements Serializab
           + Utilities.showTime(elapsed) + " sec.");
     } catch (Throwable throwable) {
       if (throwable instanceof OutOfMemoryError
-          || (throwable instanceof MapJoinMemoryExhaustionError)) {
+          || (throwable instanceof MapJoinMemoryExhaustionException)) {
         l4j.error("Hive Runtime Error: Map local work exhausted memory", throwable);
         return 3;
       } else {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
index 360b639..04e24bd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
@@ -24,8 +24,6 @@ import java.util.Map;
 import java.util.TreeMap;
 
 import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.hive.common.MemoryEstimate;
-import org.apache.hadoop.hive.ql.util.JavaDataModel;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -48,7 +46,7 @@ import com.google.common.annotations.VisibleForTesting;
  * Initially inspired by HPPC LongLongOpenHashMap; however, the code is almost completely reworked
  * and there's very little in common left save for quadratic probing (and that with some changes).
  */
-public final class BytesBytesMultiHashMap implements MemoryEstimate {
+public final class BytesBytesMultiHashMap {
   public static final Logger LOG = LoggerFactory.getLogger(BytesBytesMultiHashMap.class);
 
   /*
@@ -523,18 +521,7 @@ public final class BytesBytesMultiHashMap implements MemoryEstimate {
    * @return number of bytes
    */
   public long memorySize() {
-    return getEstimatedMemorySize();
-  }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    JavaDataModel jdm = JavaDataModel.get();
-    long size = 0;
-    size += writeBuffers.getEstimatedMemorySize();
-    size += jdm.lengthForLongArrayOfSize(refs.length);
-    // 11 primitive1 fields, 2 refs above with alignment
-    size += JavaDataModel.alignUp(15 * jdm.primitive1(), jdm.memoryAlign());
-    return size;
+    return writeBuffers.size() + refs.length * 8 + 100;
   }
 
   public void seal() {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java
index adf1a90..a3bccc6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java
@@ -53,7 +53,7 @@ import org.apache.hadoop.io.Writable;
 public class HashMapWrapper extends AbstractMapJoinTableContainer implements Serializable {
   private static final long serialVersionUID = 1L;
   protected static final Logger LOG = LoggerFactory.getLogger(HashMapWrapper.class);
-  private static final long DEFAULT_HASHMAP_ENTRY_SIZE = 1024L;
+
   // default threshold for using main memory based HashMap
   private static final int THRESHOLD = 1000000;
   private static final float LOADFACTOR = 0.75f;
@@ -140,14 +140,6 @@ public class HashMapWrapper extends AbstractMapJoinTableContainer implements Ser
     return new GetAdaptor(keyTypeFromLoader);
   }
 
-  @Override
-  public long getEstimatedMemorySize() {
-    // TODO: Key and Values are Object[] which can be eagerly deserialized or lazily deserialized. To accurately
-    // estimate the entry size, every possible Objects in Key, Value should implement MemoryEstimate interface which
-    // is very intrusive. So assuming default entry size here.
-    return size() * DEFAULT_HASHMAP_ENTRY_SIZE;
-  }
-
   private class GetAdaptor implements ReusableGetAdaptor {
 
     private Object[] currentKey;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
index 6523f00..04e89e8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
@@ -118,11 +118,6 @@ public class HybridHashTableContainer
 
   private final String spillLocalDirs;
 
-  @Override
-  public long getEstimatedMemorySize() {
-    return memoryUsed;
-  }
-
   /**
    * This class encapsulates the triplet together since they are closely related to each other
    * The triplet: hashmap (either in memory or on disk), small table container, big table container

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java
index 014d17a..c86e5f5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java
@@ -26,7 +26,6 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.common.MemoryEstimate;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
 import org.apache.hadoop.hive.ql.exec.JoinUtil;
@@ -34,7 +33,6 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorHashKeyWrapper;
 import org.apache.hadoop.hive.ql.exec.vector.VectorHashKeyWrapperBatch;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.util.JavaDataModel;
 import org.apache.hadoop.hive.serde2.ByteStream.Output;
 import org.apache.hadoop.hive.serde2.ByteStream.RandomAccessOutput;
 import org.apache.hadoop.hive.serde2.AbstractSerDe;
@@ -74,11 +72,6 @@ public class MapJoinBytesTableContainer
          implements MapJoinTableContainer, MapJoinTableContainerDirectAccess {
   private static final Logger LOG = LoggerFactory.getLogger(MapJoinTableContainer.class);
 
-  // TODO: For object inspector fields, assigning 16KB for now. To better estimate the memory size every
-  // object inspectors have to implement MemoryEstimate interface which is a lot of change with little benefit compared
-  // to writing an instrumentation agent for object size estimation
-  public static final long DEFAULT_OBJECT_INSPECTOR_MEMORY_SIZE = 16 * 1024L;
-
   private final BytesBytesMultiHashMap hashMap;
   /** The OI used to deserialize values. We never deserialize keys. */
   private LazyBinaryStructObjectInspector internalValueOi;
@@ -154,7 +147,7 @@ public class MapJoinBytesTableContainer
     this.notNullMarkers = notNullMarkers;
   }
 
-  public static interface KeyValueHelper extends BytesBytesMultiHashMap.KvSource, MemoryEstimate {
+  public static interface KeyValueHelper extends BytesBytesMultiHashMap.KvSource {
     void setKeyValue(Writable key, Writable val) throws SerDeException;
     /** Get hash value from the key. */
     int getHashFromKey() throws SerDeException;
@@ -223,22 +216,6 @@ public class MapJoinBytesTableContainer
     public int getHashFromKey() throws SerDeException {
       throw new UnsupportedOperationException("Not supported for MapJoinBytesTableContainer");
     }
-
-    @Override
-    public long getEstimatedMemorySize() {
-      JavaDataModel jdm = JavaDataModel.get();
-      long size = 0;
-      size += keySerDe == null ? 0 : jdm.object();
-      size += valSerDe == null ? 0 : jdm.object();
-      size += keySoi == null ? 0 : DEFAULT_OBJECT_INSPECTOR_MEMORY_SIZE;
-      size += valSoi == null ? 0 : DEFAULT_OBJECT_INSPECTOR_MEMORY_SIZE;
-      size += keyOis == null ? 0 : jdm.arrayList() + keyOis.size() * DEFAULT_OBJECT_INSPECTOR_MEMORY_SIZE;
-      size += valOis == null ? 0 : jdm.arrayList() + valOis.size() * DEFAULT_OBJECT_INSPECTOR_MEMORY_SIZE;
-      size += keyObjs == null ? 0 : jdm.array() + keyObjs.length * jdm.object();
-      size += valObjs == null ? 0 : jdm.array() + valObjs.length * jdm.object();
-      size += jdm.primitive1();
-      return size;
-    }
   }
 
   static class LazyBinaryKvWriter implements KeyValueHelper {
@@ -342,15 +319,6 @@ public class MapJoinBytesTableContainer
       aliasFilter &= filterGetter.getShort();
       return aliasFilter;
     }
-
-    @Override
-    public long getEstimatedMemorySize() {
-      JavaDataModel jdm = JavaDataModel.get();
-      long size = 0;
-      size += (4 * jdm.object());
-      size += jdm.primitive1();
-      return size;
-    }
   }
 
   /*
@@ -393,15 +361,6 @@ public class MapJoinBytesTableContainer
       int keyLength = key.getLength();
       return HashCodeUtil.murmurHash(keyBytes, 0, keyLength);
     }
-
-    @Override
-    public long getEstimatedMemorySize() {
-      JavaDataModel jdm = JavaDataModel.get();
-      long size = 0;
-      size += jdm.object() + (key == null ? 0 : key.getCapacity());
-      size += jdm.object() + (val == null ? 0 : val.getCapacity());
-      return size;
-    }
   }
 
   @Override
@@ -809,19 +768,4 @@ public class MapJoinBytesTableContainer
   public int size() {
     return hashMap.size();
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    JavaDataModel jdm = JavaDataModel.get();
-    long size = 0;
-    size += hashMap.getEstimatedMemorySize();
-    size += directWriteHelper == null ? 0 : directWriteHelper.getEstimatedMemorySize();
-    size += writeHelper == null ? 0 : writeHelper.getEstimatedMemorySize();
-    size += sortableSortOrders == null ? 0 : jdm.lengthForBooleanArrayOfSize(sortableSortOrders.length);
-    size += nullMarkers == null ? 0 : jdm.lengthForByteArrayOfSize(nullMarkers.length);
-    size += notNullMarkers == null ? 0 : jdm.lengthForByteArrayOfSize(notNullMarkers.length);
-    size += jdm.arrayList(); // empty list
-    size += DEFAULT_OBJECT_INSPECTOR_MEMORY_SIZE;
-    return size;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainer.java
index 5ca5ff6..6d71fef 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainer.java
@@ -23,7 +23,6 @@ import java.util.List;
 
 import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
 import org.apache.hadoop.hive.ql.exec.JoinUtil;
-import org.apache.hadoop.hive.common.MemoryEstimate;
 import org.apache.hadoop.hive.ql.exec.vector.VectorHashKeyWrapper;
 import org.apache.hadoop.hive.ql.exec.vector.VectorHashKeyWrapperBatch;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
@@ -32,7 +31,7 @@ import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.io.Writable;
 
-public interface MapJoinTableContainer extends MemoryEstimate {
+public interface MapJoinTableContainer {
   /**
    * Retrieve rows from hashtable key by key, one key at a time, w/o copying the structures
    * for each key. "Old" HashMapWrapper will still create/retrieve new objects for java HashMap;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java
index 4ca8f93..4c69899 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java
@@ -356,9 +356,12 @@ public class RemoteHiveSparkClient implements HiveSparkClient {
     private void logConfigurations(JobConf localJobConf) {
       if (LOG.isInfoEnabled()) {
         LOG.info("Logging job configuration: ");
-        StringBuilder outWriter = new StringBuilder();
-        // redact sensitive information before logging
-        HiveConfUtil.dumpConfig(localJobConf, outWriter);
+        StringWriter outWriter = new StringWriter();
+        try {
+          Configuration.dumpConfiguration(localJobConf, outWriter);
+        } catch (IOException e) {
+          LOG.warn("Error logging job configuration", e);
+        }
         LOG.info(outWriter.toString());
       }
     }

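The logConfigurations change above swaps HiveConfUtil.dumpConfig (which redacts sensitive values) for Configuration.dumpConfiguration into a StringWriter. A small sketch of that Hadoop API, using a throwaway Configuration rather than a real job conf:

    import java.io.IOException;
    import java.io.StringWriter;

    import org.apache.hadoop.conf.Configuration;

    final class ConfDumpSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration(false);   // empty conf, no default resources
        conf.set("example.key", "example.value");        // hypothetical property
        StringWriter out = new StringWriter();
        Configuration.dumpConfiguration(conf, out);      // writes the properties as JSON
        System.out.println(out.toString());
      }
    }
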
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
index 5f85f9e..12a76a7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
@@ -298,13 +298,12 @@ public class SparkPlanGenerator {
       throw new IllegalArgumentException(msg, e);
     }
     if (work instanceof MapWork) {
-      MapWork mapWork = (MapWork) work;
       cloned.setBoolean("mapred.task.is.map", true);
-      List<Path> inputPaths = Utilities.getInputPaths(cloned, mapWork,
+      List<Path> inputPaths = Utilities.getInputPaths(cloned, (MapWork) work,
           scratchDir, context, false);
       Utilities.setInputPaths(cloned, inputPaths);
-      Utilities.setMapWork(cloned, mapWork, scratchDir, false);
-      Utilities.createTmpDirs(cloned, mapWork);
+      Utilities.setMapWork(cloned, (MapWork) work, scratchDir, false);
+      Utilities.createTmpDirs(cloned, (MapWork) work);
       if (work instanceof MergeFileWork) {
         MergeFileWork mergeFileWork = (MergeFileWork) work;
         cloned.set(Utilities.MAPRED_MAPPER_CLASS, MergeFileMapper.class.getName());
@@ -314,21 +313,9 @@ public class SparkPlanGenerator {
       } else {
         cloned.set(Utilities.MAPRED_MAPPER_CLASS, ExecMapper.class.getName());
       }
-      if (mapWork.getMaxSplitSize() != null) {
-        HiveConf.setLongVar(cloned, HiveConf.ConfVars.MAPREDMAXSPLITSIZE,
-            mapWork.getMaxSplitSize());
-      }
-      if (mapWork.getMinSplitSize() != null) {
+      if (((MapWork) work).getMinSplitSize() != null) {
         HiveConf.setLongVar(cloned, HiveConf.ConfVars.MAPREDMINSPLITSIZE,
-            mapWork.getMinSplitSize());
-      }
-      if (mapWork.getMinSplitSizePerNode() != null) {
-        HiveConf.setLongVar(cloned, HiveConf.ConfVars.MAPREDMINSPLITSIZEPERNODE,
-            mapWork.getMinSplitSizePerNode());
-      }
-      if (mapWork.getMinSplitSizePerRack() != null) {
-        HiveConf.setLongVar(cloned, HiveConf.ConfVars.MAPREDMINSPLITSIZEPERRACK,
-            mapWork.getMinSplitSizePerRack());
+            ((MapWork) work).getMinSplitSize());
       }
       // remember the JobConf cloned for each MapWork, so we won't clone for it again
       workToJobConf.put(work, cloned);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
index 7eaad18..27bed9c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
@@ -95,7 +95,6 @@ public class SparkReduceRecordHandler extends SparkRecordHandler {
   // number of columns pertaining to keys in a vectorized row batch
   private int keysColumnOffset;
   private static final int BATCH_SIZE = VectorizedRowBatch.DEFAULT_SIZE;
-  private static final int BATCH_BYTES = VectorizedRowBatch.DEFAULT_BYTES;
   private StructObjectInspector keyStructInspector;
   private StructObjectInspector[] valueStructInspectors;
   /* this is only used in the error code path */
@@ -374,7 +373,6 @@ public class SparkReduceRecordHandler extends SparkRecordHandler {
     }
 
     int rowIdx = 0;
-    int batchBytes = 0;
     try {
       while (values.hasNext()) {
         /* deserialize value into columns */
@@ -383,13 +381,11 @@ public class SparkReduceRecordHandler extends SparkRecordHandler {
 
         VectorizedBatchUtil.addRowToBatchFrom(valueObj, valueStructInspectors[tag], rowIdx,
             keysColumnOffset, batch, buffer);
-        batchBytes += valueWritable.getLength();
         rowIdx++;
-        if (rowIdx >= BATCH_SIZE || batchBytes > BATCH_BYTES) {
+        if (rowIdx >= BATCH_SIZE) {
           VectorizedBatchUtil.setBatchSize(batch, rowIdx);
           reducer.process(batch, tag);
           rowIdx = 0;
-          batchBytes = 0;
           if (isLogInfoEnabled) {
             logMemoryInfo();
           }
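
Editorial note: the hunk above removes the byte-size guard (BATCH_BYTES) that the reverted branch used alongside the row-count guard when filling a vectorized batch. A hedged, Hive-independent sketch of that dual-threshold flush pattern; MAX_ROWS, MAX_BYTES and flush() are illustrative stand-ins, and the byte cap value is assumed:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class BatchFlushSketch {
  static final int MAX_ROWS = 1024;                 // stand-in for BATCH_SIZE
  static final long MAX_BYTES = 32L * 1024 * 1024;  // assumed byte cap

  static void process(Iterable<byte[]> values) {
    List<byte[]> batch = new ArrayList<>();
    long batchBytes = 0;
    for (byte[] value : values) {
      batch.add(value);
      batchBytes += value.length;
      // Flush when either cap is hit, so a few very wide rows cannot balloon memory.
      if (batch.size() >= MAX_ROWS || batchBytes >= MAX_BYTES) {
        flush(batch);
        batch.clear();
        batchBytes = 0;
      }
    }
    if (!batch.isEmpty()) {
      flush(batch);                                 // trailing partial batch
    }
  }

  static void flush(List<byte[]> batch) {
    System.out.println("flushing " + batch.size() + " rows");
  }

  public static void main(String[] args) {
    process(Arrays.asList(new byte[10], new byte[20], new byte[40]));
  }
}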

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
index 98b1605..4c01329 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
@@ -83,8 +83,6 @@ public class SparkTask extends Task<SparkWork> {
   private transient int totalTaskCount;
   private transient int failedTaskCount;
   private transient List<Integer> stageIds;
-  private transient SparkJobRef jobRef = null;
-  private transient boolean isShutdown = false;
 
   @Override
   public void initialize(QueryState queryState, QueryPlan queryPlan, DriverContext driverContext,
@@ -109,7 +107,7 @@ public class SparkTask extends Task<SparkWork> {
 
       perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_SUBMIT_JOB);
       submitTime = perfLogger.getStartTime(PerfLogger.SPARK_SUBMIT_JOB);
-      jobRef = sparkSession.submit(driverContext, sparkWork);
+      SparkJobRef jobRef = sparkSession.submit(driverContext, sparkWork);
       perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_SUBMIT_JOB);
 
       addToHistory(jobRef);
@@ -129,14 +127,8 @@ public class SparkTask extends Task<SparkWork> {
         // TODO: If the timeout is because of lack of resources in the cluster, we should
         // ideally also cancel the app request here. But w/o facilities from Spark or YARN,
         // it's difficult to do it on hive side alone. See HIVE-12650.
-        LOG.info("Failed to submit Spark job " + sparkJobID);
-        jobRef.cancelJob();
-      } else if (rc == 4) {
-        LOG.info("The number of tasks reaches above the limit " + conf.getIntVar(HiveConf.ConfVars.SPARK_JOB_MAX_TASKS) +
-            ". Cancelling Spark job " + sparkJobID + " with application ID " + jobID );
         jobRef.cancelJob();
       }
-
       if (this.jobID == null) {
         this.jobID = sparkJobStatus.getAppID();
       }
@@ -298,23 +290,6 @@ public class SparkTask extends Task<SparkWork> {
     return finishTime;
   }
 
-  public boolean isTaskShutdown() {
-    return isShutdown;
-  }
-
-  @Override
-  public void shutdown() {
-    super.shutdown();
-    if (jobRef != null && !isShutdown) {
-      try {
-        jobRef.cancelJob();
-      } catch (Exception e) {
-        LOG.warn("failed to kill job", e);
-      }
-    }
-    isShutdown = true;
-  }
-
   /**
    * Set the number of reducers for the spark work.
    */

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkUtilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkUtilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkUtilities.java
index eb9883a..7d18c0a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkUtilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkUtilities.java
@@ -78,7 +78,7 @@ public class SparkUtilities {
     Path localFile = new Path(source.getPath());
     Path remoteFile = new Path(SessionState.get().getSparkSession().getHDFSSessionDir(),
         getFileName(source));
-    FileSystem fileSystem = FileSystem.get(remoteFile.toUri(), conf);
+    FileSystem fileSystem = FileSystem.get(conf);
     // Overwrite if the remote file already exists. Whether the file can be added
     // on executor is up to spark, i.e. spark.files.overwrite
     fileSystem.copyFromLocalFile(false, true, localFile, remoteFile);
@@ -92,7 +92,7 @@ public class SparkUtilities {
     String deployMode = sparkConf.contains("spark.submit.deployMode") ?
         sparkConf.get("spark.submit.deployMode") : null;
     return SparkClientUtilities.isYarnClusterMode(master, deployMode) &&
-        !(source.getScheme().equals("hdfs") || source.getScheme().equals("viewfs"));
+        !source.getScheme().equals("hdfs");
   }
 
   private static String getFileName(URI uri) {
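
Editorial note: the first hunk above swaps FileSystem.get(remoteFile.toUri(), conf) for FileSystem.get(conf), i.e. resolving the filesystem from fs.defaultFS instead of from the path's own URI. A hedged sketch of the difference; the path below is a placeholder, and under a vanilla local configuration both calls resolve to file:///, whereas with fs.defaultFS pointing at a cluster they can return different filesystems:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FsResolutionSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path remoteFile = new Path("file:///tmp/session/app.jar");   // placeholder path

    // Bound to the scheme/authority carried by the path itself (file, hdfs, viewfs, ...).
    FileSystem fromPathUri = FileSystem.get(remoteFile.toUri(), conf);

    // Bound to fs.defaultFS, regardless of where remoteFile actually points.
    FileSystem fromDefault = FileSystem.get(conf);

    System.out.println(fromPathUri.getUri() + " vs " + fromDefault.getUri());
  }
}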

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java
index 9dfb65e..dd73f3e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java
@@ -34,8 +34,7 @@ import org.apache.spark.JobExecutionStatus;
  * It print current job status to console and sleep current thread between monitor interval.
  */
 public class RemoteSparkJobMonitor extends SparkJobMonitor {
-  private int sparkJobMaxTaskCount = -1;
-  private int totalTaskCount = 0;
+
   private RemoteSparkJobStatus sparkJobStatus;
   private final HiveConf hiveConf;
 
@@ -43,7 +42,6 @@ public class RemoteSparkJobMonitor extends SparkJobMonitor {
     super(hiveConf);
     this.sparkJobStatus = sparkJobStatus;
     this.hiveConf = hiveConf;
-    sparkJobMaxTaskCount = hiveConf.getIntVar(HiveConf.ConfVars.SPARK_JOB_MAX_TASKS);
   }
 
   @Override
@@ -102,17 +100,6 @@ public class RemoteSparkJobMonitor extends SparkJobMonitor {
               } else {
                 console.logInfo(format);
               }
-            } else {
-              // Count the number of tasks, and kill application if it goes beyond the limit.
-              if (sparkJobMaxTaskCount != -1 && totalTaskCount == 0) {
-                totalTaskCount = getTotalTaskCount(progressMap);
-                if (totalTaskCount > sparkJobMaxTaskCount) {
-                  rc = 4;
-                  done = true;
-                  console.printInfo("\nThe total number of task in the Spark job [" + totalTaskCount + "] is greater than the limit [" +
-                      sparkJobMaxTaskCount + "]. The Spark job will be cancelled.");
-                }
-              }
             }
 
             printStatus(progressMap, lastProgressMap);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
index 41730b5..0b224f2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
@@ -66,6 +66,7 @@ abstract class SparkJobMonitor {
   private int lines = 0;
   private final PrintStream out;
 
+
   private static final int COLUMN_1_WIDTH = 16;
   private static final String HEADER_FORMAT = "%16s%10s %13s  %5s  %9s  %7s  %7s  %6s  ";
   private static final String STAGE_FORMAT = "%-16s%10s %13s  %5s  %9s  %7s  %7s  %6s  ";
@@ -172,15 +173,6 @@ abstract class SparkJobMonitor {
     lastPrintTime = System.currentTimeMillis();
   }
 
-  protected int getTotalTaskCount(Map<String, SparkStageProgress> progressMap) {
-    int totalTasks = 0;
-    for (SparkStageProgress progress: progressMap.values() ) {
-      totalTasks += progress.getTotalTaskCount();
-    }
-
-    return totalTasks;
-  }
-
   private String getReport(Map<String, SparkStageProgress> progressMap) {
     StringBuilder reportBuffer = new StringBuilder();
     SimpleDateFormat dt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
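
Editorial note: the two hunks above (RemoteSparkJobMonitor and SparkJobMonitor) remove a guard that summed the task counts reported per stage and cancelled the Spark job when the total exceeded a configured cap. A simplified sketch of that check; SparkStageProgress is Hive-internal, so a plain map of stage name to task count stands in for it, and the MAX_TASKS value is assumed:

import java.util.HashMap;
import java.util.Map;

public class TaskCountGuardSketch {
  static final int MAX_TASKS = 10_000;   // assumed cap, analogous to the removed config value

  static boolean exceedsLimit(Map<String, Integer> tasksPerStage) {
    int total = 0;
    for (int stageTasks : tasksPerStage.values()) {
      total += stageTasks;               // total tasks across all stages of the job
    }
    return total > MAX_TASKS;
  }

  public static void main(String[] args) {
    Map<String, Integer> progress = new HashMap<>();
    progress.put("stage-0", 8_000);
    progress.put("stage-1", 4_000);
    System.out.println(exceedsLimit(progress));   // true: such a job would be cancelled
  }
}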

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/RemoteSparkJobStatus.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/RemoteSparkJobStatus.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/RemoteSparkJobStatus.java
index 67db303..951dbb4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/RemoteSparkJobStatus.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/RemoteSparkJobStatus.java
@@ -67,9 +67,6 @@ public class RemoteSparkJobStatus implements SparkJobStatus {
       return getAppID.get(sparkClientTimeoutInSeconds, TimeUnit.SECONDS);
     } catch (Exception e) {
       LOG.warn("Failed to get APP ID.", e);
-      if (Thread.interrupted()) {
-        error = e;
-      }
       return null;
     }
   }
@@ -189,9 +186,6 @@ public class RemoteSparkJobStatus implements SparkJobStatus {
   }
 
   public JobHandle.State getRemoteJobState() {
-    if (error != null) {
-      return JobHandle.State.FAILED;
-    }
     return jobHandle.getState();
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
index 6497495..aa2dfc7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
@@ -336,11 +336,6 @@ public class DagUtils {
       setupAutoReducerParallelism(edgeProp, w);
       break;
     }
-    case CUSTOM_SIMPLE_EDGE: {
-      setupQuickStart(edgeProp, w);
-      break;
-    }
-
     default:
       // nothing
     }
@@ -970,9 +965,10 @@ public class DagUtils {
    * @return true if the file names match else returns false.
    * @throws IOException when any file system related call fails
    */
-  private boolean checkPreExisting(FileSystem sourceFS, Path src, Path dest, Configuration conf)
+  private boolean checkPreExisting(Path src, Path dest, Configuration conf)
     throws IOException {
     FileSystem destFS = dest.getFileSystem(conf);
+    FileSystem sourceFS = src.getFileSystem(conf);
     FileStatus destStatus = FileUtils.getFileStatusOrNull(destFS, dest);
     if (destStatus != null) {
       return (sourceFS.getFileStatus(src).getLen() == destStatus.getLen());
@@ -992,9 +988,7 @@ public class DagUtils {
   public LocalResource localizeResource(
       Path src, Path dest, LocalResourceType type, Configuration conf) throws IOException {
     FileSystem destFS = dest.getFileSystem(conf);
-    // We call copyFromLocal below, so we basically assume src is a local file.
-    FileSystem srcFs = FileSystem.getLocal(conf);
-    if (src != null && !checkPreExisting(srcFs, src, dest, conf)) {
+    if (src != null && !checkPreExisting(src, dest, conf)) {
       // copy the src to the destination and create local resource.
       // do not overwrite.
       String srcStr = src.toString();
@@ -1006,7 +1000,7 @@ public class DagUtils {
       // authoritative one), don't wait infinitely for the notifier, just wait a little bit
       // and check HDFS before and after.
       if (notifierOld != null
-          && checkOrWaitForTheFile(srcFs, src, dest, conf, notifierOld, 1, 150, false)) {
+          && checkOrWaitForTheFile(src, dest, conf, notifierOld, 1, 150, false)) {
         return createLocalResource(destFS, dest, type, LocalResourceVisibility.PRIVATE);
       }
       try {
@@ -1028,7 +1022,7 @@ public class DagUtils {
             conf, HiveConf.ConfVars.HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL, TimeUnit.MILLISECONDS);
         // Only log on the first wait, and check after wait on the last iteration.
         if (!checkOrWaitForTheFile(
-            srcFs, src, dest, conf, notifierOld, waitAttempts, sleepInterval, true)) {
+            src, dest, conf, notifierOld, waitAttempts, sleepInterval, true)) {
           LOG.error("Could not find the jar that was being uploaded");
           throw new IOException("Previous writer likely failed to write " + dest +
               ". Failing because I am unlikely to write too.");
@@ -1043,10 +1037,10 @@ public class DagUtils {
         LocalResourceVisibility.PRIVATE);
   }
 
-  public boolean checkOrWaitForTheFile(FileSystem srcFs, Path src, Path dest, Configuration conf,
-      Object notifier, int waitAttempts, long sleepInterval, boolean doLog) throws IOException {
+  public boolean checkOrWaitForTheFile(Path src, Path dest, Configuration conf, Object notifier,
+      int waitAttempts, long sleepInterval, boolean doLog) throws IOException {
     for (int i = 0; i < waitAttempts; i++) {
-      if (checkPreExisting(srcFs, src, dest, conf)) return true;
+      if (checkPreExisting(src, dest, conf)) return true;
       if (doLog && i == 0) {
         LOG.info("Waiting for the file " + dest + " (" + waitAttempts + " attempts, with "
             + sleepInterval + "ms interval)");
@@ -1065,7 +1059,7 @@ public class DagUtils {
         throw new IOException(interruptedException);
       }
     }
-    return checkPreExisting(srcFs, src, dest, conf); // One last check.
+    return checkPreExisting(src, dest, conf); // One last check.
   }
 
   /**
@@ -1271,20 +1265,6 @@ public class DagUtils {
     }
   }
 
-  private void setupQuickStart(TezEdgeProperty edgeProp, Vertex v)
-    throws IOException {
-    if (!edgeProp.isSlowStart()) {
-      Configuration pluginConf = new Configuration(false);
-      VertexManagerPluginDescriptor desc =
-              VertexManagerPluginDescriptor.create(ShuffleVertexManager.class.getName());
-      pluginConf.setFloat(ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_MIN_SRC_FRACTION, 0);
-      pluginConf.setFloat(ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_MAX_SRC_FRACTION, 0);
-      UserPayload payload = TezUtils.createUserPayloadFromConf(pluginConf);
-      desc.setUserPayload(payload);
-      v.setVertexManagerPlugin(desc);
-    }
-  }
-
   public String createDagName(Configuration conf, QueryPlan plan) {
     String name = getUserSpecifiedDagName(conf);
     if (name == null) {
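
Editorial note: the checkPreExisting hunk above restores a version that derives the source FileSystem from the src path and treats a destination file of the same length as already localized. A hedged sketch of that check; FileUtils.getFileStatusOrNull is Hive-internal, so exists()/getFileStatus() are used instead, and the paths in main are placeholders:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PreExistingCheckSketch {
  static boolean checkPreExisting(Path src, Path dest, Configuration conf) throws IOException {
    FileSystem destFS = dest.getFileSystem(conf);
    FileSystem sourceFS = src.getFileSystem(conf);
    if (!destFS.exists(dest)) {
      return false;                       // nothing uploaded yet
    }
    FileStatus destStatus = destFS.getFileStatus(dest);
    // Same length is treated as "same file"; contents are not compared.
    return sourceFS.getFileStatus(src).getLen() == destStatus.getLen();
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path src = new Path("file:///tmp/example-src.jar");    // placeholder paths
    Path dest = new Path("file:///tmp/example-dest.jar");
    System.out.println(checkPreExisting(src, dest, conf));
  }
}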

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java
index 7011d23..7b13e90 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java
@@ -24,7 +24,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.hive.ql.exec.mapjoin.MapJoinMemoryExhaustionError;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -147,20 +146,7 @@ public class HashTableLoader implements org.apache.hadoop.hive.ql.exec.HashTable
       }
       nwayConf.setNumberOfPartitions(numPartitions);
     }
-    final float inflationFactor = HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASH_TABLE_INFLATION_FACTOR);
-    final long memoryCheckInterval = HiveConf.getLongVar(hconf,
-      HiveConf.ConfVars.LLAP_MAPJOIN_MEMORY_MONITOR_CHECK_INTERVAL);
-    final boolean isLlap = "llap".equals(HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_EXECUTION_MODE));
-    long numEntries = 0;
-    long noCondTaskSize = desc.getNoConditionalTaskSize();
-    boolean doMemCheck = isLlap && inflationFactor > 0.0f && noCondTaskSize > 0 && memoryCheckInterval > 0;
-    if (!doMemCheck) {
-      LOG.info("Not doing hash table memory monitoring. isLlap: {} inflationFactor: {} noConditionalTaskSize: {} " +
-        "memoryCheckInterval: {}", isLlap, inflationFactor, noCondTaskSize, memoryCheckInterval);
-    } else {
-      LOG.info("Memory monitoring for hash table loader enabled. noconditionalTaskSize: {} inflationFactor: {} ",
-        noCondTaskSize, inflationFactor);
-    }
+
     for (int pos = 0; pos < mapJoinTables.length; pos++) {
       if (pos == desc.getPosBigTable()) {
         continue;
@@ -219,32 +205,12 @@ public class HashTableLoader implements org.apache.hadoop.hive.ql.exec.HashTable
           tableContainer = new HashMapWrapper(hconf, keyCount);
         }
 
-        LOG.info("Using tableContainer: " + tableContainer.getClass().getSimpleName());
+        LOG.info("Using tableContainer " + tableContainer.getClass().getSimpleName());
 
         tableContainer.setSerde(keyCtx, valCtx);
         while (kvReader.next()) {
-          tableContainer.putRow((Writable) kvReader.getCurrentKey(), (Writable) kvReader.getCurrentValue());
-          numEntries++;
-          if (doMemCheck && ((numEntries % memoryCheckInterval) == 0)) {
-            final long estMemUsage = tableContainer.getEstimatedMemorySize();
-            final long threshold = (long) (inflationFactor * noCondTaskSize);
-            // guard against poor configuration of noconditional task size. We let hash table grow till 2/3'rd memory
-            // available for container/executor
-            final long effectiveThreshold = (long) Math.max(threshold, (2.0/3.0) * desc.getMaxMemoryAvailable());
-            if (estMemUsage > effectiveThreshold) {
-              String msg = "Hash table loading exceeded memory limits." +
-                " estimatedMemoryUsage: " + estMemUsage + " noconditionalTaskSize: " + noCondTaskSize +
-                " inflationFactor: " + inflationFactor + " threshold: " + threshold +
-                " effectiveThreshold: " + effectiveThreshold;
-              LOG.error(msg);
-              throw new MapJoinMemoryExhaustionError(msg);
-            } else {
-              if (LOG.isInfoEnabled()) {
-                LOG.info("Checking hash table loader memory usage.. numEntries: {} estimatedMemoryUsage: {} " +
-                  "effectiveThreshold: {}", numEntries, estMemUsage, effectiveThreshold);
-              }
-            }
-          }
+          tableContainer.putRow(
+              (Writable)kvReader.getCurrentKey(), (Writable)kvReader.getCurrentValue());
         }
         tableContainer.seal();
         LOG.info("Finished loading hashtable using " + tableContainer.getClass() + ". Small table position: " + pos);
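
Editorial note: the hunks above remove a periodic memory check that, every configured number of entries, compared the hash table's estimated footprint against a threshold derived from the no-conditional-task size and an inflation factor, and aborted loading with MapJoinMemoryExhaustionError when it was exceeded. A generic sketch of the pattern; the interval, threshold, per-entry estimate and exception type below are assumptions, not the Hive values:

import java.util.ArrayList;
import java.util.List;

public class MemoryGuardedLoadSketch {
  static final long CHECK_INTERVAL = 100_000L;               // rows between checks (assumed)
  static final long MEMORY_THRESHOLD = 512L * 1024 * 1024;   // bytes (assumed)

  static void load(Iterable<byte[]> rows) {
    long numEntries = 0;
    long estimatedBytes = 0;
    for (byte[] row : rows) {
      estimatedBytes += row.length + 64;                     // crude per-entry overhead estimate
      numEntries++;
      // Only check every CHECK_INTERVAL entries to keep the hot loop cheap.
      if (numEntries % CHECK_INTERVAL == 0 && estimatedBytes > MEMORY_THRESHOLD) {
        throw new IllegalStateException(
            "Hash table loading exceeded memory limit: " + estimatedBytes + " bytes");
      }
    }
    System.out.println("Loaded " + numEntries + " entries, ~" + estimatedBytes + " bytes");
  }

  public static void main(String[] args) {
    List<byte[]> rows = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
      rows.add(new byte[128]);
    }
    load(rows);
  }
}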

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java
index 60660ac..ad8b9e0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java
@@ -103,8 +103,6 @@ public class ReduceRecordSource implements RecordSource {
   // number of columns pertaining to keys in a vectorized row batch
   private int firstValueColumnOffset;
 
-  private final int BATCH_BYTES = VectorizedRowBatch.DEFAULT_BYTES;
-
   private StructObjectInspector keyStructInspector;
   private StructObjectInspector valueStructInspectors;
 
@@ -192,9 +190,7 @@ public class ReduceRecordSource implements RecordSource {
                                   VectorizedBatchUtil.typeInfosFromStructObjectInspector(
                                       keyStructInspector),
                                   /* useExternalBuffer */ true,
-                                  binarySortableSerDe.getSortOrders(),
-                                  binarySortableSerDe.getNullMarkers(),
-                                  binarySortableSerDe.getNotNullMarkers()));
+                                  binarySortableSerDe.getSortOrders()));
         keyBinarySortableDeserializeToRow.init(0);
 
         final int valuesSize = valueStructInspectors.getAllStructFieldRefs().size();
@@ -439,7 +435,6 @@ public class ReduceRecordSource implements RecordSource {
     final int maxSize = batch.getMaxSize();
     Preconditions.checkState(maxSize > 0);
     int rowIdx = 0;
-    int batchBytes = keyBytes.length;
     try {
       for (Object value : values) {
         if (valueLazyBinaryDeserializeToRow != null) {
@@ -447,7 +442,6 @@ public class ReduceRecordSource implements RecordSource {
           BytesWritable valueWritable = (BytesWritable) value;
           byte[] valueBytes = valueWritable.getBytes();
           int valueLength = valueWritable.getLength();
-          batchBytes += valueLength;
 
           // l4j.info("ReduceRecordSource processVectorGroup valueBytes " + valueLength + " " +
           //     VectorizedBatchUtil.displayBytes(valueBytes, 0, valueLength));
@@ -456,7 +450,7 @@ public class ReduceRecordSource implements RecordSource {
           valueLazyBinaryDeserializeToRow.deserialize(batch, rowIdx);
         }
         rowIdx++;
-        if (rowIdx >= maxSize || batchBytes >= BATCH_BYTES) {
+        if (rowIdx >= maxSize) {
 
           // Batch is full.
           batch.size = rowIdx;
@@ -468,7 +462,6 @@ public class ReduceRecordSource implements RecordSource {
             batch.cols[i].reset();
           }
           rowIdx = 0;
-          batchBytes = 0;
         }
       }
       if (rowIdx > 0) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
index 4242262..486d43a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
@@ -23,8 +23,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.hadoop.hive.ql.exec.mapjoin.MapJoinMemoryExhaustionError;
-import org.apache.tez.runtime.api.TaskFailureType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -43,8 +41,6 @@ import org.apache.tez.runtime.api.LogicalOutput;
 import org.apache.tez.runtime.api.ProcessorContext;
 import org.apache.tez.runtime.library.api.KeyValueWriter;
 
-import com.google.common.base.Throwables;
-
 /**
  * Hive processor for Tez that forms the vertices in Tez and processes the data.
  * Does what ExecMapper and ExecReducer does for hive in MR framework.
@@ -193,11 +189,8 @@ public class TezProcessor extends AbstractLogicalIOProcessor {
     } catch (Throwable t) {
       originalThrowable = t;
     } finally {
-      if (originalThrowable != null && (originalThrowable instanceof Error ||
-        Throwables.getRootCause(originalThrowable) instanceof Error)) {
-        LOG.error("Cannot recover from this FATAL error", StringUtils.stringifyException(originalThrowable));
-        getContext().reportFailure(TaskFailureType.FATAL, originalThrowable,
-                      "Cannot recover from this error");
+      if (originalThrowable != null && originalThrowable instanceof Error) {
+        LOG.error(StringUtils.stringifyException(originalThrowable));
         throw new RuntimeException(originalThrowable);
       }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java
index b4d8ffa..8f45947 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java
@@ -76,7 +76,6 @@ public class TezSessionPoolManager {
   private static final Logger LOG = LoggerFactory.getLogger(TezSessionPoolManager.class);
   private static final Random rdm = new Random();
 
-  private volatile SessionState initSessionState;
   private BlockingQueue<TezSessionPoolSession> defaultQueuePool;
 
   /** Priority queue sorted by expiration time of live sessions that could be expired. */
@@ -137,8 +136,6 @@ public class TezSessionPoolManager {
 
   public void startPool() throws Exception {
     if (initialSessions.isEmpty()) return;
-    // Hive SessionState available at this point.
-    initSessionState = SessionState.get();
     int threadCount = Math.min(initialSessions.size(),
         HiveConf.getIntVar(initConf, ConfVars.HIVE_SERVER2_TEZ_SESSION_MAX_INIT_THREADS));
     Preconditions.checkArgument(threadCount > 0);
@@ -262,27 +259,13 @@ public class TezSessionPoolManager {
       expirationThread = new Thread(new Runnable() {
         @Override
         public void run() {
-          try {
-            SessionState.setCurrentSessionState(initSessionState);
-            runExpirationThread();
-          } catch (Exception e) {
-            LOG.warn("Exception in TezSessionPool-expiration thread. Thread will shut down", e);
-          } finally {
-            LOG.info("TezSessionPool-expiration thread exiting");
-          }
+          runExpirationThread();
         }
       }, "TezSessionPool-expiration");
       restartThread = new Thread(new Runnable() {
         @Override
         public void run() {
-          try {
-            SessionState.setCurrentSessionState(initSessionState);
-            runRestartThread();
-          } catch (Exception e) {
-            LOG.warn("Exception in TezSessionPool-cleanup thread. Thread will shut down", e);
-          } finally {
-            LOG.info("TezSessionPool-cleanup thread exiting");
-          }
+          runRestartThread();
         }
       }, "TezSessionPool-cleanup");
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
index 036e918..ed1ba9c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
@@ -345,7 +345,6 @@ public class TezSessionState {
       String user, final Configuration conf) throws IOException {
     // TODO: parts of this should be moved out of TezSession to reuse the clients, but there's
     //       no good place for that right now (HIVE-13698).
-    // TODO: De-link from SessionState. A TezSession can be linked to different Hive Sessions via the pool.
     SessionState session = SessionState.get();
     boolean isInHs2 = session != null && session.isHiveServerQuery();
     Token<LlapTokenIdentifier> token = null;
@@ -439,7 +438,6 @@ public class TezSessionState {
   private void setupSessionAcls(Configuration tezConf, HiveConf hiveConf) throws
       IOException {
 
-    // TODO: De-link from SessionState. A TezSession can be linked to different Hive Sessions via the pool.
     String user = SessionState.getUserFromAuthenticator();
     UserGroupInformation loginUserUgi = UserGroupInformation.getLoginUser();
     String loginUser =
@@ -453,7 +451,6 @@ public class TezSessionState {
             TezConfiguration.TEZ_AM_MODIFY_ACLS, addHs2User, user, loginUser);
 
     if (LOG.isDebugEnabled()) {
-      // TODO: De-link from SessionState. A TezSession can be linked to different Hive Sessions via the pool.
       LOG.debug(
           "Setting Tez Session access for sessionId={} with viewAclString={}, modifyStr={}",
           SessionState.get().getSessionId(), viewStr, modifyStr);
@@ -595,7 +592,6 @@ public class TezSessionState {
    */
   private Path createTezDir(String sessionId) throws IOException {
     // tez needs its own scratch dir (per session)
-    // TODO: De-link from SessionState. A TezSession can be linked to different Hive Sessions via the pool.
     Path tezDir = new Path(SessionState.get().getHdfsScratchDirURIString(), TEZ_DIR);
     tezDir = new Path(tezDir, sessionId);
     FileSystem fs = tezDir.getFileSystem(conf);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
index 1c84c6a..6c8bf29 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
@@ -18,10 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.tez;
 
-import java.io.Serializable;
-import org.apache.hadoop.hive.ql.exec.ConditionalTask;
-import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
-
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
@@ -328,14 +324,6 @@ public class TezTask extends Task<TezWork> {
     }
   }
 
-  void checkOutputSpec(BaseWork work, JobConf jc) throws IOException {
-    for (Operator<?> op : work.getAllOperators()) {
-      if (op instanceof FileSinkOperator) {
-        ((FileSinkOperator) op).checkOutputSpecs(null, jc);
-      }
-    }
-  }
-
   DAG build(JobConf conf, TezWork work, Path scratchDir,
       LocalResource appJarLr, List<LocalResource> additionalLr, Context ctx)
       throws Exception {
@@ -369,6 +357,7 @@ public class TezTask extends Task<TezWork> {
     setAccessControlsForCurrentUser(dag, queryPlan.getQueryId(), conf);
 
     for (BaseWork w: ws) {
+
       boolean isFinal = work.getLeaves().contains(w);
 
       // translate work to vertex
@@ -390,8 +379,6 @@ public class TezTask extends Task<TezWork> {
             children.add(v);
           }
         }
-        JobConf parentConf = workToConf.get(unionWorkItems.get(0));
-        checkOutputSpec(w, parentConf);
 
         // create VertexGroup
         Vertex[] vertexArray = new Vertex[unionWorkItems.size()];
@@ -404,7 +391,7 @@ public class TezTask extends Task<TezWork> {
 
         // For a vertex group, all Outputs use the same Key-class, Val-class and partitioner.
         // Pick any one source vertex to figure out the Edge configuration.
-       
+        JobConf parentConf = workToConf.get(unionWorkItems.get(0));
 
         // now hook up the children
         for (BaseWork v: children) {
@@ -417,7 +404,6 @@ public class TezTask extends Task<TezWork> {
       } else {
         // Regular vertices
         JobConf wxConf = utils.initializeVertexConf(conf, ctx, w);
-        checkOutputSpec(w, wxConf);
         Vertex wx =
             utils.createVertex(wxConf, w, scratchDir, appJarLr, additionalLr, fs, ctx, !isFinal,
                 work, work.getVertexType(w));

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/Constants.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/Constants.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/Constants.java
index cd3404a..eccbbb6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/Constants.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/Constants.java
@@ -1,20 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 package org.apache.hadoop.hive.ql.exec.tez.monitoring;
 
 import org.apache.hadoop.hive.common.log.InPlaceUpdate;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/DAGSummary.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/DAGSummary.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/DAGSummary.java
index 7cb74a5..1400be4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/DAGSummary.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/DAGSummary.java
@@ -1,20 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 package org.apache.hadoop.hive.ql.exec.tez.monitoring;
 
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -28,7 +11,6 @@ import org.apache.tez.common.counters.TaskCounter;
 import org.apache.tez.common.counters.TezCounter;
 import org.apache.tez.common.counters.TezCounters;
 import org.apache.tez.dag.api.DAG;
-import org.apache.tez.dag.api.TezConfiguration;
 import org.apache.tez.dag.api.TezException;
 import org.apache.tez.dag.api.Vertex;
 import org.apache.tez.dag.api.client.DAGClient;
@@ -77,58 +59,24 @@ class DAGSummary implements PrintSummary {
     this.hiveCounters = hiveCounters(dagClient);
   }
 
-  private long hiveInputRecordsFromTezCounters(String vertexName, String inputVertexName) {
-    // Get the counters for the input vertex.
-    Set<StatusGetOpts> statusOptions = new HashSet<>(1);
-    statusOptions.add(StatusGetOpts.GET_COUNTERS);
-    VertexStatus inputVertexStatus = vertexStatus(statusOptions, inputVertexName);
-    final TezCounters inputVertexCounters = inputVertexStatus.getVertexCounters();
-
-    // eg, group name TaskCounter_Map_7_OUTPUT_Reducer_8, counter name OUTPUT_RECORDS
-    String groupName = formattedName("TaskCounter", inputVertexName, vertexName);
-    String counterName = "OUTPUT_RECORDS";
-
-    // Do not create counter if it does not exist -
-    // instead fall back to default behavior for determining input records.
-    TezCounter tezCounter = inputVertexCounters.getGroup(groupName).findCounter(counterName, false);
-    if (tezCounter == null) {
-      return -1;
-    } else {
-      return tezCounter.getValue();
-    }
-  }
-
-  private long hiveInputRecordsFromHiveCounters(String inputVertexName) {
-    // The record count from these counters may not be correct if the input vertex has
-    // edges to more than one vertex, since this value counts the records going to all
-    // destination vertices.
-
-    String intermediateRecordsCounterName = formattedName(
-        ReduceSinkOperator.Counter.RECORDS_OUT_INTERMEDIATE.toString(),
-        inputVertexName
-    );
-    String recordsOutCounterName = formattedName(FileSinkOperator.Counter.RECORDS_OUT.toString(),
-        inputVertexName);
-    return hiveCounterValue(intermediateRecordsCounterName) + hiveCounterValue(recordsOutCounterName);
-  }
-
   private long hiveInputRecordsFromOtherVertices(String vertexName) {
     List<Vertex> inputVerticesList = dag.getVertex(vertexName).getInputVertices();
     long result = 0;
     for (Vertex inputVertex : inputVerticesList) {
-      long inputVertexRecords = hiveInputRecordsFromTezCounters(vertexName, inputVertex.getName());
-      if (inputVertexRecords < 0) {
-        inputVertexRecords = hiveInputRecordsFromHiveCounters(inputVertex.getName());
-      }
-      result += inputVertexRecords;
+      String intermediateRecordsCounterName = formattedName(
+          ReduceSinkOperator.Counter.RECORDS_OUT_INTERMEDIATE.toString(),
+          inputVertex.getName()
+      );
+      String recordsOutCounterName = formattedName(FileSinkOperator.Counter.RECORDS_OUT.toString(),
+          inputVertex.getName());
+      result += (
+          hiveCounterValue(intermediateRecordsCounterName)
+              + hiveCounterValue(recordsOutCounterName)
+      );
     }
     return result;
   }
 
-  private String formattedName(String counterName, String srcVertexName, String destVertexName) {
-    return String.format("%s_", counterName) + srcVertexName.replace(" ", "_") + "_OUTPUT_" + destVertexName.replace(" ", "_");
-  }
-
   private String formattedName(String counterName, String vertexName) {
     return String.format("%s_", counterName) + vertexName.replace(" ", "_");
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/FSCountersSummary.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/FSCountersSummary.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/FSCountersSummary.java
index fd85504..0a28edd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/FSCountersSummary.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/FSCountersSummary.java
@@ -1,20 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 package org.apache.hadoop.hive.ql.exec.tez.monitoring;
 
 import org.apache.hadoop.fs.FileSystem;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/LLAPioSummary.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/LLAPioSummary.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/LLAPioSummary.java
index 10e9f57..81f1755 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/LLAPioSummary.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/LLAPioSummary.java
@@ -1,20 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 package org.apache.hadoop.hive.ql.exec.tez.monitoring;
 
 import org.apache.hadoop.hive.llap.counters.LlapIOCounters;



http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/PigDelegator.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/PigDelegator.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/PigDelegator.java
index 663e4b6..aeb89df 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/PigDelegator.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/PigDelegator.java
@@ -51,7 +51,7 @@ public class PigDelegator extends LauncherDelegator {
                boolean usesHcatalog, String completedUrl, boolean enablelog,
                Boolean enableJobReconnect)
     throws NotAuthorizedException, BadParam, BusyException, QueueException,
-    ExecuteException, IOException, InterruptedException, TooManyRequestsException {
+    ExecuteException, IOException, InterruptedException {
     runAs = user;
     List<String> args = makeArgs(execute,
       srcFile, pigArgs,

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java
index 793881b..5aed3b3 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java
@@ -170,7 +170,6 @@ public class SecureProxySupport {
         return null;
       }
     });
-    FileSystem.closeAllForUGI(ugi);
     return twrapper.tokens;
   }
   private static void collectTokens(FileSystem fs, TokenWrapper twrapper, Credentials creds, String userName) throws IOException {
@@ -205,7 +204,6 @@ public class SecureProxySupport {
         return null;
       }
     });
-    FileSystem.closeAllForUGI(ugi);
 
   }
 
@@ -222,7 +220,6 @@ public class SecureProxySupport {
         return client.getDelegationToken(c.getUser(), u);
       }
     });
-    FileSystem.closeAllForUGI(ugi);
     return s;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java
index 43a7d57..2da0204 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java
@@ -27,8 +27,6 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeoutException;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import javax.servlet.http.HttpServletRequest;
@@ -652,7 +650,7 @@ public class Server {
                       @FormParam("enablelog") boolean enablelog,
                       @FormParam("enablejobreconnect") Boolean enablejobreconnect)
     throws NotAuthorizedException, BusyException, BadParam, QueueException,
-    ExecuteException, IOException, InterruptedException, TooManyRequestsException {
+    ExecuteException, IOException, InterruptedException {
     verifyUser();
     verifyParam(inputs, "input");
     verifyParam(mapper, "mapper");
@@ -706,7 +704,7 @@ public class Server {
                   @FormParam("enablelog") boolean enablelog,
                   @FormParam("enablejobreconnect") Boolean enablejobreconnect)
     throws NotAuthorizedException, BusyException, BadParam, QueueException,
-    ExecuteException, IOException, InterruptedException, TooManyRequestsException {
+    ExecuteException, IOException, InterruptedException {
     verifyUser();
     verifyParam(jar, "jar");
     verifyParam(mainClass, "class");
@@ -756,7 +754,7 @@ public class Server {
                @FormParam("enablelog") boolean enablelog,
                @FormParam("enablejobreconnect") Boolean enablejobreconnect)
     throws NotAuthorizedException, BusyException, BadParam, QueueException,
-    ExecuteException, IOException, InterruptedException, TooManyRequestsException {
+    ExecuteException, IOException, InterruptedException {
     verifyUser();
     if (execute == null && srcFile == null) {
       throw new BadParam("Either execute or file parameter required");
@@ -807,7 +805,7 @@ public class Server {
               @FormParam("enablelog") boolean enablelog,
               @FormParam("enablejobreconnect") Boolean enablejobreconnect)
     throws NotAuthorizedException, BusyException, BadParam, QueueException,
-    IOException, InterruptedException, TooManyRequestsException {
+    IOException, InterruptedException {
     verifyUser();
     if (command == null && optionsFile == null)
       throw new BadParam("Must define Sqoop command or a optionsfile contains Sqoop command to run Sqoop job.");
@@ -861,7 +859,7 @@ public class Server {
               @FormParam("enablelog") boolean enablelog,
               @FormParam("enablejobreconnect") Boolean enablejobreconnect)
     throws NotAuthorizedException, BusyException, BadParam, QueueException,
-    ExecuteException, IOException, InterruptedException, TooManyRequestsException {
+    ExecuteException, IOException, InterruptedException {
     verifyUser();
     if (execute == null && srcFile == null) {
       throw new BadParam("Either execute or file parameter required");
@@ -893,8 +891,7 @@ public class Server {
   @Path("jobs/{jobid}")
   @Produces({MediaType.APPLICATION_JSON})
   public QueueStatusBean showJobId(@PathParam("jobid") String jobid)
-    throws NotAuthorizedException, BadParam, IOException, InterruptedException,
-           BusyException, TimeoutException, ExecutionException, TooManyRequestsException {
+    throws NotAuthorizedException, BadParam, IOException, InterruptedException {
 
     verifyUser();
     verifyParam(jobid, ":jobid");
@@ -971,8 +968,7 @@ public class Server {
                                        @QueryParam("showall") boolean showall,
                                        @QueryParam("jobid") String jobid,
                                        @QueryParam("numrecords") String numrecords)
-    throws NotAuthorizedException, BadParam, IOException, InterruptedException,
-    BusyException, TimeoutException, ExecutionException, TooManyRequestsException {
+    throws NotAuthorizedException, BadParam, IOException, InterruptedException {
 
     verifyUser();
 
@@ -984,14 +980,19 @@ public class Server {
       showDetails = true;
     }
 
+    ListDelegator ld = new ListDelegator(appConf);
+    List<String> list = ld.run(getDoAsUser(), showall);
+    List<JobItemBean> detailList = new ArrayList<JobItemBean>();
+    int currRecord = 0;
     int numRecords;
+
     // Parse numrecords to an integer
     try {
       if (numrecords != null) {
         numRecords = Integer.parseInt(numrecords);
-        if (numRecords <= 0) {
-          throw new BadParam("numrecords should be an integer > 0");
-        }
+  if (numRecords <= 0) {
+    throw new BadParam("numrecords should be an integer > 0");
+  }
       }
       else {
         numRecords = -1;
@@ -1001,8 +1002,57 @@ public class Server {
       throw new BadParam("Invalid numrecords format: numrecords should be an integer > 0");
     }
 
-    ListDelegator ld = new ListDelegator(appConf);
-    return ld.run(getDoAsUser(), showall, jobid, numRecords, showDetails);
+    // Sort the list as requested
+    boolean isAscendingOrder = true;
+    switch (appConf.getListJobsOrder()) {
+    case lexicographicaldesc:
+      Collections.sort(list, Collections.reverseOrder());
+      isAscendingOrder = false;
+      break;
+    case lexicographicalasc:
+    default:
+      Collections.sort(list);
+      break;
+    }
+
+    for (String job : list) {
+      // If numRecords = -1, fetch all records.
+      // Hence skip all the below checks when numRecords = -1.
+      if (numRecords != -1) {
+        // If currRecord >= numRecords, we have already fetched the top #numRecords
+        if (currRecord >= numRecords) {
+          break;
+        }
+        else if (jobid == null || jobid.trim().length() == 0) {
+            currRecord++;
+        }
+        // If the current record needs to be returned based on the
+        // filter conditions specified by the user, increment the counter
+        else if (isAscendingOrder && job.compareTo(jobid) > 0 || !isAscendingOrder && job.compareTo(jobid) < 0) {
+          currRecord++;
+        }
+        // The current record should not be included in the output detailList.
+        else {
+          continue;
+        }
+      }
+      JobItemBean jobItem = new JobItemBean();
+      jobItem.id = job;
+      if (showDetails) {
+        StatusDelegator sd = new StatusDelegator(appConf);
+        try {
+          jobItem.detail = sd.run(getDoAsUser(), job);
+        }
+        catch(Exception ex) {
+          /*if we could not get status for some reason, log it, and send empty status back with
+          * just the ID so that caller knows to even look in the log file*/
+          LOG.info("Failed to get status detail for jobId='" + job + "'", ex);
+          jobItem.detail = new QueueStatusBean(job, "Failed to retrieve status; see WebHCat logs");
+        }
+      }
+      detailList.add(jobItem);
+    }
+    return detailList;
   }
 
   /**
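
Editorial note: the hunk above re-inlines sorting plus cursor-style pagination of job ids into Server.showJobList (order by id, skip entries up to the supplied jobid, cap the result at numrecords). A simplified sketch of that filtering; it omits the JobItemBean/status details and the special case where an unset numrecords returns everything regardless of the cursor:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class JobListPagingSketch {
  static List<String> page(List<String> jobs, String jobid, int numRecords, boolean ascending) {
    List<String> sorted = new ArrayList<>(jobs);
    if (ascending) {
      Collections.sort(sorted);
    } else {
      Collections.sort(sorted, Collections.reverseOrder());
    }
    List<String> result = new ArrayList<>();
    for (String job : sorted) {
      if (numRecords != -1 && result.size() >= numRecords) {
        break;                                        // already collected the requested page
      }
      if (jobid == null || jobid.trim().isEmpty()
          || (ascending ? job.compareTo(jobid) > 0 : job.compareTo(jobid) < 0)) {
        result.add(job);                              // no cursor, or strictly past the cursor
      }
    }
    return result;
  }

  public static void main(String[] args) {
    List<String> jobs = Arrays.asList("job_003", "job_001", "job_002");
    System.out.println(page(jobs, "job_001", 2, true));   // [job_002, job_003]
  }
}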

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SqoopDelegator.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SqoopDelegator.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SqoopDelegator.java
index eb84fb2..fde5f60 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SqoopDelegator.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SqoopDelegator.java
@@ -50,7 +50,7 @@ public class SqoopDelegator extends LauncherDelegator {
                String callback, String completedUrl, boolean enablelog,
                Boolean enableJobReconnect, String libdir)
   throws NotAuthorizedException, BadParam, BusyException, QueueException,
-  IOException, InterruptedException, TooManyRequestsException
+  IOException, InterruptedException
   {
     if(TempletonUtils.isset(appConf.sqoopArchive())) {
       if(!TempletonUtils.isset(appConf.sqoopPath()) && !TempletonUtils.isset(appConf.sqoopHome())) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/StatusDelegator.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/StatusDelegator.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/StatusDelegator.java
index c042ae8..fac0170 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/StatusDelegator.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/StatusDelegator.java
@@ -19,13 +19,10 @@
 package org.apache.hive.hcatalog.templeton;
 
 import java.io.IOException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeoutException;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.shims.HadoopShims.WebHCatJTShim;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.mapred.JobID;
@@ -44,78 +41,18 @@ import org.apache.hive.hcatalog.templeton.tool.JobState;
  */
 public class StatusDelegator extends TempletonDelegator {
   private static final Logger LOG = LoggerFactory.getLogger(StatusDelegator.class);
-  private final String JOB_STATUS_EXECUTE_THREAD_PREFIX = "JobStatusExecute";
-
-  /**
-   * Current thread id used to set in execution threads.
-   */
-  private final String statusThreadId = Thread.currentThread().getName();
-
-  /*
-   * Job status request executor to get status of a job.
-   */
-  private static JobRequestExecutor<QueueStatusBean> jobRequest =
-                   new JobRequestExecutor<QueueStatusBean>(JobRequestExecutor.JobRequestType.Status,
-                   AppConfig.JOB_STATUS_MAX_THREADS, AppConfig.JOB_STATUS_TIMEOUT);
 
   public StatusDelegator(AppConfig appConf) {
     super(appConf);
   }
 
-  /*
-   * Gets status of job form job id. If maximum concurrent job status requests are configured
-   * then status request will be executed on a thread from thread pool. If job status request
-   * time out is configured then request execution thread will be interrupted if thread
-   * times out and does no action.
-   */
-  public QueueStatusBean run(final String user, final String id, boolean enableThreadPool)
-    throws NotAuthorizedException, BadParam, IOException, InterruptedException,
-           BusyException, TimeoutException, ExecutionException, TooManyRequestsException {
-    if (jobRequest.isThreadPoolEnabled() && enableThreadPool) {
-      return jobRequest.execute(getJobStatusCallableTask(user, id));
-    } else {
-      return getJobStatus(user, id);
-    }
-  }
-
-  /*
-   * Job callable task for job status operation. Overrides behavior of execute() to get
-   * status of a job. No need to override behavior of cleanup() as there is nothing to be
-   * done if job sttaus operation is timed out or interrupted.
-   */
-  private JobCallable<QueueStatusBean> getJobStatusCallableTask(final String user,
-                                 final String id) {
-    return new JobCallable<QueueStatusBean>() {
-      @Override
-      public QueueStatusBean execute() throws NotAuthorizedException, BadParam, IOException,
-                                    InterruptedException, BusyException {
-       /*
-        * Change the current thread name to include parent thread Id if it is executed
-        * in thread pool. Useful to extract logs specific to a job request and helpful
-        * to debug job issues.
-        */
-        Thread.currentThread().setName(String.format("%s-%s-%s", JOB_STATUS_EXECUTE_THREAD_PREFIX,
-                                       statusThreadId, Thread.currentThread().getId()));
-
-        return getJobStatus(user, id);
-      }
-    };
-  }
-
-  public QueueStatusBean run(final String user, final String id)
-    throws NotAuthorizedException, BadParam, IOException, InterruptedException,
-           BusyException, TimeoutException, ExecutionException, TooManyRequestsException {
-    return run(user, id, true);
-  }
-
-  public QueueStatusBean getJobStatus(String user, String id)
+  public QueueStatusBean run(String user, String id)
     throws NotAuthorizedException, BadParam, IOException, InterruptedException
   {
     WebHCatJTShim tracker = null;
     JobState state = null;
-    UserGroupInformation ugi = null;
     try {
-      ugi = UgiFactory.getUgi(user);
+      UserGroupInformation ugi = UgiFactory.getUgi(user);
       tracker = ShimLoader.getHadoopShims().getWebHCatShim(appConf, ugi);
       JobID jobid = StatusDelegator.StringToJobID(id);
       if (jobid == null)
@@ -129,8 +66,6 @@ public class StatusDelegator extends TempletonDelegator {
         tracker.close();
       if (state != null)
         state.close();
-      if (ugi != null)
-        FileSystem.closeAllForUGI(ugi);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/StreamingDelegator.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/StreamingDelegator.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/StreamingDelegator.java
index 590e49f..839b56a 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/StreamingDelegator.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/StreamingDelegator.java
@@ -51,7 +51,7 @@ public class StreamingDelegator extends LauncherDelegator {
                Boolean enableJobReconnect,
                JobType jobType)
     throws NotAuthorizedException, BadParam, BusyException, QueueException,
-    ExecuteException, IOException, InterruptedException, TooManyRequestsException {
+    ExecuteException, IOException, InterruptedException {
       List<String> args = makeArgs(inputs, inputreader, output, mapper, reducer, combiner,
       fileList, cmdenvs, jarArgs);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/TooManyRequestsException.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/TooManyRequestsException.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/TooManyRequestsException.java
deleted file mode 100644
index 9d55ad4..0000000
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/TooManyRequestsException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hive.hcatalog.templeton;
-
-/**
- * Raise this exception if web service is busy with existing requests and not able
- * service new requests.
- */
-public class TooManyRequestsException extends SimpleWebException {
-  /*
-   * The current version of jetty server doesn't have the status
-   * HttpStatus.TOO_MANY_REQUESTS_429. Hence, passing this as constant.
-   */
-  public static int TOO_MANY_REQUESTS_429 = 429;
-
-  public TooManyRequestsException(String msg) {
-    super(TOO_MANY_REQUESTS_429, msg);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
index f4c4b76..15ab8b9 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
@@ -81,14 +81,9 @@ public class TempletonControllerJob extends Configured implements Tool, JobSubmi
     this.appConf = conf;
   }
 
-  private Job job = null;
+  private JobID submittedJobId;
 
   public String getSubmittedId() {
-    if (job == null ) {
-      return null;
-    }
-
-    JobID submittedJobId = job.getJobID();
     if (submittedJobId == null) {
       return null;
     } else {
@@ -124,7 +119,7 @@ public class TempletonControllerJob extends Configured implements Tool, JobSubmi
 
     String user = UserGroupInformation.getCurrentUser().getShortUserName();
     conf.set("user.name", user);
-    job = new Job(conf);
+    Job job = new Job(conf);
     job.setJarByClass(LaunchMapper.class);
     job.setJobName(TempletonControllerJob.class.getSimpleName());
     job.setMapperClass(LaunchMapper.class);
@@ -146,7 +141,7 @@ public class TempletonControllerJob extends Configured implements Tool, JobSubmi
 
     job.submit();
 
-    JobID submittedJobId = job.getJobID();
+    submittedJobId = job.getJobID();
     if(metastoreTokenStrForm != null) {
       //so that it can be cancelled later from CompleteDelegator
       DelegationTokenCache.getStringFormTokenCache().storeDelegationToken(

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonUtils.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonUtils.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonUtils.java
index e0ccc70..07b005b 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonUtils.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonUtils.java
@@ -362,7 +362,6 @@ public class TempletonUtils {
     if (hadoopFsIsMissing(defaultFs, p))
       throw new FileNotFoundException("File " + fname + " does not exist.");
 
-    FileSystem.closeAllForUGI(ugi);
     return p;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/ConcurrentJobRequestsTestBase.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/ConcurrentJobRequestsTestBase.java b/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/ConcurrentJobRequestsTestBase.java
deleted file mode 100644
index 5fcae46..0000000
--- a/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/ConcurrentJobRequestsTestBase.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hive.hcatalog.templeton;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.Future;
-
-import org.apache.hive.hcatalog.templeton.tool.TempletonControllerJob;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.mockito.Mockito;
-import org.mockito.stubbing.Answer;
-
-/*
- * Base class for mocking job operations with concurrent requests.
- */
-public class ConcurrentJobRequestsTestBase {
-  private static final Logger LOG = LoggerFactory.getLogger(ConcurrentJobRequestsTestBase.class);
-  private boolean started = false;
-  private Object lock = new Object();
-
-  MockAnswerTestHelper<QueueStatusBean> statusJobHelper = new MockAnswerTestHelper<QueueStatusBean>();
-  MockAnswerTestHelper<QueueStatusBean> killJobHelper = new MockAnswerTestHelper<QueueStatusBean>();
-  MockAnswerTestHelper<List<JobItemBean>> listJobHelper = new MockAnswerTestHelper<List<JobItemBean>>();
-  MockAnswerTestHelper<Integer> submitJobHelper = new MockAnswerTestHelper<Integer>();
-
-  /*
-   * Waits for other threads to join and returns with its Id.
-   */
-  private int waitForAllThreadsToStart(JobRunnable jobRunnable, int poolThreadCount) {
-    int currentId = jobRunnable.threadStartCount.incrementAndGet();
-    LOG.info("Waiting for other threads with thread id: " + currentId);
-    synchronized(lock) {
-      /*
-       * We need a total of poolThreadCount + 1 threads to start at same. There are
-       * poolThreadCount threads in thread pool and another one which has started them.
-       * The thread which sees atomic counter as poolThreadCount+1 is the last thread`
-       * to join and wake up all threads to start all at once.
-       */
-      if (currentId > poolThreadCount) {
-        LOG.info("Waking up all threads: " + currentId);
-        started = true;
-        this.lock.notifyAll();
-      } else {
-        while (!started) {
-          try {
-            this.lock.wait();
-          } catch (InterruptedException ignore) {
-          }
-        }
-      }
-    }
-
-    return currentId;
-  }
-
-  public JobRunnable ConcurrentJobsStatus(final int threadCount, AppConfig appConfig,
-         final boolean killThreads, boolean interruptThreads, final Answer<QueueStatusBean> answer)
-         throws IOException, InterruptedException, QueueException, NotAuthorizedException,
-         BadParam, BusyException {
-
-    StatusDelegator delegator = new StatusDelegator(appConfig);
-    final StatusDelegator mockDelegator = Mockito.spy(delegator);
-
-    Mockito.doAnswer(answer).when(mockDelegator).getJobStatus(Mockito.any(String.class),
-                             Mockito.any(String.class));
-
-    JobRunnable statusJobRunnable = new JobRunnable() {
-      @Override
-      public void run() {
-        try {
-          int threadId = waitForAllThreadsToStart(this, threadCount);
-          LOG.info("Started executing Job Status operation. ThreadId : " + threadId);
-          mockDelegator.run("admin", "job_1000" + threadId);
-        } catch (Exception ex) {
-          exception = ex;
-        }
-      }
-    };
-
-    executeJobOperations(statusJobRunnable, threadCount, killThreads, interruptThreads);
-    return statusJobRunnable;
-  }
-
-  public JobRunnable ConcurrentListJobs(final int threadCount, AppConfig config,
-         final boolean killThreads, boolean interruptThreads, final Answer<List<JobItemBean>> answer)
-         throws IOException, InterruptedException, QueueException, NotAuthorizedException,
-         BadParam, BusyException {
-
-    ListDelegator delegator = new ListDelegator(config);
-    final ListDelegator mockDelegator = Mockito.spy(delegator);
-
-    Mockito.doAnswer(answer).when(mockDelegator).listJobs(Mockito.any(String.class),
-                             Mockito.any(boolean.class), Mockito.any(String.class),
-                             Mockito.any(int.class), Mockito.any(boolean.class));
-
-    JobRunnable listJobRunnable = new JobRunnable() {
-      @Override
-      public void run() {
-        try {
-          int threadId = waitForAllThreadsToStart(this, threadCount);
-          LOG.info("Started executing Job List operation. ThreadId : " + threadId);
-          mockDelegator.run("admin", true, "", 10, true);
-        } catch (Exception ex) {
-          exception = ex;
-        }
-      }
-    };
-
-    executeJobOperations(listJobRunnable, threadCount, killThreads, interruptThreads);
-    return listJobRunnable;
-  }
-
-  public JobRunnable SubmitConcurrentJobs(final int threadCount, AppConfig config,
-         final boolean killThreads, boolean interruptThreads, final Answer<Integer> responseAnswer,
-         final Answer<QueueStatusBean> timeoutResponseAnswer, final String jobIdResponse)
-         throws IOException, InterruptedException, QueueException, NotAuthorizedException,
-         BusyException, TimeoutException, Exception {
-
-    LauncherDelegator delegator = new LauncherDelegator(config);
-    final LauncherDelegator mockDelegator = Mockito.spy(delegator);
-    final List<String> listArgs = new ArrayList<String>();
-
-    TempletonControllerJob mockCtrl = Mockito.mock(TempletonControllerJob.class);
-
-    Mockito.doReturn(jobIdResponse).when(mockCtrl).getSubmittedId();
-
-    Mockito.doReturn(mockCtrl).when(mockDelegator).getTempletonController();
-
-    Mockito.doAnswer(responseAnswer).when(mockDelegator).runTempletonControllerJob(
-              Mockito.any(TempletonControllerJob.class), Mockito.any(List.class));
-
-    Mockito.doAnswer(timeoutResponseAnswer).when(mockDelegator).killJob(
-              Mockito.any(String.class), Mockito.any(String.class));
-
-    Mockito.doNothing().when(mockDelegator).registerJob(Mockito.any(String.class),
-           Mockito.any(String.class), Mockito.any(String.class), Mockito.any(Map.class));
-
-    JobRunnable submitJobRunnable = new JobRunnable() {
-      @Override
-      public void run() {
-        try {
-          int threadId = waitForAllThreadsToStart(this, threadCount);
-          LOG.info("Started executing Job Submit operation. ThreadId : " + threadId);
-          mockDelegator.enqueueController("admin", null, "", listArgs);
-        } catch (Throwable ex) {
-          exception = ex;
-        }
-      }
-    };
-
-    executeJobOperations(submitJobRunnable, threadCount, killThreads, interruptThreads);
-    return submitJobRunnable;
-  }
-
-  public void executeJobOperations(JobRunnable jobRunnable, int threadCount, boolean killThreads,
-                                   boolean interruptThreads)
-    throws IOException, InterruptedException, QueueException, NotAuthorizedException {
-
-    started = false;
-
-    ExecutorService executorService = new ThreadPoolExecutor(threadCount, threadCount, 0L,
-        TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());;
-
-    ArrayList<Future<?>> futures = new ArrayList<Future<?>>();
-    for (int i = 0; i < threadCount; i++) {
-      futures.add(executorService.submit(jobRunnable));
-    }
-
-    waitForAllThreadsToStart(jobRunnable, threadCount);
-    LOG.info("Started all threads ");
-
-    if (killThreads) {
-      executorService.shutdownNow();
-    } else {
-      if (interruptThreads){
-        for (Future<?> future : futures) {
-          LOG.info("Cancelling the thread");
-          future.cancel(true);
-        }
-      }
-
-      executorService.shutdown();
-    }
-
-    /*
-     * For both graceful or forceful shutdown, wait for tasks to terminate such that
-     * appropriate exceptions are raised and stored in JobRunnable.exception.
-     */
-    if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
-      LOG.info("Force Shutting down the pool\n");
-      if (!killThreads) {
-        /*
-         * killThreads option has already done force shutdown. No need to do again.
-         */
-        executorService.shutdownNow();
-      }
-    }
-  }
-
-  public abstract class JobRunnable implements Runnable {
-    public volatile Throwable exception = null;
-    public AtomicInteger threadStartCount = new AtomicInteger(0);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/MockAnswerTestHelper.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/MockAnswerTestHelper.java b/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/MockAnswerTestHelper.java
deleted file mode 100644
index 9f1744e..0000000
--- a/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/MockAnswerTestHelper.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hive.hcatalog.templeton;
-
-import java.io.IOException;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-
-/*
- * Helper class to generate mocked response.
- */
-public class MockAnswerTestHelper<T> {
-  public Answer<T> getIOExceptionAnswer() {
-    return new Answer<T>() {
-      @Override
-      public T answer(InvocationOnMock invocation) throws Exception {
-        throw new IOException("IOException raised manually.");
-      }
-    };
-  }
-
-  public Answer<T> getOutOfMemoryErrorAnswer() {
-    return new Answer<T>() {
-      @Override
-      public T answer(InvocationOnMock invocation) throws OutOfMemoryError {
-        throw new OutOfMemoryError("OutOfMemoryError raised manually.");
-      }
-    };
-  }
-
-  public Answer<T> getDelayedResonseAnswer(final int delayInSeconds, final T response) {
-    return new Answer<T>() {
-      @Override
-      public T answer(InvocationOnMock invocation) throws InterruptedException {
-        Thread.sleep(1000 * delayInSeconds);
-        return response;
-      }
-    };
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequests.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequests.java b/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequests.java
deleted file mode 100644
index 695dcc6..0000000
--- a/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequests.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hive.hcatalog.templeton;
-
-import java.util.ArrayList;
-
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import static org.junit.Assert.assertTrue;
-
-/*
- * Test submission of concurrent job requests.
- */
-public class TestConcurrentJobRequests extends ConcurrentJobRequestsTestBase {
-
-  private static AppConfig config;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  @BeforeClass
-  public static void setUp() {
-    final String[] args = new String[] {};
-    Main main = new Main(args);
-    config = main.getAppConfigInstance();
-  }
-
-  @Test
-  public void ConcurrentJobsStatusSuccess() {
-    try {
-      JobRunnable jobRunnable = ConcurrentJobsStatus(6, config, false, false,
-                statusJobHelper.getDelayedResonseAnswer(4, new QueueStatusBean("job_1000", "Job not found")));
-      assertTrue(jobRunnable.exception == null);
-    } catch (Exception e) {
-      assertTrue(false);
-    }
-  }
-
-  @Test
-  public void ConcurrentListJobsSuccess() {
-    try {
-      JobRunnable jobRunnable = ConcurrentListJobs(6, config, false, false,
-                listJobHelper.getDelayedResonseAnswer(4, new ArrayList<JobItemBean>()));
-      assertTrue(jobRunnable.exception == null);
-    } catch (Exception e) {
-      assertTrue(false);
-    }
-  }
-
-  @Test
-  public void ConcurrentSubmitJobsSuccess() {
-    try {
-      JobRunnable jobRunnable = SubmitConcurrentJobs(6, config, false, false,
-                submitJobHelper.getDelayedResonseAnswer(4, 0),
-                killJobHelper.getDelayedResonseAnswer(4, null), "job_1000");
-      assertTrue(jobRunnable.exception == null);
-    } catch (Exception e) {
-      assertTrue(false);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequestsThreads.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequestsThreads.java b/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequestsThreads.java
deleted file mode 100644
index 6f8da40..0000000
--- a/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequestsThreads.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hive.hcatalog.templeton;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.concurrent.TimeoutException;
-import org.eclipse.jetty.http.HttpStatus;
-
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import static org.junit.Assert.assertTrue;
-
-/*
- * Test submission of concurrent job requests with the controlled number of concurrent
- * Requests. Verify that we get busy exception and appropriate message.
- */
-public class TestConcurrentJobRequestsThreads extends ConcurrentJobRequestsTestBase {
-
-  private static AppConfig config;
-  private static QueueStatusBean statusBean;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  @BeforeClass
-  public static void setUp() {
-    final String[] args = new String[] {};
-    Main main = new Main(args);
-    config = main.getAppConfigInstance();
-    config.setInt(AppConfig.JOB_STATUS_MAX_THREADS, 5);
-    config.setInt(AppConfig.JOB_LIST_MAX_THREADS, 5);
-    config.setInt(AppConfig.JOB_SUBMIT_MAX_THREADS, 5);
-    statusBean = new QueueStatusBean("job_1000", "Job not found");
-  }
-
-  @Test
-  public void ConcurrentJobsStatusTooManyRequestsException() {
-    try {
-      JobRunnable jobRunnable = ConcurrentJobsStatus(6, config, false, false,
-                statusJobHelper.getDelayedResonseAnswer(4, statusBean));
-      assertTrue(jobRunnable.exception != null);
-      assertTrue(jobRunnable.exception instanceof TooManyRequestsException);
-      TooManyRequestsException ex = (TooManyRequestsException)jobRunnable.exception;
-      assertTrue(ex.httpCode == TooManyRequestsException.TOO_MANY_REQUESTS_429);
-      String expectedMessage = "Unable to service the status job request as templeton service is busy "
-                                 + "with too many status job requests. Please wait for some time before "
-                                 + "retrying the operation. Please refer to the config "
-                                 + "templeton.parallellism.job.status to configure concurrent requests.";
-      assertTrue(jobRunnable.exception.getMessage().contains(expectedMessage));
-
-      /*
-       * Verify that new job requests have no issues.
-       */
-      jobRunnable = ConcurrentJobsStatus(5, config, false, false,
-                statusJobHelper.getDelayedResonseAnswer(4, statusBean));
-      assertTrue(jobRunnable.exception == null);
-    } catch (Exception e) {
-      assertTrue(false);
-    }
-  }
-
-  @Test
-  public void ConcurrentListJobsTooManyRequestsException() {
-    try {
-      JobRunnable jobRunnable = ConcurrentListJobs(6, config, false, false,
-                listJobHelper.getDelayedResonseAnswer(4, new ArrayList<JobItemBean>()));
-      assertTrue(jobRunnable.exception != null);
-      assertTrue(jobRunnable.exception instanceof TooManyRequestsException);
-      TooManyRequestsException ex = (TooManyRequestsException)jobRunnable.exception;
-      assertTrue(ex.httpCode == TooManyRequestsException.TOO_MANY_REQUESTS_429);
-      String expectedMessage = "Unable to service the list job request as templeton service is busy "
-                               + "with too many list job requests. Please wait for some time before "
-                               + "retrying the operation. Please refer to the config "
-                               + "templeton.parallellism.job.list to configure concurrent requests.";
-      assertTrue(jobRunnable.exception.getMessage().contains(expectedMessage));
-
-      /*
-       * Verify that new job requests have no issues.
-       */
-      jobRunnable = ConcurrentListJobs(5, config, false, false,
-                listJobHelper.getDelayedResonseAnswer(4, new ArrayList<JobItemBean>()));
-      assertTrue(jobRunnable.exception == null);
-    } catch (Exception e) {
-      assertTrue(false);
-    }
-  }
-
-  @Test
-  public void ConcurrentSubmitJobsTooManyRequestsException() {
-    try {
-      JobRunnable jobRunnable = SubmitConcurrentJobs(6, config, false, false,
-                submitJobHelper.getDelayedResonseAnswer(4, 0),
-                killJobHelper.getDelayedResonseAnswer(0, statusBean), "job_1000");
-      assertTrue(jobRunnable.exception != null);
-      assertTrue(jobRunnable.exception instanceof TooManyRequestsException);
-      TooManyRequestsException ex = (TooManyRequestsException)jobRunnable.exception;
-      assertTrue(ex.httpCode == TooManyRequestsException.TOO_MANY_REQUESTS_429);
-      String expectedMessage = "Unable to service the submit job request as templeton service is busy "
-                                + "with too many submit job requests. Please wait for some time before "
-                                + "retrying the operation. Please refer to the config "
-                                + "templeton.parallellism.job.submit to configure concurrent requests.";
-      assertTrue(jobRunnable.exception.getMessage().contains(expectedMessage));
-
-      /*
-       * Verify that new job requests have no issues.
-       */
-      jobRunnable = SubmitConcurrentJobs(5, config, false, false,
-                submitJobHelper.getDelayedResonseAnswer(4, 0),
-                killJobHelper.getDelayedResonseAnswer(0, statusBean), "job_1000");
-      assertTrue(jobRunnable.exception == null);
-    } catch (Exception e) {
-      assertTrue(false);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequestsThreadsAndTimeout.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequestsThreadsAndTimeout.java b/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequestsThreadsAndTimeout.java
deleted file mode 100644
index ef49cbd..0000000
--- a/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequestsThreadsAndTimeout.java
+++ /dev/null
@@ -1,374 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hive.hcatalog.templeton;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.concurrent.TimeoutException;
-import org.eclipse.jetty.http.HttpStatus;
-
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import static org.junit.Assert.assertTrue;
-
-/*
- * Test submission of concurrent job requests with the controlled number of concurrent
- * Requests and job request execution time outs. Verify that we get appropriate exceptions
- * and exception message.
- */
-public class TestConcurrentJobRequestsThreadsAndTimeout extends ConcurrentJobRequestsTestBase {
-
-  private static AppConfig config;
-  private static QueueStatusBean statusBean;
-  private static String statusTooManyRequestsExceptionMessage;
-  private static String listTooManyRequestsExceptionMessage;
-  private static String submitTooManyRequestsExceptionMessage;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  @BeforeClass
-  public static void setUp() {
-    final String[] args = new String[] {};
-    Main main = new Main(args);
-    config = main.getAppConfigInstance();
-    config.setInt(AppConfig.JOB_STATUS_MAX_THREADS, 5);
-    config.setInt(AppConfig.JOB_LIST_MAX_THREADS, 5);
-    config.setInt(AppConfig.JOB_SUBMIT_MAX_THREADS, 5);
-    config.setInt(AppConfig.JOB_SUBMIT_TIMEOUT, 5);
-    config.setInt(AppConfig.JOB_STATUS_TIMEOUT, 5);
-    config.setInt(AppConfig.JOB_LIST_TIMEOUT, 5);
-    config.setInt(AppConfig.JOB_TIMEOUT_TASK_RETRY_COUNT, 4);
-    config.setInt(AppConfig.JOB_TIMEOUT_TASK_RETRY_INTERVAL, 1);
-    statusBean = new QueueStatusBean("job_1000", "Job not found");
-
-    statusTooManyRequestsExceptionMessage = "Unable to service the status job request as "
-                                 + "templeton service is busy with too many status job requests. "
-                                 + "Please wait for some time before retrying the operation. "
-                                 + "Please refer to the config templeton.parallellism.job.status "
-                                 + "to configure concurrent requests.";
-    listTooManyRequestsExceptionMessage = "Unable to service the list job request as "
-                                 + "templeton service is busy with too many list job requests. "
-                                 + "Please wait for some time before retrying the operation. "
-                                 + "Please refer to the config templeton.parallellism.job.list "
-                                 + "to configure concurrent requests.";
-    submitTooManyRequestsExceptionMessage = "Unable to service the submit job request as "
-                                 + "templeton service is busy with too many submit job requests. "
-                                 + "Please wait for some time before retrying the operation. "
-                                 + "Please refer to the config templeton.parallellism.job.submit "
-                                 + "to configure concurrent requests.";
-  }
-
-  @Test
-  public void ConcurrentJobsStatusTooManyRequestsException() {
-    try {
-      JobRunnable jobRunnable = ConcurrentJobsStatus(6, config, false, false,
-                statusJobHelper.getDelayedResonseAnswer(4, statusBean));
-      verifyTooManyRequestsException(jobRunnable.exception, this.statusTooManyRequestsExceptionMessage);
-    } catch (Exception e) {
-      assertTrue(false);
-    }
-  }
-
-  @Test
-  public void ConcurrentListJobsTooManyRequestsException() {
-    try {
-      JobRunnable jobRunnable = ConcurrentListJobs(6, config, false, false,
-                listJobHelper.getDelayedResonseAnswer(4, new ArrayList<JobItemBean>()));
-      verifyTooManyRequestsException(jobRunnable.exception, this.listTooManyRequestsExceptionMessage);
-    } catch (Exception e) {
-      assertTrue(false);
-    }
-  }
-
-  @Test
-  public void ConcurrentSubmitJobsTooManyRequestsException() {
-    try {
-      JobRunnable jobRunnable = SubmitConcurrentJobs(6, config, false, false,
-                submitJobHelper.getDelayedResonseAnswer(4, 0),
-                killJobHelper.getDelayedResonseAnswer(0, statusBean), "job_1000");
-      verifyTooManyRequestsException(jobRunnable.exception, this.submitTooManyRequestsExceptionMessage);
-    } catch (Exception e) {
-      assertTrue(false);
-    }
-  }
-
-  @Test
-  public void ConcurrentJobsStatusTimeOutException() {
-    try {
-      JobRunnable jobRunnable = ConcurrentJobsStatus(5, config, false, false,
-                statusJobHelper.getDelayedResonseAnswer(6, statusBean));
-      assertTrue(jobRunnable.exception != null);
-      assertTrue(jobRunnable.exception instanceof TimeoutException);
-      String expectedMessage = "Status job request got timed out. Please wait for some time before "
-                               + "retrying the operation. Please refer to the config "
-                               + "templeton.job.status.timeout to configure job request time out.";
-      assertTrue(jobRunnable.exception.getMessage().contains(expectedMessage));
-
-      /*
-       * Verify that new job requests should succeed with no issues.
-       */
-      jobRunnable = ConcurrentJobsStatus(5, config, false, false,
-                statusJobHelper.getDelayedResonseAnswer(0, statusBean));
-      assertTrue(jobRunnable.exception == null);
-    } catch (Exception e) {
-      assertTrue(false);
-    }
-  }
-
-  @Test
-  public void ConcurrentListJobsTimeOutException() {
-    try {
-      JobRunnable jobRunnable = ConcurrentListJobs(5, config, false, false,
-                listJobHelper.getDelayedResonseAnswer(6, new ArrayList<JobItemBean>()));
-      assertTrue(jobRunnable.exception != null);
-      assertTrue(jobRunnable.exception instanceof TimeoutException);
-      String expectedMessage = "List job request got timed out. Please wait for some time before "
-                               + "retrying the operation. Please refer to the config "
-                               + "templeton.job.list.timeout to configure job request time out.";
-
-      assertTrue(jobRunnable.exception.getMessage().contains(expectedMessage));
-
-      /*
-       * Verify that new job requests should succeed with no issues.
-       */
-      jobRunnable = ConcurrentListJobs(5, config, false, false,
-                listJobHelper.getDelayedResonseAnswer(1, new ArrayList<JobItemBean>()));
-      assertTrue(jobRunnable.exception == null);
-    } catch (Exception e) {
-      assertTrue(false);
-    }
-  }
-
-  @Test
-  public void ConcurrentSubmitJobsTimeOutException() {
-    try {
-      JobRunnable jobRunnable = SubmitConcurrentJobs(5, config, false, false,
-                submitJobHelper.getDelayedResonseAnswer(6, 0),
-                killJobHelper.getDelayedResonseAnswer(0, statusBean), "job_1000");
-      assertTrue(jobRunnable.exception != null);
-      assertTrue(jobRunnable.exception instanceof QueueException);
-      String expectedMessage = "Submit job request got timed out. Please wait for some time before "
-                               + "retrying the operation. Please refer to the config "
-                               + "templeton.job.submit.timeout to configure job request time out.";
-      assertTrue(jobRunnable.exception.getMessage().contains(expectedMessage));
-
-      /*
-       * For submit operation, tasks are not cancelled. Verify that new job request
-       * should fail with TooManyRequestsException.
-       */
-      jobRunnable = SubmitConcurrentJobs(1, config, false, false,
-                submitJobHelper.getDelayedResonseAnswer(0, 0),
-                killJobHelper.getDelayedResonseAnswer(0, statusBean), "job_1000");
-      verifyTooManyRequestsException(jobRunnable.exception, this.submitTooManyRequestsExceptionMessage);
-
-     /*
-      * Sleep until all threads with clean up tasks are completed.
-      */
-      Thread.sleep(2000);
-
-      /*
-       * Now, tasks would have passed. Verify that new job requests should succeed with no issues.
-       */
-      jobRunnable = SubmitConcurrentJobs(5, config, false, false,
-                submitJobHelper.getDelayedResonseAnswer(0, 0),
-                killJobHelper.getDelayedResonseAnswer(0, statusBean), "job_1000");
-      assertTrue(jobRunnable.exception == null);
-    } catch (Exception e) {
-      assertTrue(false);
-    }
-  }
-
-  @Test
-  public void ConcurrentStatusJobsVerifyExceptions() {
-    try {
-      /*
-       * Trigger kill threads and verify we get InterruptedException and expected Message.
-       */
-      int timeoutTaskDelay = 4;
-      JobRunnable jobRunnable = ConcurrentJobsStatus(5, config, true, false,
-                statusJobHelper.getDelayedResonseAnswer(timeoutTaskDelay, statusBean));
-      assertTrue(jobRunnable.exception != null);
-      assertTrue(jobRunnable.exception instanceof InterruptedException);
-      String expectedMessage = "Status job request got interrupted. Please wait for some time before "
-                               + "retrying the operation.";
-      assertTrue(jobRunnable.exception.getMessage().contains(expectedMessage));
-
-      /*
-       * Interrupt all thread and verify we get InterruptedException and expected Message.
-       */
-      jobRunnable = ConcurrentJobsStatus(5, config, false, true,
-                statusJobHelper.getDelayedResonseAnswer(timeoutTaskDelay, statusBean));
-      assertTrue(jobRunnable.exception != null);
-      assertTrue(jobRunnable.exception instanceof InterruptedException);
-      assertTrue(jobRunnable.exception.getMessage().contains(expectedMessage));
-
-      /*
-       * Raise custom exception like IOException and verify expected Message.
-       */
-      jobRunnable = ConcurrentJobsStatus(5, config, false, false,
-                                     statusJobHelper.getIOExceptionAnswer());
-      assertTrue(jobRunnable.exception != null);
-      assertTrue(jobRunnable.exception.getCause() instanceof IOException);
-
-      /*
-       * Now new job requests should succeed as status operation has no cancel threads.
-       */
-      jobRunnable = ConcurrentJobsStatus(5, config, false, false,
-                statusJobHelper.getDelayedResonseAnswer(0, statusBean));
-      assertTrue(jobRunnable.exception == null);
-    } catch (Exception e) {
-      assertTrue(false);
-    }
-  }
-
-  @Test
-  public void ConcurrentListJobsVerifyExceptions() {
-    try {
-      /*
-       * Trigger kill threads and verify we get InterruptedException and expected Message.
-       */
-      int timeoutTaskDelay = 4;
-      JobRunnable jobRunnable = ConcurrentListJobs(5, config, true, false,
-                listJobHelper.getDelayedResonseAnswer(timeoutTaskDelay, new ArrayList<JobItemBean>()));
-      assertTrue(jobRunnable.exception != null);
-      assertTrue(jobRunnable.exception instanceof InterruptedException);
-      String expectedMessage = "List job request got interrupted. Please wait for some time before "
-                               + "retrying the operation.";
-      assertTrue(jobRunnable.exception.getMessage().contains(expectedMessage));
-
-      /*
-       * Interrupt all thread and verify we get InterruptedException and expected Message.
-       */
-      jobRunnable = ConcurrentListJobs(5, config, false, true,
-                listJobHelper.getDelayedResonseAnswer(timeoutTaskDelay, new ArrayList<JobItemBean>()));
-      assertTrue(jobRunnable.exception != null);
-      assertTrue(jobRunnable.exception instanceof InterruptedException);
-      assertTrue(jobRunnable.exception.getMessage().contains(expectedMessage));
-
-      /*
-       * Raise custom exception like IOException and verify expected Message.
-       */
-      jobRunnable = ConcurrentListJobs(5, config, false, false,
-                listJobHelper.getIOExceptionAnswer());
-      assertTrue(jobRunnable.exception != null);
-      assertTrue(jobRunnable.exception.getCause() instanceof IOException);
-
-      /*
-       * Now new job requests should succeed as list operation has no cancel threads.
-       */
-      jobRunnable = ConcurrentListJobs(5, config, false, false,
-                listJobHelper.getDelayedResonseAnswer(0, new ArrayList<JobItemBean>()));
-      assertTrue(jobRunnable.exception == null);
-    } catch (Exception e) {
-      assertTrue(false);
-    }
-  }
-
-  @Test
-  public void ConcurrentSubmitJobsVerifyExceptions() {
-    try {
-      int timeoutTaskDelay = 4;
-
-      /*
-       * Raise custom exception like IOException and verify expected Message.
-       * This should not invoke cancel operation.
-       */
-      JobRunnable jobRunnable = SubmitConcurrentJobs(1, config, false, false,
-                submitJobHelper.getIOExceptionAnswer(),
-                killJobHelper.getDelayedResonseAnswer(timeoutTaskDelay, statusBean), "job_1002");
-      assertTrue(jobRunnable.exception != null);
-      assertTrue(jobRunnable.exception instanceof QueueException);
-      assertTrue(jobRunnable.exception.getMessage().contains("IOException raised manually."));
-
-      /*
-       * Raise custom exception like IOException and verify expected Message.
-       * This should not invoke cancel operation.
-       */
-      jobRunnable = SubmitConcurrentJobs(1, config, false, false,
-                submitJobHelper.getOutOfMemoryErrorAnswer(),
-                killJobHelper.getDelayedResonseAnswer(timeoutTaskDelay, statusBean), "job_1003");
-      assertTrue(jobRunnable.exception != null);
-      assertTrue(jobRunnable.exception instanceof QueueException);
-      assertTrue(jobRunnable.exception.getMessage().contains("OutOfMemoryError raised manually."));
-
-      /*
-       * Trigger kill threads and verify that we get InterruptedException and expected
-       * Message. This should raise 3 kill operations and ensure that retries keep the time out
-       * occupied for 4 sec.
-       */
-      jobRunnable = SubmitConcurrentJobs(3, config, true, false,
-                submitJobHelper.getDelayedResonseAnswer(2, 0),
-                killJobHelper.getDelayedResonseAnswer(timeoutTaskDelay, statusBean), "job_1000");
-      assertTrue(jobRunnable.exception != null);
-      assertTrue(jobRunnable.exception instanceof QueueException);
-      String expectedMessage = "Submit job request got interrupted. Please wait for some time "
-                               + "before retrying the operation.";
-      assertTrue(jobRunnable.exception.getMessage().contains(expectedMessage));
-
-      /*
-       * Interrupt all threads and verify we get InterruptedException and expected
-       * Message. Also raise 2 kill operations and ensure that retries keep the time out
-       * occupied for 4 sec.
-       */
-      jobRunnable = SubmitConcurrentJobs(2, config, false, true,
-                submitJobHelper.getDelayedResonseAnswer(2, 0),
-                killJobHelper.getDelayedResonseAnswer(0, statusBean), "job_1001");
-      assertTrue(jobRunnable.exception != null);
-      assertTrue(jobRunnable.exception instanceof QueueException);
-      assertTrue(jobRunnable.exception.getMessage().contains(expectedMessage));
-
-      /*
-       * For submit operation, tasks are not cancelled. Verify that new job request
-       * should fail with TooManyRequestsException.
-       */
-      jobRunnable = SubmitConcurrentJobs(1, config, false, false,
-                submitJobHelper.getDelayedResonseAnswer(0, 0),
-                killJobHelper.getDelayedResonseAnswer(0, statusBean), "job_1002");
-      verifyTooManyRequestsException(jobRunnable.exception, this.submitTooManyRequestsExceptionMessage);
-
-      /*
-       * Sleep until all threads with clean up tasks are completed. 2 seconds completing task
-       * and 1 sec grace period.
-       */
-      Thread.sleep((timeoutTaskDelay + 2 + 1) * 1000);
-
-      /*
-       * Now new job requests should succeed as all cancel threads would have completed.
-       */
-      jobRunnable = SubmitConcurrentJobs(5, config, false, false,
-                submitJobHelper.getDelayedResonseAnswer(0, 0),
-                killJobHelper.getDelayedResonseAnswer(0, statusBean), "job_1004");
-      assertTrue(jobRunnable.exception == null);
-    } catch (Exception e) {
-      assertTrue(false);
-    }
-  }
-
-  private void verifyTooManyRequestsException(Throwable exception, String expectedMessage) {
-      assertTrue(exception != null);
-      assertTrue(exception instanceof TooManyRequestsException);
-      TooManyRequestsException ex = (TooManyRequestsException)exception;
-      assertTrue(ex.httpCode == TooManyRequestsException.TOO_MANY_REQUESTS_429);
-      assertTrue(exception.getMessage().contains(expectedMessage));
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hplsql/pom.xml
----------------------------------------------------------------------
diff --git a/hplsql/pom.xml b/hplsql/pom.xml
index 44da8b2..d1337cb 100644
--- a/hplsql/pom.xml
+++ b/hplsql/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hplsql/src/main/java/org/apache/hive/hplsql/Udf.java
----------------------------------------------------------------------
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Udf.java b/hplsql/src/main/java/org/apache/hive/hplsql/Udf.java
index 4901e89..9c29eeb 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/Udf.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/Udf.java
@@ -60,7 +60,15 @@ public class Udf extends GenericUDF {
   @Override
   public Object evaluate(DeferredObject[] arguments) throws HiveException {
     if (exec == null) {
-      initExec(arguments);
+      exec = new Exec(); 
+      String query = queryOI.getPrimitiveJavaObject(arguments[0].get());
+      String[] args = { "-e", query, "-trace" };
+      try {
+        exec.setUdfRun(true);
+        exec.init(args);
+      } catch (Exception e) {
+        throw new HiveException(e.getMessage());
+      }
     }
     if (arguments.length > 1) {
       setParameters(arguments);
@@ -71,22 +79,6 @@ public class Udf extends GenericUDF {
     }
     return null;
   }
-
-  /**
-   * init exec
-   */
-  public void initExec(DeferredObject[] arguments) throws HiveException {
-    exec = new Exec();
-    exec.enterGlobalScope();
-    String query = queryOI.getPrimitiveJavaObject(arguments[0].get());
-    String[] args = { "-e", query, "-trace" };
-    try {
-      exec.setUdfRun(true);
-      exec.init(args);
-    } catch (Exception e) {
-      throw new HiveException(e.getMessage());
-    }
-  }
   
   /**
    * Set parameters for the current call

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlUdf.java
----------------------------------------------------------------------
diff --git a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlUdf.java b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlUdf.java
deleted file mode 100644
index 3896229..0000000
--- a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlUdf.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hive.hplsql;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredJavaObject;
-
-public class TestHplsqlUdf {
-  StringObjectInspector queryOI = PrimitiveObjectInspectorFactory.javaStringObjectInspector;
-  ObjectInspector argOI = PrimitiveObjectInspectorFactory.javaStringObjectInspector;
-
-  /**
-   * test evaluate for exec init and setParameters
-   */
-  @Test
-  public void testEvaluateWithoutRun() throws HiveException {
-    // init udf
-    Udf udf = new Udf();
-    ObjectInspector[] initArguments = {queryOI, argOI};
-    udf.initialize(initArguments);
-    //set arguments
-    DeferredObject queryObj = new DeferredJavaObject("hello(:1)");
-      DeferredObject argObj = new DeferredJavaObject("name");
-      DeferredObject[] argumentsObj = {queryObj, argObj};
-      
-      // init exec and set parameters, included
-      udf.initExec(argumentsObj);
-      udf.setParameters(argumentsObj);
-      
-      // checking var exists and its value is right
-      Var var = udf.exec.findVariable(":1");
-      Assert.assertNotNull(var);
-      String val = (String) var.value;
-      Assert.assertEquals(val, "name");
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/custom-serde/pom.xml
----------------------------------------------------------------------
diff --git a/itests/custom-serde/pom.xml b/itests/custom-serde/pom.xml
index 78b68c5..166ffde 100644
--- a/itests/custom-serde/pom.xml
+++ b/itests/custom-serde/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/custom-udfs/pom.xml
----------------------------------------------------------------------
diff --git a/itests/custom-udfs/pom.xml b/itests/custom-udfs/pom.xml
index de7df16..b230b41 100644
--- a/itests/custom-udfs/pom.xml
+++ b/itests/custom-udfs/pom.xml
@@ -19,7 +19,7 @@ limitations under the License.
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/custom-udfs/udf-classloader-udf1/pom.xml
----------------------------------------------------------------------
diff --git a/itests/custom-udfs/udf-classloader-udf1/pom.xml b/itests/custom-udfs/udf-classloader-udf1/pom.xml
index f863efd..0a95c94 100644
--- a/itests/custom-udfs/udf-classloader-udf1/pom.xml
+++ b/itests/custom-udfs/udf-classloader-udf1/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it-custom-udfs</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/custom-udfs/udf-classloader-udf2/pom.xml
----------------------------------------------------------------------
diff --git a/itests/custom-udfs/udf-classloader-udf2/pom.xml b/itests/custom-udfs/udf-classloader-udf2/pom.xml
index 2553f3e..e3f30f1 100644
--- a/itests/custom-udfs/udf-classloader-udf2/pom.xml
+++ b/itests/custom-udfs/udf-classloader-udf2/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it-custom-udfs</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/custom-udfs/udf-classloader-util/pom.xml
----------------------------------------------------------------------
diff --git a/itests/custom-udfs/udf-classloader-util/pom.xml b/itests/custom-udfs/udf-classloader-util/pom.xml
index 565a661..fe285d7 100644
--- a/itests/custom-udfs/udf-classloader-util/pom.xml
+++ b/itests/custom-udfs/udf-classloader-util/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it-custom-udfs</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/custom-udfs/udf-vectorized-badexample/pom.xml
----------------------------------------------------------------------
diff --git a/itests/custom-udfs/udf-vectorized-badexample/pom.xml b/itests/custom-udfs/udf-vectorized-badexample/pom.xml
index 6dc923d..35c1a2f 100644
--- a/itests/custom-udfs/udf-vectorized-badexample/pom.xml
+++ b/itests/custom-udfs/udf-vectorized-badexample/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it-custom-udfs</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hcatalog-unit/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/pom.xml b/itests/hcatalog-unit/pom.xml
index c157aed..3ef87f9 100644
--- a/itests/hcatalog-unit/pom.xml
+++ b/itests/hcatalog-unit/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 93ff498..8468b84 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hive.metastore.RawStore;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
@@ -916,12 +915,6 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
   }
 
   @Override
-  public Map<String, ColumnStatisticsObj> getAggrColStatsForTablePartitions(String dbName,
-      String tableName) throws MetaException, NoSuchObjectException {
-    return objectStore.getAggrColStatsForTablePartitions(dbName, tableName);
-  }
-
-  @Override
   @CanNotRetry
   public Boolean commitTransactionExpectDeadlock() {
     return null;


http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql b/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql
deleted file mode 100644
index 8877681..0000000
--- a/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql
+++ /dev/null
@@ -1,340 +0,0 @@
--- Timestamp: 2011-09-22 15:32:02.024
--- Source database is: /home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
--- Connection URL is: jdbc:derby:/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
--- Specified schema is: APP
--- appendLogs: false
-
--- ----------------------------------------------
--- DDL Statements for functions
--- ----------------------------------------------
-
-CREATE FUNCTION "APP"."NUCLEUS_ASCII" (C CHAR(1)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.ascii' ;
-
-CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN VARCHAR(8000)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.matches' ;
-
--- ----------------------------------------------
--- DDL Statements for tables
--- ----------------------------------------------
-
-CREATE TABLE "APP"."DBS" ("DB_ID" BIGINT NOT NULL, "DESC" VARCHAR(4000), "DB_LOCATION_URI" VARCHAR(4000) NOT NULL, "NAME" VARCHAR(128), "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10));
-
-CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT);
-
-CREATE TABLE "APP"."DATABASE_PARAMS" ("DB_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
-
-CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" VARCHAR(128), "TBL_ID" BIGINT);
-
-CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
-
-CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(767) NOT NULL, "TYPE_NAME" CLOB, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL);
-
-CREATE TABLE "APP"."PARTITION_KEY_VALS" ("PART_ID" BIGINT NOT NULL, "PART_KEY_VAL" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."DB_PRIVS" ("DB_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "DB_PRIV" VARCHAR(128));
-
-CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DEFERRED_REBUILD" CHAR(1) NOT NULL, "INDEX_HANDLER_CLASS" VARCHAR(4000), "INDEX_NAME" VARCHAR(128), "INDEX_TBL_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "ORIG_TBL_ID" BIGINT, "SD_ID" BIGINT);
-
-CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
-
-CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
-
-CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000));
-
-CREATE TABLE "APP"."PART_PRIVS" ("PART_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_PRIV" VARCHAR(128));
-
-CREATE TABLE "APP"."ROLE_MAP" ("ROLE_GRANT_ID" BIGINT NOT NULL, "ADD_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "ROLE_ID" BIGINT);
-
-CREATE TABLE "APP"."TYPES" ("TYPES_ID" BIGINT NOT NULL, "TYPE_NAME" VARCHAR(128), "TYPE1" VARCHAR(767), "TYPE2" VARCHAR(767));
-
-CREATE TABLE "APP"."GLOBAL_PRIVS" ("USER_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "USER_PRIV" VARCHAR(128));
-
-CREATE TABLE "APP"."PARTITION_PARAMS" ("PART_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
-
-CREATE TABLE "APP"."PARTITION_EVENTS" ("PART_NAME_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_TIME" BIGINT NOT NULL, "EVENT_TYPE" INTEGER NOT NULL, "PARTITION_NAME" VARCHAR(767), "TBL_NAME" VARCHAR(256));
-
-CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
-
-CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL);
-
-CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."PART_COL_PRIVS" ("PART_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_COL_PRIV" VARCHAR(128));
-
-CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, "INPUT_FORMAT" VARCHAR(4000), "IS_COMPRESSED" CHAR(1) NOT NULL, "LOCATION" VARCHAR(4000), "NUM_BUCKETS" INTEGER NOT NULL, "OUTPUT_FORMAT" VARCHAR(4000), "SERDE_ID" BIGINT, "CD_ID" BIGINT, "IS_STOREDASSUBDIRECTORIES" CHAR(1) NOT NULL);
-
-CREATE TABLE "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME" VARCHAR(256) NOT NULL, "NEXT_VAL" BIGINT NOT NULL);
-
-RUN '022-HIVE-11107.derby.sql';
-
-CREATE TABLE "APP"."BUCKETING_COLS" ("SD_ID" BIGINT NOT NULL, "BUCKET_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."TYPE_FIELDS" ("TYPE_NAME" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "FIELD_NAME" VARCHAR(128) NOT NULL, "FIELD_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."NUCLEUS_TABLES" ("CLASS_NAME" VARCHAR(128) NOT NULL, "TABLE_NAME" VARCHAR(128) NOT NULL, "TYPE" VARCHAR(4) NOT NULL, "OWNER" VARCHAR(2) NOT NULL, "VERSION" VARCHAR(20) NOT NULL, "INTERFACE_NAME" VARCHAR(256) DEFAULT NULL);
-
-CREATE TABLE "APP"."SD_PARAMS" ("SD_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
-
-CREATE TABLE "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID" BIGINT NOT NULL);
-
-CREATE TABLE "APP"."SKEWED_STRING_LIST_VALUES" ("STRING_LIST_ID" BIGINT NOT NULL, "STRING_LIST_VALUE" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."SKEWED_COL_NAMES" ("SD_ID" BIGINT NOT NULL, "SKEWED_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ("SD_ID" BIGINT NOT NULL, "STRING_LIST_ID_KID" BIGINT NOT NULL, "LOCATION" VARCHAR(4000));
-
-CREATE TABLE "APP"."SKEWED_VALUES" ("SD_ID_OID" BIGINT NOT NULL, "STRING_LIST_ID_EID" BIGINT NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."MASTER_KEYS" ("KEY_ID" INTEGER NOT NULL generated always as identity (start with 1), "MASTER_KEY" VARCHAR(767));
-
-CREATE TABLE "APP"."DELEGATION_TOKENS" ( "TOKEN_IDENT" VARCHAR(767) NOT NULL, "TOKEN" VARCHAR(767));
-
-CREATE TABLE "APP"."PART_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(256) NOT NULL, "PARTITION_NAME" VARCHAR(767) NOT NULL, "COLUMN_NAME" VARCHAR(767) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, "NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "PART_ID" BIGINT NOT NULL);
-
-CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255));
-
-CREATE TABLE "APP"."FUNCS" ("FUNC_ID" BIGINT NOT NULL, "CLASS_NAME" VARCHAR(4000), "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "FUNC_NAME" VARCHAR(128), "FUNC_TYPE" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10));
-
-CREATE TABLE "APP"."FUNC_RU" ("FUNC_ID" BIGINT NOT NULL, "RESOURCE_TYPE" INTEGER NOT NULL, "RESOURCE_URI" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."NOTIFICATION_LOG" ("NL_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_ID" BIGINT NOT NULL, "EVENT_TIME" INTEGER NOT NULL, "EVENT_TYPE" VARCHAR(32) NOT NULL, "MESSAGE" CLOB, "TBL_NAME" VARCHAR(256), "MESSAGE_FORMAT" VARCHAR(16));
-
-CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL);
-
-CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX" INTEGER NOT NULL, "CHILD_TBL_ID" BIGINT, "PARENT_CD_ID" BIGINT NOT NULL, "PARENT_INTEGER_IDX" INTEGER, "PARENT_TBL_ID" BIGINT NOT NULL,  "POSITION" BIGINT NOT NULL, "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, "CONSTRAINT_TYPE" SMALLINT NOT NULL, "UPDATE_RULE" SMALLINT, "DELETE_RULE" SMALLINT, "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL);
-
-ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY ("CONSTRAINT_NAME", "POSITION");
-
-CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"("PARENT_TBL_ID");
-
--- ----------------------------------------------
--- DDL Statements for indexes
--- ----------------------------------------------
-
-CREATE UNIQUE INDEX "APP"."UNIQUEINDEX" ON "APP"."IDXS" ("INDEX_NAME", "ORIG_TBL_ID");
-
-CREATE INDEX "APP"."TABLECOLUMNPRIVILEGEINDEX" ON "APP"."TBL_COL_PRIVS" ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
-
-CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."ROLEENTITYINDEX" ON "APP"."ROLES" ("ROLE_NAME");
-
-CREATE INDEX "APP"."TABLEPRIVILEGEINDEX" ON "APP"."TBL_PRIVS" ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."UNIQUETABLE" ON "APP"."TBLS" ("TBL_NAME", "DB_ID");
-
-CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME");
-
-CREATE UNIQUE INDEX "APP"."USERROLEMAPINDEX" ON "APP"."ROLE_MAP" ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."GLOBALPRIVILEGEINDEX" ON "APP"."GLOBAL_PRIVS" ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."UNIQUE_TYPE" ON "APP"."TYPES" ("TYPE_NAME");
-
-CREATE INDEX "APP"."PARTITIONCOLUMNPRIVILEGEINDEX" ON "APP"."PART_COL_PRIVS" ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("PART_NAME", "TBL_ID");
-
-CREATE UNIQUE INDEX "APP"."UNIQUEFUNCTION" ON "APP"."FUNCS" ("FUNC_NAME", "DB_ID");
-
-CREATE INDEX "APP"."FUNCS_N49" ON "APP"."FUNCS" ("DB_ID");
-
-CREATE INDEX "APP"."FUNC_RU_N49" ON "APP"."FUNC_RU" ("FUNC_ID");
-
--- ----------------------------------------------
--- DDL Statements for keys
--- ----------------------------------------------
-
--- primary/unique
-ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_PK" PRIMARY KEY ("INDEX_ID");
-
-ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_PK" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
-
-ALTER TABLE "APP"."CDS" ADD CONSTRAINT "SQL110922153006460" PRIMARY KEY ("CD_ID");
-
-ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_PK" PRIMARY KEY ("DB_GRANT_ID");
-
-ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_PK" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEY_PK" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
-
-ALTER TABLE "APP"."SEQUENCE_TABLE" ADD CONSTRAINT "SEQUENCE_TABLE_PK" PRIMARY KEY ("SEQUENCE_NAME");
-
-ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_PK" PRIMARY KEY ("PART_GRANT_ID");
-
-ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_PK" PRIMARY KEY ("SD_ID");
-
-ALTER TABLE "APP"."SERDES" ADD CONSTRAINT "SERDES_PK" PRIMARY KEY ("SERDE_ID");
-
-ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_PK" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
-
-ALTER TABLE "APP"."PARTITION_EVENTS" ADD CONSTRAINT "PARTITION_EVENTS_PK" PRIMARY KEY ("PART_NAME_ID");
-
-ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_PK" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
-
-ALTER TABLE "APP"."ROLES" ADD CONSTRAINT "ROLES_PK" PRIMARY KEY ("ROLE_ID");
-
-ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_PK" PRIMARY KEY ("TBL_GRANT_ID");
-
-ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_PK" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."NUCLEUS_TABLES" ADD CONSTRAINT "NUCLEUS_TABLES_PK" PRIMARY KEY ("CLASS_NAME");
-
-ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_PK" PRIMARY KEY ("TBL_ID");
-
-ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_PK" PRIMARY KEY ("SD_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_PK" PRIMARY KEY ("DB_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_PK" PRIMARY KEY ("DB_ID");
-
-ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_PK" PRIMARY KEY ("ROLE_GRANT_ID");
-
-ALTER TABLE "APP"."GLOBAL_PRIVS" ADD CONSTRAINT "GLOBAL_PRIVS_PK" PRIMARY KEY ("USER_GRANT_ID");
-
-ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_PK" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."TYPES" ADD CONSTRAINT "TYPES_PK" PRIMARY KEY ("TYPES_ID");
-
-ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "SQL110922153006740" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
-
-ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_PK" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
-
-ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_PK" PRIMARY KEY ("PART_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_PK" PRIMARY KEY ("PART_ID");
-
-ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_PK" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."SKEWED_STRING_LIST" ADD CONSTRAINT "SKEWED_STRING_LIST_PK" PRIMARY KEY ("STRING_LIST_ID");
-
-ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_PK" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_PK" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
-
-ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_PK" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_PK" PRIMARY KEY ("CS_ID");
-
-ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_PK" PRIMARY KEY ("CS_ID");
-
-ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_PK" PRIMARY KEY ("FUNC_ID");
-
-ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_PK" PRIMARY KEY ("FUNC_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."NOTIFICATION_LOG" ADD CONSTRAINT "NOTIFICATION_LOG_PK" PRIMARY KEY ("NL_ID");
-
-ALTER TABLE "APP"."NOTIFICATION_SEQUENCE" ADD CONSTRAINT "NOTIFICATION_SEQUENCE_PK" PRIMARY KEY ("NNI_ID");
-
--- foreign
-ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK3" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_FK1" FOREIGN KEY ("INDEX_ID") REFERENCES "APP"."IDXS" ("INDEX_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEYS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK2" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_FK1" FOREIGN KEY ("TYPE_NAME") REFERENCES "APP"."TYPES" ("TYPES_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_FK1" FOREIGN KEY ("ROLE_ID") REFERENCES "APP"."ROLES" ("ROLE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "COLUMNS_V2_FK1" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_FK1" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK1" FOREIGN KEY ("SD_ID_OID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK2" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_FK" FOREIGN KEY ("TBL_ID") REFERENCES TBLS("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_FK" FOREIGN KEY ("PART_ID") REFERENCES PARTITIONS("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."VERSION" ADD CONSTRAINT "VERSION_PK" PRIMARY KEY ("VER_ID");
-
-ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "APP"."FUNCS" ("FUNC_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
--- ----------------------------------------------
--- DDL Statements for checks
--- ----------------------------------------------
-
-ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "SQL110318025504980" CHECK (DEFERRED_REBUILD IN ('Y','N'));
-
-ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SQL110318025505550" CHECK (IS_COMPRESSED IN ('Y','N'));
-
--- ----------------------------
--- Transaction and Lock Tables
--- ----------------------------
-RUN 'hive-txn-schema-3.0.0.derby.sql';
-
--- -----------------------------------------------------------------
--- Record schema version. Should be the last step in the init script
--- -----------------------------------------------------------------
-INSERT INTO "APP"."VERSION" (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '3.0.0', 'Hive release version 3.0.0');
-

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/derby/hive-txn-schema-2.2.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/hive-txn-schema-2.2.0.derby.sql b/metastore/scripts/upgrade/derby/hive-txn-schema-2.2.0.derby.sql
index 3a049aa..b31ea6e 100644
--- a/metastore/scripts/upgrade/derby/hive-txn-schema-2.2.0.derby.sql
+++ b/metastore/scripts/upgrade/derby/hive-txn-schema-2.2.0.derby.sql
@@ -39,7 +39,7 @@ CREATE TABLE TXN_COMPONENTS (
 CREATE TABLE COMPLETED_TXN_COMPONENTS (
   CTC_TXNID bigint,
   CTC_DATABASE varchar(128) NOT NULL,
-  CTC_TABLE varchar(256),
+  CTC_TABLE varchar(128),
   CTC_PARTITION varchar(767)
 );
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/derby/hive-txn-schema-2.3.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/hive-txn-schema-2.3.0.derby.sql b/metastore/scripts/upgrade/derby/hive-txn-schema-2.3.0.derby.sql
deleted file mode 100644
index 52713df..0000000
--- a/metastore/scripts/upgrade/derby/hive-txn-schema-2.3.0.derby.sql
+++ /dev/null
@@ -1,134 +0,0 @@
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements.  See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the License); you may not use this file except in compliance with
--- the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an AS IS BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
---
--- Tables for transaction management
--- 
-CREATE TABLE TXNS (
-  TXN_ID bigint PRIMARY KEY,
-  TXN_STATE char(1) NOT NULL,
-  TXN_STARTED bigint NOT NULL,
-  TXN_LAST_HEARTBEAT bigint NOT NULL,
-  TXN_USER varchar(128) NOT NULL,
-  TXN_HOST varchar(128) NOT NULL,
-  TXN_AGENT_INFO varchar(128),
-  TXN_META_INFO varchar(128),
-  TXN_HEARTBEAT_COUNT integer
-);
-
-CREATE TABLE TXN_COMPONENTS (
-  TC_TXNID bigint REFERENCES TXNS (TXN_ID),
-  TC_DATABASE varchar(128) NOT NULL,
-  TC_TABLE varchar(128),
-  TC_PARTITION varchar(767),
-  TC_OPERATION_TYPE char(1) NOT NULL
-);
-
-CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
-
-CREATE TABLE COMPLETED_TXN_COMPONENTS (
-  CTC_TXNID bigint,
-  CTC_DATABASE varchar(128) NOT NULL,
-  CTC_TABLE varchar(256),
-  CTC_PARTITION varchar(767)
-);
-
-CREATE TABLE NEXT_TXN_ID (
-  NTXN_NEXT bigint NOT NULL
-);
-INSERT INTO NEXT_TXN_ID VALUES(1);
-
-CREATE TABLE HIVE_LOCKS (
-  HL_LOCK_EXT_ID bigint NOT NULL,
-  HL_LOCK_INT_ID bigint NOT NULL,
-  HL_TXNID bigint,
-  HL_DB varchar(128) NOT NULL,
-  HL_TABLE varchar(128),
-  HL_PARTITION varchar(767),
-  HL_LOCK_STATE char(1) NOT NULL,
-  HL_LOCK_TYPE char(1) NOT NULL,
-  HL_LAST_HEARTBEAT bigint NOT NULL,
-  HL_ACQUIRED_AT bigint,
-  HL_USER varchar(128) NOT NULL,
-  HL_HOST varchar(128) NOT NULL,
-  HL_HEARTBEAT_COUNT integer,
-  HL_AGENT_INFO varchar(128),
-  HL_BLOCKEDBY_EXT_ID bigint,
-  HL_BLOCKEDBY_INT_ID bigint,
-  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
-); 
-
-CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
-
-CREATE TABLE NEXT_LOCK_ID (
-  NL_NEXT bigint NOT NULL
-);
-INSERT INTO NEXT_LOCK_ID VALUES(1);
-
-CREATE TABLE COMPACTION_QUEUE (
-  CQ_ID bigint PRIMARY KEY,
-  CQ_DATABASE varchar(128) NOT NULL,
-  CQ_TABLE varchar(128) NOT NULL,
-  CQ_PARTITION varchar(767),
-  CQ_STATE char(1) NOT NULL,
-  CQ_TYPE char(1) NOT NULL,
-  CQ_TBLPROPERTIES varchar(2048),
-  CQ_WORKER_ID varchar(128),
-  CQ_START bigint,
-  CQ_RUN_AS varchar(128),
-  CQ_HIGHEST_TXN_ID bigint,
-  CQ_META_INFO varchar(2048) for bit data,
-  CQ_HADOOP_JOB_ID varchar(32)
-);
-
-CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
-  NCQ_NEXT bigint NOT NULL
-);
-INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
-
-CREATE TABLE COMPLETED_COMPACTIONS (
-  CC_ID bigint PRIMARY KEY,
-  CC_DATABASE varchar(128) NOT NULL,
-  CC_TABLE varchar(128) NOT NULL,
-  CC_PARTITION varchar(767),
-  CC_STATE char(1) NOT NULL,
-  CC_TYPE char(1) NOT NULL,
-  CC_TBLPROPERTIES varchar(2048),
-  CC_WORKER_ID varchar(128),
-  CC_START bigint,
-  CC_END bigint,
-  CC_RUN_AS varchar(128),
-  CC_HIGHEST_TXN_ID bigint,
-  CC_META_INFO varchar(2048) for bit data,
-  CC_HADOOP_JOB_ID varchar(32)
-);
-
-CREATE TABLE AUX_TABLE (
-  MT_KEY1 varchar(128) NOT NULL,
-  MT_KEY2 bigint NOT NULL,
-  MT_COMMENT varchar(255),
-  PRIMARY KEY(MT_KEY1, MT_KEY2)
-);
-
---1st 4 cols make up a PK but since WS_PARTITION is nullable we can't declare such PK
---This is a good candidate for Index orgainzed table
-CREATE TABLE WRITE_SET (
-  WS_DATABASE varchar(128) NOT NULL,
-  WS_TABLE varchar(128) NOT NULL,
-  WS_PARTITION varchar(767),
-  WS_TXNID bigint NOT NULL,
-  WS_COMMIT_ID bigint NOT NULL,
-  WS_OPERATION_TYPE char(1) NOT NULL
-);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/derby/hive-txn-schema-3.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/hive-txn-schema-3.0.0.derby.sql b/metastore/scripts/upgrade/derby/hive-txn-schema-3.0.0.derby.sql
deleted file mode 100644
index 52713df..0000000
--- a/metastore/scripts/upgrade/derby/hive-txn-schema-3.0.0.derby.sql
+++ /dev/null
@@ -1,134 +0,0 @@
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements.  See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the License); you may not use this file except in compliance with
--- the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an AS IS BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
---
--- Tables for transaction management
--- 
-CREATE TABLE TXNS (
-  TXN_ID bigint PRIMARY KEY,
-  TXN_STATE char(1) NOT NULL,
-  TXN_STARTED bigint NOT NULL,
-  TXN_LAST_HEARTBEAT bigint NOT NULL,
-  TXN_USER varchar(128) NOT NULL,
-  TXN_HOST varchar(128) NOT NULL,
-  TXN_AGENT_INFO varchar(128),
-  TXN_META_INFO varchar(128),
-  TXN_HEARTBEAT_COUNT integer
-);
-
-CREATE TABLE TXN_COMPONENTS (
-  TC_TXNID bigint REFERENCES TXNS (TXN_ID),
-  TC_DATABASE varchar(128) NOT NULL,
-  TC_TABLE varchar(128),
-  TC_PARTITION varchar(767),
-  TC_OPERATION_TYPE char(1) NOT NULL
-);
-
-CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
-
-CREATE TABLE COMPLETED_TXN_COMPONENTS (
-  CTC_TXNID bigint,
-  CTC_DATABASE varchar(128) NOT NULL,
-  CTC_TABLE varchar(256),
-  CTC_PARTITION varchar(767)
-);
-
-CREATE TABLE NEXT_TXN_ID (
-  NTXN_NEXT bigint NOT NULL
-);
-INSERT INTO NEXT_TXN_ID VALUES(1);
-
-CREATE TABLE HIVE_LOCKS (
-  HL_LOCK_EXT_ID bigint NOT NULL,
-  HL_LOCK_INT_ID bigint NOT NULL,
-  HL_TXNID bigint,
-  HL_DB varchar(128) NOT NULL,
-  HL_TABLE varchar(128),
-  HL_PARTITION varchar(767),
-  HL_LOCK_STATE char(1) NOT NULL,
-  HL_LOCK_TYPE char(1) NOT NULL,
-  HL_LAST_HEARTBEAT bigint NOT NULL,
-  HL_ACQUIRED_AT bigint,
-  HL_USER varchar(128) NOT NULL,
-  HL_HOST varchar(128) NOT NULL,
-  HL_HEARTBEAT_COUNT integer,
-  HL_AGENT_INFO varchar(128),
-  HL_BLOCKEDBY_EXT_ID bigint,
-  HL_BLOCKEDBY_INT_ID bigint,
-  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
-); 
-
-CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
-
-CREATE TABLE NEXT_LOCK_ID (
-  NL_NEXT bigint NOT NULL
-);
-INSERT INTO NEXT_LOCK_ID VALUES(1);
-
-CREATE TABLE COMPACTION_QUEUE (
-  CQ_ID bigint PRIMARY KEY,
-  CQ_DATABASE varchar(128) NOT NULL,
-  CQ_TABLE varchar(128) NOT NULL,
-  CQ_PARTITION varchar(767),
-  CQ_STATE char(1) NOT NULL,
-  CQ_TYPE char(1) NOT NULL,
-  CQ_TBLPROPERTIES varchar(2048),
-  CQ_WORKER_ID varchar(128),
-  CQ_START bigint,
-  CQ_RUN_AS varchar(128),
-  CQ_HIGHEST_TXN_ID bigint,
-  CQ_META_INFO varchar(2048) for bit data,
-  CQ_HADOOP_JOB_ID varchar(32)
-);
-
-CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
-  NCQ_NEXT bigint NOT NULL
-);
-INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
-
-CREATE TABLE COMPLETED_COMPACTIONS (
-  CC_ID bigint PRIMARY KEY,
-  CC_DATABASE varchar(128) NOT NULL,
-  CC_TABLE varchar(128) NOT NULL,
-  CC_PARTITION varchar(767),
-  CC_STATE char(1) NOT NULL,
-  CC_TYPE char(1) NOT NULL,
-  CC_TBLPROPERTIES varchar(2048),
-  CC_WORKER_ID varchar(128),
-  CC_START bigint,
-  CC_END bigint,
-  CC_RUN_AS varchar(128),
-  CC_HIGHEST_TXN_ID bigint,
-  CC_META_INFO varchar(2048) for bit data,
-  CC_HADOOP_JOB_ID varchar(32)
-);
-
-CREATE TABLE AUX_TABLE (
-  MT_KEY1 varchar(128) NOT NULL,
-  MT_KEY2 bigint NOT NULL,
-  MT_COMMENT varchar(255),
-  PRIMARY KEY(MT_KEY1, MT_KEY2)
-);
-
---1st 4 cols make up a PK but since WS_PARTITION is nullable we can't declare such PK
---This is a good candidate for Index orgainzed table
-CREATE TABLE WRITE_SET (
-  WS_DATABASE varchar(128) NOT NULL,
-  WS_TABLE varchar(128) NOT NULL,
-  WS_PARTITION varchar(767),
-  WS_TXNID bigint NOT NULL,
-  WS_COMMIT_ID bigint NOT NULL,
-  WS_OPERATION_TYPE char(1) NOT NULL
-);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql b/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
index 3e87091..cbf5be1 100644
--- a/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
+++ b/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
@@ -1,7 +1,6 @@
 -- Upgrade MetaStore schema from 2.1.0 to 2.2.0
 RUN '037-HIVE-14496.derby.sql';
 RUN '038-HIVE-10562.derby.sql';
-RUN '039-HIVE-12274.derby.sql';
 
 RUN '037-HIVE-14637.derby.sql';
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/derby/upgrade-2.2.0-to-2.3.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/upgrade-2.2.0-to-2.3.0.derby.sql b/metastore/scripts/upgrade/derby/upgrade-2.2.0-to-2.3.0.derby.sql
deleted file mode 100644
index 81b9973..0000000
--- a/metastore/scripts/upgrade/derby/upgrade-2.2.0-to-2.3.0.derby.sql
+++ /dev/null
@@ -1,4 +0,0 @@
--- Upgrade MetaStore schema from 2.2.0 to 2.3.0
-RUN '040-HIVE-16399.derby.sql';
-
-UPDATE "APP".VERSION SET SCHEMA_VERSION='2.3.0', VERSION_COMMENT='Hive release version 2.3.0' where VER_ID=1;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql b/metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql
deleted file mode 100644
index 3bba523..0000000
--- a/metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql
+++ /dev/null
@@ -1,3 +0,0 @@
--- Upgrade MetaStore schema from 2.3.0 to 3.0.0
-
-UPDATE "APP".VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/derby/upgrade.order.derby
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/upgrade.order.derby b/metastore/scripts/upgrade/derby/upgrade.order.derby
index d7091b5..420174a 100644
--- a/metastore/scripts/upgrade/derby/upgrade.order.derby
+++ b/metastore/scripts/upgrade/derby/upgrade.order.derby
@@ -12,5 +12,3 @@
 1.2.0-to-2.0.0
 2.0.0-to-2.1.0
 2.1.0-to-2.2.0
-2.2.0-to-2.3.0
-2.3.0-to-3.0.0

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mssql/024-HIVE-12274.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/024-HIVE-12274.mssql.sql b/metastore/scripts/upgrade/mssql/024-HIVE-12274.mssql.sql
deleted file mode 100644
index 58da02c..0000000
--- a/metastore/scripts/upgrade/mssql/024-HIVE-12274.mssql.sql
+++ /dev/null
@@ -1,18 +0,0 @@
-ALTER TABLE "SERDE_PARAMS" ALTER COLUMN "PARAM_VALUE" VARCHAR(MAX);
-ALTER TABLE "TABLE_PARAMS" ALTER COLUMN "PARAM_VALUE" VARCHAR(MAX);
-ALTER TABLE "SD_PARAMS" ALTER COLUMN "PARAM_VALUE" VARCHAR(MAX);
-ALTER TABLE "COLUMNS_V2" ALTER COLUMN "TYPE_NAME" VARCHAR(MAX);
-
-ALTER TABLE "TBLS" ALTER COLUMN "TBL_NAME" nvarchar(256);
-ALTER TABLE "NOTIFICATION_LOG" ALTER COLUMN "TBL_NAME" nvarchar(256);
-ALTER TABLE "PARTITION_EVENTS" ALTER COLUMN "TBL_NAME" nvarchar(256);
-ALTER TABLE "TAB_COL_STATS" ALTER COLUMN "TABLE_NAME" nvarchar(256);
-ALTER TABLE "PART_COL_STATS" ALTER COLUMN "TABLE_NAME" nvarchar(256);
-ALTER TABLE "COMPLETED_TXN_COMPONENTS" ALTER COLUMN "CTC_TABLE" varchar(256);
-
-ALTER TABLE "COLUMNS_V2" ALTER COLUMN "COLUMN_NAME" nvarchar(767) NOT NULL;
-ALTER TABLE "PART_COL_PRIVS" ALTER COLUMN "COLUMN_NAME" nvarchar(767) DEFAULT NULL;
-ALTER TABLE "TBL_COL_PRIVS" ALTER COLUMN "COLUMN_NAME" nvarchar(767) DEFAULT NULL;
-ALTER TABLE "SORT_COLS" ALTER COLUMN "COLUMN_NAME" nvarchar(767) DEFAULT NULL;
-ALTER TABLE "TAB_COL_STATS" ALTER COLUMN "COLUMN_NAME" nvarchar(767) NOT NULL;
-ALTER TABLE "PART_COL_STATS" ALTER COLUMN "COLUMN_NAME" nvarchar(767) NOT NULL;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mssql/025-HIVE-16399.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/025-HIVE-16399.mssql.sql b/metastore/scripts/upgrade/mssql/025-HIVE-16399.mssql.sql
deleted file mode 100644
index f6cc31f..0000000
--- a/metastore/scripts/upgrade/mssql/025-HIVE-16399.mssql.sql
+++ /dev/null
@@ -1 +0,0 @@
-CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
index 7d133f4..c576615 100644
--- a/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
@@ -75,7 +75,7 @@ CREATE TABLE PART_COL_STATS
 (
     CS_ID bigint NOT NULL,
     AVG_COL_LEN float NULL,
-    "COLUMN_NAME" nvarchar(767) NOT NULL,
+    "COLUMN_NAME" nvarchar(1000) NOT NULL,
     COLUMN_TYPE nvarchar(128) NOT NULL,
     DB_NAME nvarchar(128) NOT NULL,
     BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
@@ -92,7 +92,7 @@ CREATE TABLE PART_COL_STATS
     NUM_TRUES bigint NULL,
     PART_ID bigint NULL,
     PARTITION_NAME nvarchar(767) NOT NULL,
-    "TABLE_NAME" nvarchar(256) NOT NULL
+    "TABLE_NAME" nvarchar(128) NOT NULL
 );
 
 ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
@@ -184,7 +184,7 @@ ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_
 CREATE TABLE PART_COL_PRIVS
 (
     PART_COLUMN_GRANT_ID bigint NOT NULL,
-    "COLUMN_NAME" nvarchar(767) NULL,
+    "COLUMN_NAME" nvarchar(1000) NULL,
     CREATE_TIME int NOT NULL,
     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
     GRANTOR nvarchar(128) NULL,
@@ -218,7 +218,7 @@ CREATE TABLE TAB_COL_STATS
 (
     CS_ID bigint NOT NULL,
     AVG_COL_LEN float NULL,
-    "COLUMN_NAME" nvarchar(767) NOT NULL,
+    "COLUMN_NAME" nvarchar(1000) NOT NULL,
     COLUMN_TYPE nvarchar(128) NOT NULL,
     DB_NAME nvarchar(128) NOT NULL,
     BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
@@ -234,7 +234,7 @@ CREATE TABLE TAB_COL_STATS
     NUM_NULLS bigint NOT NULL,
     NUM_TRUES bigint NULL,
     TBL_ID bigint NULL,
-    "TABLE_NAME" nvarchar(256) NOT NULL
+    "TABLE_NAME" nvarchar(128) NOT NULL
 );
 
 ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
@@ -283,7 +283,7 @@ ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
 CREATE TABLE TBL_COL_PRIVS
 (
     TBL_COLUMN_GRANT_ID bigint NOT NULL,
-    "COLUMN_NAME" nvarchar(767) NULL,
+    "COLUMN_NAME" nvarchar(1000) NULL,
     CREATE_TIME int NOT NULL,
     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
     GRANTOR nvarchar(128) NULL,
@@ -355,7 +355,7 @@ CREATE TABLE TBLS
     OWNER nvarchar(767) NULL,
     RETENTION int NOT NULL,
     SD_ID bigint NULL,
-    TBL_NAME nvarchar(256) NULL,
+    TBL_NAME nvarchar(128) NULL,
     TBL_TYPE nvarchar(128) NULL,
     VIEW_EXPANDED_TEXT text NULL,
     VIEW_ORIGINAL_TEXT text NULL,
@@ -390,7 +390,7 @@ CREATE TABLE PARTITION_EVENTS
     EVENT_TIME bigint NOT NULL,
     EVENT_TYPE int NOT NULL,
     PARTITION_NAME nvarchar(767) NULL,
-    TBL_NAME nvarchar(256) NULL
+    TBL_NAME nvarchar(128) NULL
 );
 
 ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
@@ -399,7 +399,7 @@ ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PAR
 CREATE TABLE SORT_COLS
 (
     SD_ID bigint NOT NULL,
-    "COLUMN_NAME" nvarchar(767) NULL,
+    "COLUMN_NAME" nvarchar(1000) NULL,
     "ORDER" int NOT NULL,
     INTEGER_IDX int NOT NULL
 );
@@ -473,7 +473,7 @@ CREATE TABLE SD_PARAMS
 (
     SD_ID bigint NOT NULL,
     PARAM_KEY nvarchar(256) NOT NULL,
-    PARAM_VALUE varchar(max) NULL
+    PARAM_VALUE nvarchar(4000) NULL
 );
 
 ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
@@ -536,8 +536,8 @@ CREATE TABLE COLUMNS_V2
 (
     CD_ID bigint NOT NULL,
     COMMENT nvarchar(256) NULL,
-    "COLUMN_NAME" nvarchar(767) NOT NULL,
-    TYPE_NAME varchar(max) NOT NULL,
+    "COLUMN_NAME" nvarchar(1000) NOT NULL,
+    TYPE_NAME nvarchar(4000) NOT NULL,
     INTEGER_IDX int NOT NULL
 );
 
@@ -548,7 +548,7 @@ CREATE TABLE SERDE_PARAMS
 (
     SERDE_ID bigint NOT NULL,
     PARAM_KEY nvarchar(256) NOT NULL,
-    PARAM_VALUE varchar(max) NULL
+    PARAM_VALUE nvarchar(4000) NULL
 );
 
 ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
@@ -568,7 +568,7 @@ CREATE TABLE TABLE_PARAMS
 (
     TBL_ID bigint NOT NULL,
     PARAM_KEY nvarchar(256) NOT NULL,
-    PARAM_VALUE varchar(max) NULL
+    PARAM_VALUE nvarchar(4000) NULL
 );
 
 ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
@@ -580,7 +580,7 @@ CREATE TABLE NOTIFICATION_LOG
     EVENT_TIME int NOT NULL,
     EVENT_TYPE nvarchar(32) NOT NULL,
     DB_NAME nvarchar(128) NULL,
-    TBL_NAME nvarchar(256) NULL,
+    TBL_NAME nvarchar(128) NULL,
     MESSAGE_FORMAT nvarchar(16)
     MESSAGE text NULL
 );

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mssql/hive-schema-2.3.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/hive-schema-2.3.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-2.3.0.mssql.sql
deleted file mode 100644
index 8a80a50..0000000
--- a/metastore/scripts/upgrade/mssql/hive-schema-2.3.0.mssql.sql
+++ /dev/null
@@ -1,1023 +0,0 @@
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements.  See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the "License"); you may not use this file except in compliance with
--- the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-------------------------------------------------------------------
--- DataNucleus SchemaTool (ran at 08/04/2014 15:10:15)
-------------------------------------------------------------------
--- Complete schema required for the following classes:-
---     org.apache.hadoop.hive.metastore.model.MColumnDescriptor
---     org.apache.hadoop.hive.metastore.model.MDBPrivilege
---     org.apache.hadoop.hive.metastore.model.MDatabase
---     org.apache.hadoop.hive.metastore.model.MDelegationToken
---     org.apache.hadoop.hive.metastore.model.MFieldSchema
---     org.apache.hadoop.hive.metastore.model.MFunction
---     org.apache.hadoop.hive.metastore.model.MGlobalPrivilege
---     org.apache.hadoop.hive.metastore.model.MIndex
---     org.apache.hadoop.hive.metastore.model.MMasterKey
---     org.apache.hadoop.hive.metastore.model.MOrder
---     org.apache.hadoop.hive.metastore.model.MPartition
---     org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege
---     org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics
---     org.apache.hadoop.hive.metastore.model.MPartitionEvent
---     org.apache.hadoop.hive.metastore.model.MPartitionPrivilege
---     org.apache.hadoop.hive.metastore.model.MResourceUri
---     org.apache.hadoop.hive.metastore.model.MRole
---     org.apache.hadoop.hive.metastore.model.MRoleMap
---     org.apache.hadoop.hive.metastore.model.MSerDeInfo
---     org.apache.hadoop.hive.metastore.model.MStorageDescriptor
---     org.apache.hadoop.hive.metastore.model.MStringList
---     org.apache.hadoop.hive.metastore.model.MTable
---     org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege
---     org.apache.hadoop.hive.metastore.model.MTableColumnStatistics
---     org.apache.hadoop.hive.metastore.model.MTablePrivilege
---     org.apache.hadoop.hive.metastore.model.MType
---     org.apache.hadoop.hive.metastore.model.MVersionTable
---
--- Table MASTER_KEYS for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
-CREATE TABLE MASTER_KEYS
-(
-    KEY_ID int NOT NULL,
-    MASTER_KEY nvarchar(767) NULL
-);
-
-ALTER TABLE MASTER_KEYS ADD CONSTRAINT MASTER_KEYS_PK PRIMARY KEY (KEY_ID);
-
--- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
-CREATE TABLE IDXS
-(
-    INDEX_ID bigint NOT NULL,
-    CREATE_TIME int NOT NULL,
-    DEFERRED_REBUILD bit NOT NULL,
-    INDEX_HANDLER_CLASS nvarchar(4000) NULL,
-    INDEX_NAME nvarchar(128) NULL,
-    INDEX_TBL_ID bigint NULL,
-    LAST_ACCESS_TIME int NOT NULL,
-    ORIG_TBL_ID bigint NULL,
-    SD_ID bigint NULL
-);
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
-
--- Table PART_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
-CREATE TABLE PART_COL_STATS
-(
-    CS_ID bigint NOT NULL,
-    AVG_COL_LEN float NULL,
-    "COLUMN_NAME" nvarchar(767) NOT NULL,
-    COLUMN_TYPE nvarchar(128) NOT NULL,
-    DB_NAME nvarchar(128) NOT NULL,
-    BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
-    BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
-    DOUBLE_HIGH_VALUE float NULL,
-    DOUBLE_LOW_VALUE float NULL,
-    LAST_ANALYZED bigint NOT NULL,
-    LONG_HIGH_VALUE bigint NULL,
-    LONG_LOW_VALUE bigint NULL,
-    MAX_COL_LEN bigint NULL,
-    NUM_DISTINCTS bigint NULL,
-    NUM_FALSES bigint NULL,
-    NUM_NULLS bigint NOT NULL,
-    NUM_TRUES bigint NULL,
-    PART_ID bigint NULL,
-    PARTITION_NAME nvarchar(767) NOT NULL,
-    "TABLE_NAME" nvarchar(256) NOT NULL
-);
-
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
-
-CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
-
--- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-CREATE TABLE PART_PRIVS
-(
-    PART_GRANT_ID bigint NOT NULL,
-    CREATE_TIME int NOT NULL,
-    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR nvarchar(128) NULL,
-    GRANTOR_TYPE nvarchar(128) NULL,
-    PART_ID bigint NULL,
-    PRINCIPAL_NAME nvarchar(128) NULL,
-    PRINCIPAL_TYPE nvarchar(128) NULL,
-    PART_PRIV nvarchar(128) NULL
-);
-
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
-
--- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
-CREATE TABLE SKEWED_STRING_LIST
-(
-    STRING_LIST_ID bigint NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
-
--- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE TABLE ROLES
-(
-    ROLE_ID bigint NOT NULL,
-    CREATE_TIME int NOT NULL,
-    OWNER_NAME nvarchar(128) NULL,
-    ROLE_NAME nvarchar(128) NULL
-);
-
-ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
-
--- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
-CREATE TABLE PARTITIONS
-(
-    PART_ID bigint NOT NULL,
-    CREATE_TIME int NOT NULL,
-    LAST_ACCESS_TIME int NOT NULL,
-    PART_NAME nvarchar(767) NULL,
-    SD_ID bigint NULL,
-    TBL_ID bigint NULL
-);
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
-
--- Table CDS for classes [org.apache.hadoop.hive.metastore.model.MColumnDescriptor]
-CREATE TABLE CDS
-(
-    CD_ID bigint NOT NULL
-);
-
-ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
-
--- Table VERSION for classes [org.apache.hadoop.hive.metastore.model.MVersionTable]
-CREATE TABLE VERSION
-(
-    VER_ID bigint NOT NULL,
-    SCHEMA_VERSION nvarchar(127) NOT NULL,
-    VERSION_COMMENT nvarchar(255) NOT NULL
-);
-
-ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
-
--- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE TABLE GLOBAL_PRIVS
-(
-    USER_GRANT_ID bigint NOT NULL,
-    CREATE_TIME int NOT NULL,
-    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR nvarchar(128) NULL,
-    GRANTOR_TYPE nvarchar(128) NULL,
-    PRINCIPAL_NAME nvarchar(128) NULL,
-    PRINCIPAL_TYPE nvarchar(128) NULL,
-    USER_PRIV nvarchar(128) NULL
-);
-
-ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
-
--- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-CREATE TABLE PART_COL_PRIVS
-(
-    PART_COLUMN_GRANT_ID bigint NOT NULL,
-    "COLUMN_NAME" nvarchar(767) NULL,
-    CREATE_TIME int NOT NULL,
-    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR nvarchar(128) NULL,
-    GRANTOR_TYPE nvarchar(128) NULL,
-    PART_ID bigint NULL,
-    PRINCIPAL_NAME nvarchar(128) NULL,
-    PRINCIPAL_TYPE nvarchar(128) NULL,
-    PART_COL_PRIV nvarchar(128) NULL
-);
-
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
-
--- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-CREATE TABLE DB_PRIVS
-(
-    DB_GRANT_ID bigint NOT NULL,
-    CREATE_TIME int NOT NULL,
-    DB_ID bigint NULL,
-    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR nvarchar(128) NULL,
-    GRANTOR_TYPE nvarchar(128) NULL,
-    PRINCIPAL_NAME nvarchar(128) NULL,
-    PRINCIPAL_TYPE nvarchar(128) NULL,
-    DB_PRIV nvarchar(128) NULL
-);
-
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
-
--- Table TAB_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics]
-CREATE TABLE TAB_COL_STATS
-(
-    CS_ID bigint NOT NULL,
-    AVG_COL_LEN float NULL,
-    "COLUMN_NAME" nvarchar(767) NOT NULL,
-    COLUMN_TYPE nvarchar(128) NOT NULL,
-    DB_NAME nvarchar(128) NOT NULL,
-    BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
-    BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
-    DOUBLE_HIGH_VALUE float NULL,
-    DOUBLE_LOW_VALUE float NULL,
-    LAST_ANALYZED bigint NOT NULL,
-    LONG_HIGH_VALUE bigint NULL,
-    LONG_LOW_VALUE bigint NULL,
-    MAX_COL_LEN bigint NULL,
-    NUM_DISTINCTS bigint NULL,
-    NUM_FALSES bigint NULL,
-    NUM_NULLS bigint NOT NULL,
-    NUM_TRUES bigint NULL,
-    TBL_ID bigint NULL,
-    "TABLE_NAME" nvarchar(256) NOT NULL
-);
-
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
-
--- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
-CREATE TABLE TYPES
-(
-    TYPES_ID bigint NOT NULL,
-    TYPE_NAME nvarchar(128) NULL,
-    TYPE1 nvarchar(767) NULL,
-    TYPE2 nvarchar(767) NULL
-);
-
-ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
-
--- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-CREATE TABLE TBL_PRIVS
-(
-    TBL_GRANT_ID bigint NOT NULL,
-    CREATE_TIME int NOT NULL,
-    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR nvarchar(128) NULL,
-    GRANTOR_TYPE nvarchar(128) NULL,
-    PRINCIPAL_NAME nvarchar(128) NULL,
-    PRINCIPAL_TYPE nvarchar(128) NULL,
-    TBL_PRIV nvarchar(128) NULL,
-    TBL_ID bigint NULL
-);
-
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
-
--- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE TABLE DBS
-(
-    DB_ID bigint NOT NULL,
-    "DESC" nvarchar(4000) NULL,
-    DB_LOCATION_URI nvarchar(4000) NOT NULL,
-    "NAME" nvarchar(128) NULL,
-    OWNER_NAME nvarchar(128) NULL,
-    OWNER_TYPE nvarchar(10) NULL
-);
-
-ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
-
--- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-CREATE TABLE TBL_COL_PRIVS
-(
-    TBL_COLUMN_GRANT_ID bigint NOT NULL,
-    "COLUMN_NAME" nvarchar(767) NULL,
-    CREATE_TIME int NOT NULL,
-    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR nvarchar(128) NULL,
-    GRANTOR_TYPE nvarchar(128) NULL,
-    PRINCIPAL_NAME nvarchar(128) NULL,
-    PRINCIPAL_TYPE nvarchar(128) NULL,
-    TBL_COL_PRIV nvarchar(128) NULL,
-    TBL_ID bigint NULL
-);
-
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
-
--- Table DELEGATION_TOKENS for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
-CREATE TABLE DELEGATION_TOKENS
-(
-    TOKEN_IDENT nvarchar(767) NOT NULL,
-    TOKEN nvarchar(767) NULL
-);
-
-ALTER TABLE DELEGATION_TOKENS ADD CONSTRAINT DELEGATION_TOKENS_PK PRIMARY KEY (TOKEN_IDENT);
-
--- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-CREATE TABLE SERDES
-(
-    SERDE_ID bigint NOT NULL,
-    "NAME" nvarchar(128) NULL,
-    SLIB nvarchar(4000) NULL
-);
-
-ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
-
--- Table FUNCS for classes [org.apache.hadoop.hive.metastore.model.MFunction]
-CREATE TABLE FUNCS
-(
-    FUNC_ID bigint NOT NULL,
-    CLASS_NAME nvarchar(4000) NULL,
-    CREATE_TIME int NOT NULL,
-    DB_ID bigint NULL,
-    FUNC_NAME nvarchar(128) NULL,
-    FUNC_TYPE int NOT NULL,
-    OWNER_NAME nvarchar(128) NULL,
-    OWNER_TYPE nvarchar(10) NULL
-);
-
-ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
-
--- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
-CREATE TABLE ROLE_MAP
-(
-    ROLE_GRANT_ID bigint NOT NULL,
-    ADD_TIME int NOT NULL,
-    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR nvarchar(128) NULL,
-    GRANTOR_TYPE nvarchar(128) NULL,
-    PRINCIPAL_NAME nvarchar(128) NULL,
-    PRINCIPAL_TYPE nvarchar(128) NULL,
-    ROLE_ID bigint NULL
-);
-
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
-
--- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
-CREATE TABLE TBLS
-(
-    TBL_ID bigint NOT NULL,
-    CREATE_TIME int NOT NULL,
-    DB_ID bigint NULL,
-    LAST_ACCESS_TIME int NOT NULL,
-    OWNER nvarchar(767) NULL,
-    RETENTION int NOT NULL,
-    SD_ID bigint NULL,
-    TBL_NAME nvarchar(256) NULL,
-    TBL_TYPE nvarchar(128) NULL,
-    VIEW_EXPANDED_TEXT text NULL,
-    VIEW_ORIGINAL_TEXT text NULL,
-    IS_REWRITE_ENABLED bit NOT NULL
-);
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
-
--- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-CREATE TABLE SDS
-(
-    SD_ID bigint NOT NULL,
-    CD_ID bigint NULL,
-    INPUT_FORMAT nvarchar(4000) NULL,
-    IS_COMPRESSED bit NOT NULL,
-    IS_STOREDASSUBDIRECTORIES bit NOT NULL,
-    LOCATION nvarchar(4000) NULL,
-    NUM_BUCKETS int NOT NULL,
-    OUTPUT_FORMAT nvarchar(4000) NULL,
-    SERDE_ID bigint NULL
-);
-
-ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
-
--- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE TABLE PARTITION_EVENTS
-(
-    PART_NAME_ID bigint NOT NULL,
-    DB_NAME nvarchar(128) NULL,
-    EVENT_TIME bigint NOT NULL,
-    EVENT_TYPE int NOT NULL,
-    PARTITION_NAME nvarchar(767) NULL,
-    TBL_NAME nvarchar(256) NULL
-);
-
-ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
-
--- Table SORT_COLS for join relationship
-CREATE TABLE SORT_COLS
-(
-    SD_ID bigint NOT NULL,
-    "COLUMN_NAME" nvarchar(767) NULL,
-    "ORDER" int NOT NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table SKEWED_COL_NAMES for join relationship
-CREATE TABLE SKEWED_COL_NAMES
-(
-    SD_ID bigint NOT NULL,
-    SKEWED_COL_NAME nvarchar(255) NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table SKEWED_COL_VALUE_LOC_MAP for join relationship
-CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
-(
-    SD_ID bigint NOT NULL,
-    STRING_LIST_ID_KID bigint NOT NULL,
-    LOCATION nvarchar(4000) NULL
-);
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
-
--- Table SKEWED_STRING_LIST_VALUES for join relationship
-CREATE TABLE SKEWED_STRING_LIST_VALUES
-(
-    STRING_LIST_ID bigint NOT NULL,
-    STRING_LIST_VALUE nvarchar(255) NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
-
--- Table PARTITION_KEY_VALS for join relationship
-CREATE TABLE PARTITION_KEY_VALS
-(
-    PART_ID bigint NOT NULL,
-    PART_KEY_VAL nvarchar(255) NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
-
--- Table PARTITION_KEYS for join relationship
-CREATE TABLE PARTITION_KEYS
-(
-    TBL_ID bigint NOT NULL,
-    PKEY_COMMENT nvarchar(4000) NULL,
-    PKEY_NAME nvarchar(128) NOT NULL,
-    PKEY_TYPE nvarchar(767) NOT NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
-
--- Table SKEWED_VALUES for join relationship
-CREATE TABLE SKEWED_VALUES
-(
-    SD_ID_OID bigint NOT NULL,
-    STRING_LIST_ID_EID bigint NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
-
--- Table SD_PARAMS for join relationship
-CREATE TABLE SD_PARAMS
-(
-    SD_ID bigint NOT NULL,
-    PARAM_KEY nvarchar(256) NOT NULL,
-    PARAM_VALUE varchar(max) NULL
-);
-
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
-
--- Table FUNC_RU for join relationship
-CREATE TABLE FUNC_RU
-(
-    FUNC_ID bigint NOT NULL,
-    RESOURCE_TYPE int NOT NULL,
-    RESOURCE_URI nvarchar(4000) NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID,INTEGER_IDX);
-
--- Table TYPE_FIELDS for join relationship
-CREATE TABLE TYPE_FIELDS
-(
-    TYPE_NAME bigint NOT NULL,
-    COMMENT nvarchar(256) NULL,
-    FIELD_NAME nvarchar(128) NOT NULL,
-    FIELD_TYPE nvarchar(767) NOT NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
-
--- Table BUCKETING_COLS for join relationship
-CREATE TABLE BUCKETING_COLS
-(
-    SD_ID bigint NOT NULL,
-    BUCKET_COL_NAME nvarchar(255) NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table DATABASE_PARAMS for join relationship
-CREATE TABLE DATABASE_PARAMS
-(
-    DB_ID bigint NOT NULL,
-    PARAM_KEY nvarchar(180) NOT NULL,
-    PARAM_VALUE nvarchar(4000) NULL
-);
-
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
-
--- Table INDEX_PARAMS for join relationship
-CREATE TABLE INDEX_PARAMS
-(
-    INDEX_ID bigint NOT NULL,
-    PARAM_KEY nvarchar(256) NOT NULL,
-    PARAM_VALUE nvarchar(4000) NULL
-);
-
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
-
--- Table COLUMNS_V2 for join relationship
-CREATE TABLE COLUMNS_V2
-(
-    CD_ID bigint NOT NULL,
-    COMMENT nvarchar(256) NULL,
-    "COLUMN_NAME" nvarchar(767) NOT NULL,
-    TYPE_NAME varchar(max) NOT NULL,
-    INTEGER_IDX int NOT NULL
-);
-
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
-
--- Table SERDE_PARAMS for join relationship
-CREATE TABLE SERDE_PARAMS
-(
-    SERDE_ID bigint NOT NULL,
-    PARAM_KEY nvarchar(256) NOT NULL,
-    PARAM_VALUE varchar(max) NULL
-);
-
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
-
--- Table PARTITION_PARAMS for join relationship
-CREATE TABLE PARTITION_PARAMS
-(
-    PART_ID bigint NOT NULL,
-    PARAM_KEY nvarchar(256) NOT NULL,
-    PARAM_VALUE nvarchar(4000) NULL
-);
-
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
-
--- Table TABLE_PARAMS for join relationship
-CREATE TABLE TABLE_PARAMS
-(
-    TBL_ID bigint NOT NULL,
-    PARAM_KEY nvarchar(256) NOT NULL,
-    PARAM_VALUE varchar(max) NULL
-);
-
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
-
-CREATE TABLE NOTIFICATION_LOG
-(
-    NL_ID bigint NOT NULL,
-    EVENT_ID bigint NOT NULL,
-    EVENT_TIME int NOT NULL,
-    EVENT_TYPE nvarchar(32) NOT NULL,
-    DB_NAME nvarchar(128) NULL,
-    TBL_NAME nvarchar(256) NULL,
-    MESSAGE_FORMAT nvarchar(16),
-    MESSAGE text NULL
-);
-
-ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID);
-
-CREATE TABLE NOTIFICATION_SEQUENCE
-(
-    NNI_ID bigint NOT NULL,
-    NEXT_EVENT_ID bigint NOT NULL
-);
-
-ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);
-
--- Constraints for table MASTER_KEYS for class(es) [org.apache.hadoop.hive.metastore.model.MMasterKey]
-
--- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) ;
-
-CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
-
-CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
-
-CREATE INDEX IDXS_N50 ON IDXS (ORIG_TBL_ID);
-
-CREATE INDEX IDXS_N49 ON IDXS (INDEX_TBL_ID);
-
-
--- Constraints for table PART_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
-
-CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
-
-
--- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
-
-CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
-
-
--- Constraints for table SKEWED_STRING_LIST for class(es) [org.apache.hadoop.hive.metastore.model.MStringList]
-
--- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
-
-
--- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
-
-CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
-
-CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
-
-CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
-
-
--- Constraints for table CDS for class(es) [org.apache.hadoop.hive.metastore.model.MColumnDescriptor]
-
--- Constraints for table VERSION for class(es) [org.apache.hadoop.hive.metastore.model.MVersionTable]
-
--- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
-
-CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
-
-CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
-
-CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
-
-
--- Constraints for table TAB_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics]
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
-
-CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS (TBL_ID);
-
-
--- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
-CREATE UNIQUE INDEX UNIQUETYPE ON TYPES (TYPE_NAME);
-
-
--- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
-
-CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
-
-CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE UNIQUE INDEX UNIQUEDATABASE ON DBS ("NAME");
-
-
--- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
-
-CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
-
-
--- Constraints for table DELEGATION_TOKENS for class(es) [org.apache.hadoop.hive.metastore.model.MDelegationToken]
-
--- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-
--- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunction]
-ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
-
-CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME,DB_ID);
-
-CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
-
-
--- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) ;
-
-CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
-
-CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
-
-CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
-
-CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
-
-CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
-
-
--- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ;
-
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ;
-
-CREATE INDEX SDS_N50 ON SDS (CD_ID);
-
-CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
-
-
--- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
-
-
--- Constraints for table SORT_COLS
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
-
-CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
-
-
--- Constraints for table SKEWED_COL_NAMES
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
-
-CREATE INDEX SKEWED_COL_NAMES_N49 ON SKEWED_COL_NAMES (SD_ID);
-
-
--- Constraints for table SKEWED_COL_VALUE_LOC_MAP
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
-
-CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N50 ON SKEWED_COL_VALUE_LOC_MAP (STRING_LIST_ID_KID);
-
-CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N49 ON SKEWED_COL_VALUE_LOC_MAP (SD_ID);
-
-
--- Constraints for table SKEWED_STRING_LIST_VALUES
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
-
-CREATE INDEX SKEWED_STRING_LIST_VALUES_N49 ON SKEWED_STRING_LIST_VALUES (STRING_LIST_ID);
-
-
--- Constraints for table PARTITION_KEY_VALS
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
-
-CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
-
-
--- Constraints for table PARTITION_KEYS
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
-
-CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
-
-
--- Constraints for table SKEWED_VALUES
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) ;
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
-
-CREATE INDEX SKEWED_VALUES_N50 ON SKEWED_VALUES (SD_ID_OID);
-
-CREATE INDEX SKEWED_VALUES_N49 ON SKEWED_VALUES (STRING_LIST_ID_EID);
-
-
--- Constraints for table SD_PARAMS
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
-
-CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
-
-
--- Constraints for table FUNC_RU
-ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) ;
-
-CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
-
-
--- Constraints for table TYPE_FIELDS
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) ;
-
-CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
-
-
--- Constraints for table BUCKETING_COLS
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
-
-CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
-
-
--- Constraints for table DATABASE_PARAMS
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
-
-CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
-
-
--- Constraints for table INDEX_PARAMS
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) ;
-
-CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
-
-
--- Constraints for table COLUMNS_V2
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ;
-
-CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
-
-
--- Constraints for table SERDE_PARAMS
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ;
-
-CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
-
-
--- Constraints for table PARTITION_PARAMS
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
-
-CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
-
-
--- Constraints for table TABLE_PARAMS
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
-
-CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
-
-
-
--- -----------------------------------------------------------------------------------------------------------------------------------------------
--- Transaction and Lock Tables
--- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file.
--- -----------------------------------------------------------------------------------------------------------------------------------------------
-CREATE TABLE COMPACTION_QUEUE(
-	CQ_ID bigint NOT NULL,
-	CQ_DATABASE nvarchar(128) NOT NULL,
-	CQ_TABLE nvarchar(128) NOT NULL,
-	CQ_PARTITION nvarchar(767) NULL,
-	CQ_STATE char(1) NOT NULL,
-	CQ_TYPE char(1) NOT NULL,
-	CQ_TBLPROPERTIES nvarchar(2048) NULL,
-	CQ_WORKER_ID nvarchar(128) NULL,
-	CQ_START bigint NULL,
-	CQ_RUN_AS nvarchar(128) NULL,
-	CQ_HIGHEST_TXN_ID bigint NULL,
-    CQ_META_INFO varbinary(2048) NULL,
-	CQ_HADOOP_JOB_ID nvarchar(128) NULL,
-PRIMARY KEY CLUSTERED 
-(
-	CQ_ID ASC
-)
-);
-
-CREATE TABLE COMPLETED_COMPACTIONS (
-	CC_ID bigint NOT NULL,
-	CC_DATABASE nvarchar(128) NOT NULL,
-	CC_TABLE nvarchar(128) NOT NULL,
-	CC_PARTITION nvarchar(767) NULL,
-	CC_STATE char(1) NOT NULL,
-	CC_TYPE char(1) NOT NULL,
-	CC_TBLPROPERTIES nvarchar(2048) NULL,
-	CC_WORKER_ID nvarchar(128) NULL,
-	CC_START bigint NULL,
-	CC_END bigint NULL,
-	CC_RUN_AS nvarchar(128) NULL,
-	CC_HIGHEST_TXN_ID bigint NULL,
-    CC_META_INFO varbinary(2048) NULL,
-	CC_HADOOP_JOB_ID nvarchar(128) NULL,
-PRIMARY KEY CLUSTERED 
-(
-	CC_ID ASC
-)
-);
-
-CREATE TABLE COMPLETED_TXN_COMPONENTS(
-	CTC_TXNID bigint NULL,
-	CTC_DATABASE nvarchar(128) NOT NULL,
-	CTC_TABLE nvarchar(128) NULL,
-	CTC_PARTITION nvarchar(767) NULL
-);
-
-CREATE TABLE HIVE_LOCKS(
-	HL_LOCK_EXT_ID bigint NOT NULL,
-	HL_LOCK_INT_ID bigint NOT NULL,
-	HL_TXNID bigint NULL,
-	HL_DB nvarchar(128) NOT NULL,
-	HL_TABLE nvarchar(128) NULL,
-	HL_PARTITION nvarchar(767) NULL,
-	HL_LOCK_STATE char(1) NOT NULL,
-	HL_LOCK_TYPE char(1) NOT NULL,
-	HL_LAST_HEARTBEAT bigint NOT NULL,
-	HL_ACQUIRED_AT bigint NULL,
-	HL_USER nvarchar(128) NOT NULL,
-	HL_HOST nvarchar(128) NOT NULL,
-    HL_HEARTBEAT_COUNT int NULL,
-    HL_AGENT_INFO nvarchar(128) NULL,
-    HL_BLOCKEDBY_EXT_ID bigint NULL,
-    HL_BLOCKEDBY_INT_ID bigint NULL,
-PRIMARY KEY CLUSTERED 
-(
-	HL_LOCK_EXT_ID ASC,
-	HL_LOCK_INT_ID ASC
-)
-);
-
-CREATE TABLE NEXT_COMPACTION_QUEUE_ID(
-	NCQ_NEXT bigint NOT NULL
-);
-
-INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
-
-CREATE TABLE NEXT_LOCK_ID(
-	NL_NEXT bigint NOT NULL
-);
-
-INSERT INTO NEXT_LOCK_ID VALUES(1);
-
-CREATE TABLE NEXT_TXN_ID(
-	NTXN_NEXT bigint NOT NULL
-);
-
-INSERT INTO NEXT_TXN_ID VALUES(1);
-
-CREATE TABLE TXNS(
-	TXN_ID bigint NOT NULL,
-	TXN_STATE char(1) NOT NULL,
-	TXN_STARTED bigint NOT NULL,
-	TXN_LAST_HEARTBEAT bigint NOT NULL,
-	TXN_USER nvarchar(128) NOT NULL,
-	TXN_HOST nvarchar(128) NOT NULL,
-    TXN_AGENT_INFO nvarchar(128) NULL,
-    TXN_META_INFO nvarchar(128) NULL,
-    TXN_HEARTBEAT_COUNT int NULL,
-PRIMARY KEY CLUSTERED 
-(
-	TXN_ID ASC
-)
-);
-
-CREATE TABLE TXN_COMPONENTS(
-	TC_TXNID bigint NULL,
-	TC_DATABASE nvarchar(128) NOT NULL,
-	TC_TABLE nvarchar(128) NULL,
-	TC_PARTITION nvarchar(767) NULL,
-	TC_OPERATION_TYPE char(1) NOT NULL
-);
-
-ALTER TABLE TXN_COMPONENTS  WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID);
-
-CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
-
-CREATE TABLE AUX_TABLE (
-  MT_KEY1 nvarchar(128) NOT NULL,
-  MT_KEY2 bigint NOT NULL,
-  MT_COMMENT nvarchar(255) NULL,
-  PRIMARY KEY CLUSTERED
-(
-    MT_KEY1 ASC,
-    MT_KEY2 ASC
-)
-);
-
-CREATE TABLE KEY_CONSTRAINTS
-(
-  CHILD_CD_ID BIGINT,
-  CHILD_INTEGER_IDX INT,
-  CHILD_TBL_ID BIGINT,
-  PARENT_CD_ID BIGINT NOT NULL,
-  PARENT_INTEGER_IDX INT NOT NULL,
-  PARENT_TBL_ID BIGINT NOT NULL,
-  POSITION INT NOT NULL,
-  CONSTRAINT_NAME VARCHAR(400) NOT NULL,
-  CONSTRAINT_TYPE SMALLINT NOT NULL,
-  UPDATE_RULE SMALLINT,
-  DELETE_RULE SMALLINT,
-  ENABLE_VALIDATE_RELY SMALLINT NOT NULL
-) ;
-
-ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
-
-CREATE INDEX CONSTRAINTS_PARENT_TBL_ID__INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID);
-
-CREATE TABLE WRITE_SET (
-  WS_DATABASE nvarchar(128) NOT NULL,
-  WS_TABLE nvarchar(128) NOT NULL,
-  WS_PARTITION nvarchar(767),
-  WS_TXNID bigint NOT NULL,
-  WS_COMMIT_ID bigint NOT NULL,
-  WS_OPERATION_TYPE char(1) NOT NULL
-);
-
-
--- -----------------------------------------------------------------
--- Record schema version. Should be the last step in the init script
--- -----------------------------------------------------------------
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '2.3.0', 'Hive release version 2.3.0');
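
That final VERSION row is what schema verification later reads back against this script. A minimal, hypothetical JDBC check of the recorded version (placeholder connection URL and credentials; assumes a SQL Server JDBC driver on the classpath; not part of Hive's schematool):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class SchemaVersionCheck {
      public static void main(String[] args) throws Exception {
        // Placeholder URL/credentials; any JDBC-accessible copy of the schema above works.
        try (Connection conn = DriverManager.getConnection(
                 "jdbc:sqlserver://localhost;databaseName=hive_metastore", "hive", "secret");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                 "SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION WHERE VER_ID = 1")) {
          if (rs.next()) {
            // Expected to print: 2.3.0 / Hive release version 2.3.0
            System.out.println(rs.getString("SCHEMA_VERSION") + " / " + rs.getString("VERSION_COMMENT"));
          }
        }
      }
    }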


[12/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java b/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
index fcf6f27..b259dfa 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
@@ -527,7 +527,7 @@ public class StatObjectConverter {
       Object llow, Object lhigh, Object dlow, Object dhigh, Object declow, Object dechigh,
       Object nulls, Object dist, Object avglen, Object maxlen, Object trues, Object falses,
       Object avgLong, Object avgDouble, Object avgDecimal, Object sumDist,
-      boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException {
+      boolean useDensityFunctionForNDVEstimation) throws MetaException {
     colType = colType.toLowerCase();
     if (colType.equals("boolean")) {
       BooleanColumnStatsData boolStats = new BooleanColumnStatsData();
@@ -561,29 +561,23 @@ public class StatObjectConverter {
       }
       long lowerBound = MetaStoreDirectSql.extractSqlLong(dist);
       long higherBound = MetaStoreDirectSql.extractSqlLong(sumDist);
-      long rangeBound = Long.MAX_VALUE;
-      if (lhigh != null && llow != null) {
-        rangeBound = MetaStoreDirectSql.extractSqlLong(lhigh)
-            - MetaStoreDirectSql.extractSqlLong(llow) + 1;
-      }
-      long estimation;
       if (useDensityFunctionForNDVEstimation && lhigh != null && llow != null && avgLong != null
           && MetaStoreDirectSql.extractSqlDouble(avgLong) != 0.0) {
         // We have estimation, lowerbound and higherbound. We use estimation if
         // it is between lowerbound and higherbound.
-        estimation = MetaStoreDirectSql
+        long estimation = MetaStoreDirectSql
             .extractSqlLong((MetaStoreDirectSql.extractSqlLong(lhigh) - MetaStoreDirectSql
                 .extractSqlLong(llow)) / MetaStoreDirectSql.extractSqlDouble(avgLong));
         if (estimation < lowerBound) {
-          estimation = lowerBound;
+          longStats.setNumDVs(lowerBound);
         } else if (estimation > higherBound) {
-          estimation = higherBound;
+          longStats.setNumDVs(higherBound);
+        } else {
+          longStats.setNumDVs(estimation);
         }
       } else {
-        estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
+        longStats.setNumDVs(lowerBound);
       }
-      estimation = Math.min(estimation, rangeBound);
-      longStats.setNumDVs(estimation);
       data.setLongStats(longStats);
     } else if (colType.equals("date")) {
       DateColumnStatsData dateStats = new DateColumnStatsData();
@@ -596,29 +590,23 @@ public class StatObjectConverter {
       }
       long lowerBound = MetaStoreDirectSql.extractSqlLong(dist);
       long higherBound = MetaStoreDirectSql.extractSqlLong(sumDist);
-      long rangeBound = Long.MAX_VALUE;
-      if (lhigh != null && llow != null) {
-        rangeBound = MetaStoreDirectSql.extractSqlLong(lhigh)
-            - MetaStoreDirectSql.extractSqlLong(llow) + 1;
-      }
-      long estimation;
       if (useDensityFunctionForNDVEstimation && lhigh != null && llow != null && avgLong != null
           && MetaStoreDirectSql.extractSqlDouble(avgLong) != 0.0) {
         // We have estimation, lowerbound and higherbound. We use estimation if
         // it is between lowerbound and higherbound.
-        estimation = MetaStoreDirectSql
+        long estimation = MetaStoreDirectSql
             .extractSqlLong((MetaStoreDirectSql.extractSqlLong(lhigh) - MetaStoreDirectSql
                 .extractSqlLong(llow)) / MetaStoreDirectSql.extractSqlDouble(avgLong));
         if (estimation < lowerBound) {
-          estimation = lowerBound;
+          dateStats.setNumDVs(lowerBound);
         } else if (estimation > higherBound) {
-          estimation = higherBound;
+          dateStats.setNumDVs(higherBound);
+        } else {
+          dateStats.setNumDVs(estimation);
         }
       } else {
-        estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
+        dateStats.setNumDVs(lowerBound);
       }
-      estimation = Math.min(estimation, rangeBound);
-      dateStats.setNumDVs(estimation);
       data.setDateStats(dateStats);
     } else if (colType.equals("double") || colType.equals("float")) {
       DoubleColumnStatsData doubleStats = new DoubleColumnStatsData();
@@ -644,7 +632,7 @@ public class StatObjectConverter {
           doubleStats.setNumDVs(estimation);
         }
       } else {
-        doubleStats.setNumDVs((long) (lowerBound + (higherBound - lowerBound) * ndvTuner));
+        doubleStats.setNumDVs(lowerBound);
       }
       data.setDoubleStats(doubleStats);
     } else if (colType.startsWith("decimal")) {
@@ -685,7 +673,7 @@ public class StatObjectConverter {
           decimalStats.setNumDVs(estimation);
         }
       } else {
-        decimalStats.setNumDVs((long) (lowerBound + (higherBound - lowerBound) * ndvTuner));
+        decimalStats.setNumDVs(lowerBound);
       }
       data.setDecimalStats(decimalStats);
     }
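
For context, this hunk drops the ndvTuner/range-bound refinements and returns to the plain density-based number-of-distinct-values estimate: divide the value range by the average gap, then clamp the result into the lower/higher bounds derived from the per-partition statistics, falling back to the lower bound when no density information is available. A standalone sketch of that logic, with illustrative names rather than the real StatObjectConverter signature:

    public class NdvEstimateSketch {
      // Density-based estimate kept by the revert: (high - low) / avgGap, clamped
      // into [lowerBound, higherBound]; lowerBound alone when there is no density info.
      static long estimateNumDVs(long low, long high, double avgGap, long lowerBound, long higherBound) {
        if (avgGap == 0.0) {
          return lowerBound;
        }
        long estimation = (long) ((high - low) / avgGap);
        if (estimation < lowerBound) {
          return lowerBound;
        } else if (estimation > higherBound) {
          return higherBound;
        }
        return estimation;
      }

      public static void main(String[] args) {
        // Values spread over [0, 1000] with an average gap of 2.5 -> roughly 400 distinct values.
        System.out.println(estimateNumDVs(0, 1000, 2.5, 50, 10000));   // 400
        System.out.println(estimateNumDVs(0, 1000, 2.5, 500, 10000));  // clamped up to 500
      }
    }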

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
index 8275664..2bb7197 100755
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
@@ -152,6 +152,11 @@ public class Warehouse {
     return whRoot;
   }
 
+  public Path getTablePath(String whRootString, String tableName) throws MetaException {
+    Path whRoot = getDnsPath(new Path(whRootString));
+    return new Path(whRoot, MetaStoreUtils.encodeTableName(tableName.toLowerCase()));
+  }
+
   public Path getDatabasePath(Database db) throws MetaException {
     if (db.getName().equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
       return getWhRoot();
@@ -166,14 +171,7 @@ public class Warehouse {
     return new Path(getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
   }
 
-  /**
-   * Returns the default location of the table path using the parent database's location
-   * @param db Database where the table is created
-   * @param tableName table name
-   * @return
-   * @throws MetaException
-   */
-  public Path getDefaultTablePath(Database db, String tableName)
+  public Path getTablePath(Database db, String tableName)
       throws MetaException {
     return getDnsPath(new Path(getDatabasePath(db), MetaStoreUtils.encodeTableName(tableName.toLowerCase())));
   }
@@ -186,11 +184,13 @@ public class Warehouse {
     return partition.getDbName() + "." + partition.getTableName() + partition.getValues();
   }
 
-  public boolean mkdirs(Path f) throws MetaException {
+  public boolean mkdirs(Path f, boolean inheritPermCandidate) throws MetaException {
+    boolean inheritPerms = HiveConf.getBoolVar(conf,
+      HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS) && inheritPermCandidate;
     FileSystem fs = null;
     try {
       fs = getFs(f);
-      return FileUtils.mkdir(fs, f, conf);
+      return FileUtils.mkdir(fs, f, inheritPerms, conf);
     } catch (IOException e) {
       MetaStoreUtils.logAndThrowMetaException(e);
     }
@@ -198,9 +198,13 @@ public class Warehouse {
   }
 
   public boolean renameDir(Path sourcePath, Path destPath) throws MetaException {
+    return renameDir(sourcePath, destPath, false);
+  }
+
+  public boolean renameDir(Path sourcePath, Path destPath, boolean inheritPerms) throws MetaException {
     try {
       FileSystem fs = getFs(sourcePath);
-      return FileUtils.rename(fs, sourcePath, destPath, conf);
+      return FileUtils.renameWithPerms(fs, sourcePath, destPath, inheritPerms, conf);
     } catch (Exception ex) {
       MetaStoreUtils.logAndThrowMetaException(ex);
     }
@@ -455,67 +459,16 @@ public class Warehouse {
     return partSpec;
   }
 
-  /**
-   * Returns the default partition path of a table within a given database and partition key value
-   * pairs. It uses the database location and appends it the table name and the partition key,value
-   * pairs to create the Path for the partition directory
-   *
-   * @param db - parent database which is used to get the base location of the partition directory
-   * @param tableName - table name for the partitions
-   * @param pm - Partition key value pairs
-   * @return
-   * @throws MetaException
-   */
-  public Path getDefaultPartitionPath(Database db, String tableName,
+  public Path getPartitionPath(Database db, String tableName,
       Map<String, String> pm) throws MetaException {
-    return getPartitionPath(getDefaultTablePath(db, tableName), pm);
+    return new Path(getTablePath(db, tableName), makePartPath(pm));
   }
 
-  /**
-   * Returns the path object for the given partition key-value pairs and the base location
-   *
-   * @param tblPath - the base location for the partitions. Typically the table location
-   * @param pm - Partition key value pairs
-   * @return
-   * @throws MetaException
-   */
   public Path getPartitionPath(Path tblPath, Map<String, String> pm)
       throws MetaException {
     return new Path(tblPath, makePartPath(pm));
   }
 
-  /**
-   * Given a database, a table and the partition key value pairs this method returns the Path object
-   * corresponding to the partition key value pairs. It uses the table location if available else
-   * uses the database location for constructing the path corresponding to the partition key-value
-   * pairs
-   *
-   * @param db - Parent database of the given table
-   * @param table - Table for which the partition key-values are given
-   * @param vals - List of values for the partition keys
-   * @return Path corresponding to the partition key-value pairs
-   * @throws MetaException
-   */
-  public Path getPartitionPath(Database db, Table table, List<String> vals)
-      throws MetaException {
-    List<FieldSchema> partKeys = table.getPartitionKeys();
-    if (partKeys == null || (partKeys.size() != vals.size())) {
-      throw new MetaException("Invalid number of partition keys found for " + table.getTableName());
-    }
-    Map<String, String> pm = new LinkedHashMap<>(vals.size());
-    int i = 0;
-    for (FieldSchema key : partKeys) {
-      pm.put(key.getName(), vals.get(i));
-      i++;
-    }
-
-    if (table.getSd().getLocation() != null) {
-      return getPartitionPath(getDnsPath(new Path(table.getSd().getLocation())), pm);
-    } else {
-      return getDefaultPartitionPath(db, table.getTableName(), pm);
-    }
-  }
-
   public boolean isDir(Path f) throws MetaException {
     FileSystem fs = null;
     try {
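
The renamed Warehouse helpers above compose the conventional warehouse layout, <warehouse root>/<db>.db/<table>/<partkey>=<value>/..., from the database path, the encoded table name and makePartPath. A rough, self-contained sketch of that layout; it assumes the usual ".db" database suffix and skips the DNS resolution, character escaping and default-database special case handled by the real code:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class PartitionPathSketch {
      // Build <warehouse root>/<db>.db/<table>/<key1>=<val1>/... for a non-default database.
      static String partitionPath(String whRoot, String db, String table, Map<String, String> partSpec) {
        StringBuilder sb = new StringBuilder(whRoot)
            .append('/').append(db.toLowerCase()).append(".db")
            .append('/').append(table.toLowerCase());
        for (Map.Entry<String, String> e : partSpec.entrySet()) {
          sb.append('/').append(e.getKey()).append('=').append(e.getValue());
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        Map<String, String> spec = new LinkedHashMap<>();
        spec.put("ds", "2017-05-08");
        spec.put("region", "us");
        System.out.println(partitionPath("/user/hive/warehouse", "sales", "web_logs", spec));
        // -> /user/hive/warehouse/sales.db/web_logs/ds=2017-05-08/region=us
      }
    }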

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java b/metastore/src/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java
deleted file mode 100644
index 45ed1e7..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.cache;
-
-import java.util.Arrays;
-
-/**
- * byte array with comparator
- */
-public class ByteArrayWrapper {
-  byte[] wrapped;
-
-  ByteArrayWrapper(byte[] b) {
-    wrapped = b;
-  }
-
-  @Override
-  public boolean equals(Object other) {
-    if (other instanceof ByteArrayWrapper) {
-      return Arrays.equals(((ByteArrayWrapper)other).wrapped, wrapped);
-    } else {
-      return false;
-    }
-  }
-
-  @Override
-  public int hashCode() {
-    return Arrays.hashCode(wrapped);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
deleted file mode 100644
index b438479..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.cache;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.cache.CachedStore.PartitionWrapper;
-import org.apache.hadoop.hive.metastore.cache.CachedStore.TableWrapper;
-import org.apache.hive.common.util.HiveStringUtils;
-
-public class CacheUtils {
-  private static final String delimit = "\u0001";
-
-  public static String buildKey(String dbName, String tableName) {
-    return dbName + delimit + tableName;
-  }
-
-  public static String buildKey(String dbName, String tableName, List<String> partVals) {
-    String key = buildKey(dbName, tableName);
-    if (partVals == null || partVals.size() == 0) {
-      return key;
-    }
-    for (int i = 0; i < partVals.size(); i++) {
-      key += partVals.get(i);
-      if (i != partVals.size() - 1) {
-        key += delimit;
-      }
-    }
-    return key;
-  }
-
-  public static String buildKey(String dbName, String tableName, List<String> partVals, String colName) {
-    String key = buildKey(dbName, tableName, partVals);
-    return key + delimit + colName;
-  }
-
-  public static Table assemble(TableWrapper wrapper) {
-    Table t = wrapper.getTable().deepCopy();
-    if (wrapper.getSdHash()!=null) {
-      StorageDescriptor sdCopy = SharedCache.getSdFromCache(wrapper.getSdHash()).deepCopy();
-      if (sdCopy.getBucketCols()==null) {
-        sdCopy.setBucketCols(new ArrayList<String>());
-      }
-      if (sdCopy.getSortCols()==null) {
-        sdCopy.setSortCols(new ArrayList<Order>());
-      }
-      if (sdCopy.getSkewedInfo()==null) {
-        sdCopy.setSkewedInfo(new SkewedInfo(new ArrayList<String>(),
-            new ArrayList<List<String>>(), new HashMap<List<String>,String>()));
-      }
-      sdCopy.setLocation(wrapper.getLocation());
-      sdCopy.setParameters(wrapper.getParameters());
-      t.setSd(sdCopy);
-    }
-    return t;
-  }
-
-  public static Partition assemble(PartitionWrapper wrapper) {
-    Partition p = wrapper.getPartition().deepCopy();
-    if (wrapper.getSdHash()!=null) {
-      StorageDescriptor sdCopy = SharedCache.getSdFromCache(wrapper.getSdHash()).deepCopy();
-      if (sdCopy.getBucketCols()==null) {
-        sdCopy.setBucketCols(new ArrayList<String>());
-      }
-      if (sdCopy.getSortCols()==null) {
-        sdCopy.setSortCols(new ArrayList<Order>());
-      }
-      if (sdCopy.getSkewedInfo()==null) {
-        sdCopy.setSkewedInfo(new SkewedInfo(new ArrayList<String>(),
-            new ArrayList<List<String>>(), new HashMap<List<String>,String>()));
-      }
-      sdCopy.setLocation(wrapper.getLocation());
-      sdCopy.setParameters(wrapper.getParameters());
-      p.setSd(sdCopy);
-    }
-    return p;
-  }
-
-  public static boolean matches(String name, String pattern) {
-    String[] subpatterns = pattern.trim().split("\\|");
-    for (String subpattern : subpatterns) {
-      subpattern = "(?i)" + subpattern.replaceAll("\\?", ".{1}").replaceAll("\\*", ".*")
-          .replaceAll("\\^", "\\\\^").replaceAll("\\$", "\\\\$");
-      if (Pattern.matches(subpattern, HiveStringUtils.normalizeIdentifier(name))) {
-        return true;
-      }
-    }
-    return false;
-  }
-}
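
The removed matches() helper turns metastore-style wildcard patterns ('*' for any run of characters, '?' for a single character, '|' separating alternatives) into case-insensitive regex matches. A self-contained sketch of the same idea, leaving out the identifier normalization and the extra '^'/'$' escaping the real helper performs:

    import java.util.regex.Pattern;

    public class WildcardMatchSketch {
      // Convert each '|'-separated alternative into a case-insensitive regex and test the name.
      static boolean matches(String name, String pattern) {
        for (String sub : pattern.trim().split("\\|")) {
          String regex = "(?i)" + sub.replaceAll("\\?", ".{1}").replaceAll("\\*", ".*");
          if (Pattern.matches(regex, name)) {
            return true;
          }
        }
        return false;
      }

      public static void main(String[] args) {
        System.out.println(matches("web_logs", "web*|tmp_?"));  // true
        System.out.println(matches("tmp_1", "web*|tmp_?"));     // true
        System.out.println(matches("audit", "web*|tmp_?"));     // false
      }
    }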

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
deleted file mode 100644
index 85ea619..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ /dev/null
@@ -1,1622 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.cache;
-
-import java.nio.ByteBuffer;
-import java.util.*;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.FileMetadataHandler;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.ObjectStore;
-import org.apache.hadoop.hive.metastore.PartFilterExprUtil;
-import org.apache.hadoop.hive.metastore.PartitionExpressionProxy;
-import org.apache.hadoop.hive.metastore.RawStore;
-import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.Date;
-import org.apache.hadoop.hive.metastore.api.DateColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.Decimal;
-import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.model.MTableWrite;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
-import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
-import org.apache.hive.common.util.HiveStringUtils;
-import org.apache.thrift.TException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.annotations.VisibleForTesting;
-
-// TODO filter->expr
-// TODO functionCache
-// TODO constraintCache
-// TODO need sd nested copy?
-// TODO String intern
-// TODO restructure HBaseStore
-// TODO monitor event queue
-// TODO initial load slow?
-// TODO size estimation
-// TODO factor in extrapolation logic (using partitions found) during aggregate stats calculation
-// TODO factor in NDV estimation (density based estimation) logic when merging NDVs from 2 colStats object
-// TODO refactor to use same common code with StatObjectConverter (for merging 2 col stats objects)
-
-public class CachedStore implements RawStore, Configurable {
-  private static ScheduledExecutorService cacheUpdateMaster = null;
-  private static AtomicReference<Thread> runningMasterThread = new AtomicReference<Thread>(null);
-  RawStore rawStore;
-  Configuration conf;
-  private PartitionExpressionProxy expressionProxy = null;
-  static boolean firstTime = true;
-
-  static final private Logger LOG = LoggerFactory.getLogger(CachedStore.class.getName());
-
-  static class TableWrapper {
-    Table t;
-    String location;
-    Map<String, String> parameters;
-    byte[] sdHash;
-    TableWrapper(Table t, byte[] sdHash, String location, Map<String, String> parameters) {
-      this.t = t;
-      this.sdHash = sdHash;
-      this.location = location;
-      this.parameters = parameters;
-    }
-    public Table getTable() {
-      return t;
-    }
-    public byte[] getSdHash() {
-      return sdHash;
-    }
-    public String getLocation() {
-      return location;
-    }
-    public Map<String, String> getParameters() {
-      return parameters;
-    }
-  }
-
-  static class PartitionWrapper {
-    Partition p;
-    String location;
-    Map<String, String> parameters;
-    byte[] sdHash;
-    PartitionWrapper(Partition p, byte[] sdHash, String location, Map<String, String> parameters) {
-      this.p = p;
-      this.sdHash = sdHash;
-      this.location = location;
-      this.parameters = parameters;
-    }
-    public Partition getPartition() {
-      return p;
-    }
-    public byte[] getSdHash() {
-      return sdHash;
-    }
-    public String getLocation() {
-      return location;
-    }
-    public Map<String, String> getParameters() {
-      return parameters;
-    }
-  }
-
-  static class StorageDescriptorWrapper {
-    StorageDescriptor sd;
-    int refCount = 0;
-    StorageDescriptorWrapper(StorageDescriptor sd, int refCount) {
-      this.sd = sd;
-      this.refCount = refCount;
-    }
-    public StorageDescriptor getSd() {
-      return sd;
-    }
-    public int getRefCount() {
-      return refCount;
-    }
-  }
-
-  public CachedStore() {
-  }
-
-  @Override
-  public void setConf(Configuration conf) {
-    String rawStoreClassName = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_CACHED_RAW_STORE_IMPL,
-        ObjectStore.class.getName());
-    try {
-      rawStore = ((Class<? extends RawStore>) MetaStoreUtils.getClass(
-          rawStoreClassName)).newInstance();
-    } catch (Exception e) {
-      throw new RuntimeException("Cannot instantiate " + rawStoreClassName, e);
-    }
-    rawStore.setConf(conf);
-    Configuration oldConf = this.conf;
-    this.conf = conf;
-    if (expressionProxy != null && conf != oldConf) {
-      LOG.warn("Unexpected setConf when we were already configured");
-    }
-    if (expressionProxy == null || conf != oldConf) {
-      expressionProxy = PartFilterExprUtil.createExpressionProxy(conf);
-    }
-    if (firstTime) {
-      try {
-        LOG.info("Prewarming CachedStore");
-        prewarm();
-        LOG.info("CachedStore initialized");
-      } catch (Exception e) {
-        throw new RuntimeException(e);
-      }
-      firstTime = false;
-    }
-  }
-
-  private void prewarm() throws Exception {
-    List<String> dbNames = rawStore.getAllDatabases();
-    for (String dbName : dbNames) {
-      Database db = rawStore.getDatabase(dbName);
-      SharedCache.addDatabaseToCache(HiveStringUtils.normalizeIdentifier(dbName), db);
-      List<String> tblNames = rawStore.getAllTables(dbName);
-      for (String tblName : tblNames) {
-        Table table = rawStore.getTable(dbName, tblName);
-        SharedCache.addTableToCache(HiveStringUtils.normalizeIdentifier(dbName),
-            HiveStringUtils.normalizeIdentifier(tblName), table);
-        List<Partition> partitions = rawStore.getPartitions(dbName, tblName, Integer.MAX_VALUE);
-        for (Partition partition : partitions) {
-          SharedCache.addPartitionToCache(HiveStringUtils.normalizeIdentifier(dbName),
-              HiveStringUtils.normalizeIdentifier(tblName), partition);
-        }
-        Map<String, ColumnStatisticsObj> aggrStatsPerPartition = rawStore
-            .getAggrColStatsForTablePartitions(dbName, tblName);
-        SharedCache.addPartitionColStatsToCache(aggrStatsPerPartition);
-      }
-    }
-    // Start the cache update master-worker threads
-    startCacheUpdateService();
-  }
-
-  private synchronized void startCacheUpdateService() {
-    if (cacheUpdateMaster == null) {
-      cacheUpdateMaster = Executors.newScheduledThreadPool(1, new ThreadFactory() {
-        public Thread newThread(Runnable r) {
-          Thread t = Executors.defaultThreadFactory().newThread(r);
-          t.setDaemon(true);
-          return t;
-        }
-      });
-      cacheUpdateMaster.scheduleAtFixedRate(new CacheUpdateMasterWork(this), 0, HiveConf
-          .getTimeVar(conf, HiveConf.ConfVars.METASTORE_CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY,
-              TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
-    }
-  }
-
-  static class CacheUpdateMasterWork implements Runnable {
-
-    private CachedStore cachedStore;
-
-    public CacheUpdateMasterWork(CachedStore cachedStore) {
-      this.cachedStore = cachedStore;
-    }
-
-    @Override
-    public void run() {
-      runningMasterThread.set(Thread.currentThread());
-      RawStore rawStore = cachedStore.getRawStore();
-      try {
-        List<String> dbNames = rawStore.getAllDatabases();
-        // Update the database in cache
-        if (!updateDatabases(rawStore, dbNames)) {
-          return;
-        }
-        // Update the tables and their partitions in cache
-        if (!updateTables(rawStore, dbNames)) {
-          return;
-        }
-      } catch (MetaException e) {
-        LOG.error("Updating CachedStore: error getting database names", e);
-      }
-    }
-
-    private boolean updateDatabases(RawStore rawStore, List<String> dbNames) {
-      if (dbNames != null) {
-        List<Database> databases = new ArrayList<Database>();
-        for (String dbName : dbNames) {
-          // If a preemption of this thread was requested, simply return before proceeding
-          if (Thread.interrupted()) {
-            return false;
-          }
-          Database db;
-          try {
-            db = rawStore.getDatabase(dbName);
-            databases.add(db);
-          } catch (NoSuchObjectException e) {
-            LOG.info("Updating CachedStore: database - " + dbName + " does not exist.", e);
-          }
-        }
-        // Update the cached database objects
-        SharedCache.refreshDatabases(databases);
-      }
-      return true;
-    }
-
-    private boolean updateTables(RawStore rawStore, List<String> dbNames) {
-      if (dbNames != null) {
-        for (String dbName : dbNames) {
-          // Collect tables per database so that refreshTables only sees this database's tables
-          List<Table> tables = new ArrayList<Table>();
-          try {
-            List<String> tblNames = rawStore.getAllTables(dbName);
-            for (String tblName : tblNames) {
-              // If a preemption of this thread was requested, simply return before proceeding
-              if (Thread.interrupted()) {
-                return false;
-              }
-              Table table = rawStore.getTable(dbName, tblName);
-              tables.add(table);
-            }
-            // Update the cached table objects for this database
-            SharedCache.refreshTables(dbName, tables);
-            for (String tblName : tblNames) {
-              // If a preemption of this thread was requested, simply return before proceeding
-              if (Thread.interrupted()) {
-                return false;
-              }
-              List<Partition> partitions =
-                  rawStore.getPartitions(dbName, tblName, Integer.MAX_VALUE);
-              SharedCache.refreshPartitions(dbName, tblName, partitions);
-            }
-          } catch (MetaException | NoSuchObjectException e) {
-            LOG.error("Updating CachedStore: unable to read table", e);
-            return false;
-          }
-        }
-      }
-      return true;
-    }
-  }
-
-  // Interrupt the cache update background thread
-  // Fire and forget (the master will respond appropriately when it gets a chance)
-  // All writes to the cache go through synchronized methods, so fire & forget is fine.
-  private void interruptCacheUpdateMaster() {
-    if (runningMasterThread.get() != null) {
-      runningMasterThread.get().interrupt();
-    }
-  }
-
-  @Override
-  public Configuration getConf() {
-    return rawStore.getConf();
-  }
-
-  @Override
-  public void shutdown() {
-    rawStore.shutdown();
-  }
-
-  @Override
-  public boolean openTransaction() {
-    return rawStore.openTransaction();
-  }
-
-  @Override
-  public boolean commitTransaction() {
-    return rawStore.commitTransaction();
-  }
-
-  @Override
-  public Boolean commitTransactionExpectDeadlock() {
-    return null;
-  }
-
-  @Override
-  public void rollbackTransaction() {
-    rawStore.rollbackTransaction();
-  }
-
-  @Override
-  public void createDatabase(Database db)
-      throws InvalidObjectException, MetaException {
-    rawStore.createDatabase(db);
-    interruptCacheUpdateMaster();
-    SharedCache.addDatabaseToCache(HiveStringUtils.normalizeIdentifier(db.getName()), db.deepCopy());
-  }
-
-  @Override
-  public Database getDatabase(String dbName) throws NoSuchObjectException {
-    Database db = SharedCache.getDatabaseFromCache(HiveStringUtils.normalizeIdentifier(dbName));
-    if (db == null) {
-      throw new NoSuchObjectException();
-    }
-    return db;
-  }
-
-  @Override
-  public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException {
-    boolean succ = rawStore.dropDatabase(dbname);
-    if (succ) {
-      interruptCacheUpdateMaster();
-      SharedCache.removeDatabaseFromCache(HiveStringUtils.normalizeIdentifier(dbname));
-    }
-    return succ;
-  }
-
-  @Override
-  public boolean alterDatabase(String dbName, Database db)
-      throws NoSuchObjectException, MetaException {
-    boolean succ = rawStore.alterDatabase(dbName, db);
-    if (succ) {
-      interruptCacheUpdateMaster();
-      SharedCache.alterDatabaseInCache(HiveStringUtils.normalizeIdentifier(dbName), db);
-    }
-    return succ;
-  }
-
-  @Override
-  public List<String> getDatabases(String pattern) throws MetaException {
-    List<String> results = new ArrayList<String>();
-    for (String dbName : SharedCache.listCachedDatabases()) {
-      dbName = HiveStringUtils.normalizeIdentifier(dbName);
-      if (CacheUtils.matches(dbName, pattern)) {
-        results.add(dbName);
-      }
-    }
-    return results;
-  }
-
-  @Override
-  public List<String> getAllDatabases() throws MetaException {
-    return SharedCache.listCachedDatabases();
-  }
-
-  @Override
-  public boolean createType(Type type) {
-    return rawStore.createType(type);
-  }
-
-  @Override
-  public Type getType(String typeName) {
-    return rawStore.getType(typeName);
-  }
-
-  @Override
-  public boolean dropType(String typeName) {
-    return rawStore.dropType(typeName);
-  }
-
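-  /** Reconciles the stored table type with the EXTERNAL table property before the table is cached. */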
-  private void validateTableType(Table tbl) {
-    // If the table has property EXTERNAL set, update table type
-    // accordingly
-    String tableType = tbl.getTableType();
-    boolean isExternal = "TRUE".equals(tbl.getParameters().get("EXTERNAL"));
-    if (TableType.MANAGED_TABLE.toString().equals(tableType)) {
-      if (isExternal) {
-        tableType = TableType.EXTERNAL_TABLE.toString();
-      }
-    }
-    if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) {
-      if (!isExternal) {
-        tableType = TableType.MANAGED_TABLE.toString();
-      }
-    }
-    tbl.setTableType(tableType);
-  }
-
-  @Override
-  public void createTable(Table tbl)
-      throws InvalidObjectException, MetaException {
-    rawStore.createTable(tbl);
-    interruptCacheUpdateMaster();
-    validateTableType(tbl);
-    SharedCache.addTableToCache(HiveStringUtils.normalizeIdentifier(tbl.getDbName()),
-        HiveStringUtils.normalizeIdentifier(tbl.getTableName()), tbl);
-  }
-
-  @Override
-  public boolean dropTable(String dbName, String tableName)
-      throws MetaException, NoSuchObjectException, InvalidObjectException,
-      InvalidInputException {
-    boolean succ = rawStore.dropTable(dbName, tableName);
-    if (succ) {
-      interruptCacheUpdateMaster();
-      SharedCache.removeTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-          HiveStringUtils.normalizeIdentifier(tableName));
-    }
-    return succ;
-  }
-
-  @Override
-  public Table getTable(String dbName, String tableName) throws MetaException {
-    Table tbl = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-        HiveStringUtils.normalizeIdentifier(tableName));
-    if (tbl != null) {
-      tbl.unsetPrivileges();
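-      // Calling the setter also marks the thrift isSet flag for rewriteEnabled on the returned table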
-      tbl.setRewriteEnabled(tbl.isRewriteEnabled());
-    }
-    return tbl;
-  }
-
-  @Override
-  public boolean addPartition(Partition part)
-      throws InvalidObjectException, MetaException {
-    boolean succ = rawStore.addPartition(part);
-    if (succ) {
-      interruptCacheUpdateMaster();
-      SharedCache.addPartitionToCache(HiveStringUtils.normalizeIdentifier(part.getDbName()),
-          HiveStringUtils.normalizeIdentifier(part.getTableName()), part);
-    }
-    return succ;
-  }
-
-  @Override
-  public boolean addPartitions(String dbName, String tblName,
-      List<Partition> parts) throws InvalidObjectException, MetaException {
-    boolean succ = rawStore.addPartitions(dbName, tblName, parts);
-    if (succ) {
-      interruptCacheUpdateMaster();
-      for (Partition part : parts) {
-        SharedCache.addPartitionToCache(HiveStringUtils.normalizeIdentifier(dbName),
-            HiveStringUtils.normalizeIdentifier(tblName), part);
-      }
-    }
-    return succ;
-  }
-
-  @Override
-  public boolean addPartitions(String dbName, String tblName,
-      PartitionSpecProxy partitionSpec, boolean ifNotExists)
-      throws InvalidObjectException, MetaException {
-    boolean succ = rawStore.addPartitions(dbName, tblName, partitionSpec, ifNotExists);
-    if (succ) {
-      interruptCacheUpdateMaster();
-      PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator();
-      while (iterator.hasNext()) {
-        Partition part = iterator.next();
-        SharedCache.addPartitionToCache(HiveStringUtils.normalizeIdentifier(dbName),
-            HiveStringUtils.normalizeIdentifier(tblName), part);
-      }
-    }
-    return succ;
-  }
-
-  @Override
-  public Partition getPartition(String dbName, String tableName,
-      List<String> part_vals) throws MetaException, NoSuchObjectException {
-    Partition part = SharedCache.getPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-        HiveStringUtils.normalizeIdentifier(tableName), part_vals);
-    if (part != null) {
-      part.unsetPrivileges();
-    }
-    return part;
-  }
-
-  @Override
-  public boolean doesPartitionExist(String dbName, String tableName,
-      List<String> part_vals) throws MetaException, NoSuchObjectException {
-    return SharedCache.existPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-        HiveStringUtils.normalizeIdentifier(tableName), part_vals);
-  }
-
-  @Override
-  public boolean dropPartition(String dbName, String tableName,
-      List<String> part_vals) throws MetaException, NoSuchObjectException,
-      InvalidObjectException, InvalidInputException {
-    boolean succ = rawStore.dropPartition(dbName, tableName, part_vals);
-    if (succ) {
-      interruptCacheUpdateMaster();
-      SharedCache.removePartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-          HiveStringUtils.normalizeIdentifier(tableName), part_vals);
-    }
-    return succ;
-  }
-
-  @Override
-  public List<Partition> getPartitions(String dbName, String tableName, int max)
-      throws MetaException, NoSuchObjectException {
-    List<Partition> parts = SharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName),
-        HiveStringUtils.normalizeIdentifier(tableName), max);
-    if (parts != null) {
-      for (Partition part : parts) {
-        part.unsetPrivileges();
-      }
-    }
-    return parts;
-  }
-
-  @Override
-  public void alterTable(String dbName, String tblName, Table newTable)
-      throws InvalidObjectException, MetaException {
-    rawStore.alterTable(dbName, tblName, newTable);
-    interruptCacheUpdateMaster();
-    validateTableType(newTable);
-    SharedCache.alterTableInCache(HiveStringUtils.normalizeIdentifier(dbName),
-        HiveStringUtils.normalizeIdentifier(tblName), newTable);
-  }
-
-  @Override
-  public List<String> getTables(String dbName, String pattern)
-      throws MetaException {
-    List<String> tableNames = new ArrayList<String>();
-    for (Table table : SharedCache.listCachedTables(HiveStringUtils.normalizeIdentifier(dbName))) {
-      if (CacheUtils.matches(table.getTableName(), pattern)) {
-        tableNames.add(table.getTableName());
-      }
-    }
-    return tableNames;
-  }
-
-  @Override
-  public List<String> getTables(String dbName, String pattern,
-      TableType tableType) throws MetaException {
-    List<String> tableNames = new ArrayList<String>();
-    for (Table table : SharedCache.listCachedTables(HiveStringUtils.normalizeIdentifier(dbName))) {
-      if (CacheUtils.matches(table.getTableName(), pattern) &&
-          table.getTableType().equals(tableType.toString())) {
-        tableNames.add(table.getTableName());
-      }
-    }
-    return tableNames;
-  }
-
-  @Override
-  public List<TableMeta> getTableMeta(String dbNames, String tableNames,
-      List<String> tableTypes) throws MetaException {
-    return SharedCache.getTableMeta(HiveStringUtils.normalizeIdentifier(dbNames),
-        HiveStringUtils.normalizeIdentifier(tableNames), tableTypes);
-  }
-
-  @Override
-  public List<Table> getTableObjectsByName(String dbName,
-      List<String> tblNames) throws MetaException, UnknownDBException {
-    List<Table> tables = new ArrayList<Table>();
-    for (String tblName : tblNames) {
-      tables.add(SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-          HiveStringUtils.normalizeIdentifier(tblName)));
-    }
-    return tables;
-  }
-
-  @Override
-  public List<String> getAllTables(String dbName) throws MetaException {
-    List<String> tblNames = new ArrayList<String>();
-    for (Table tbl : SharedCache.listCachedTables(HiveStringUtils.normalizeIdentifier(dbName))) {
-      tblNames.add(HiveStringUtils.normalizeIdentifier(tbl.getTableName()));
-    }
-    return tblNames;
-  }
-
-  @Override
-  public List<String> listTableNamesByFilter(String dbName, String filter,
-      short max_tables) throws MetaException, UnknownDBException {
-    List<String> tableNames = new ArrayList<String>();
-    int count = 0;
-    for (Table table : SharedCache.listCachedTables(HiveStringUtils.normalizeIdentifier(dbName))) {
-      if (CacheUtils.matches(table.getTableName(), filter)
-          && (max_tables == -1 || count < max_tables)) {
-        tableNames.add(table.getTableName());
-        count++;
-      }
-    }
-    return tableNames;
-  }
-
-  @Override
-  public List<String> listPartitionNames(String dbName, String tblName,
-      short max_parts) throws MetaException {
-    List<String> partitionNames = new ArrayList<String>();
-    Table t = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-        HiveStringUtils.normalizeIdentifier(tblName));
-    int count = 0;
-    for (Partition part : SharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName),
-        HiveStringUtils.normalizeIdentifier(tblName), max_parts)) {
-      if (max_parts == -1 || count < max_parts) {
-        partitionNames.add(Warehouse.makePartName(t.getPartitionKeys(), part.getValues()));
-        count++;
-      }
-    }
-    return partitionNames;
-  }
-
-  @Override
-  public List<String> listPartitionNamesByFilter(String db_name,
-      String tbl_name, String filter, short max_parts) throws MetaException {
-    // TODO Translate filter -> expr
-    return null;
-  }
-
-  @Override
-  public void alterPartition(String dbName, String tblName,
-      List<String> partVals, Partition newPart)
-      throws InvalidObjectException, MetaException {
-    rawStore.alterPartition(dbName, tblName, partVals, newPart);
-    interruptCacheUpdateMaster();
-    SharedCache.alterPartitionInCache(HiveStringUtils.normalizeIdentifier(dbName),
-        HiveStringUtils.normalizeIdentifier(tblName), partVals, newPart);
-  }
-
-  @Override
-  public void alterPartitions(String dbName, String tblName,
-      List<List<String>> partValsList, List<Partition> newParts)
-      throws InvalidObjectException, MetaException {
-    rawStore.alterPartitions(dbName, tblName, partValsList, newParts);
-    interruptCacheUpdateMaster();
-    for (int i=0;i<partValsList.size();i++) {
-      List<String> partVals = partValsList.get(i);
-      Partition newPart = newParts.get(i);
-      SharedCache.alterPartitionInCache(HiveStringUtils.normalizeIdentifier(dbName),
-          HiveStringUtils.normalizeIdentifier(tblName), partVals, newPart);
-    }
-  }
-
-  @Override
-  public boolean addIndex(Index index)
-      throws InvalidObjectException, MetaException {
-    return rawStore.addIndex(index);
-  }
-
-  @Override
-  public Index getIndex(String dbName, String origTableName, String indexName)
-      throws MetaException {
-    return rawStore.getIndex(dbName, origTableName, indexName);
-  }
-
-  @Override
-  public boolean dropIndex(String dbName, String origTableName,
-      String indexName) throws MetaException {
-    return rawStore.dropIndex(dbName, origTableName, indexName);
-  }
-
-  @Override
-  public List<Index> getIndexes(String dbName, String origTableName, int max)
-      throws MetaException {
-    return rawStore.getIndexes(dbName, origTableName, max);
-  }
-
-  @Override
-  public List<String> listIndexNames(String dbName, String origTableName,
-      short max) throws MetaException {
-    return rawStore.listIndexNames(dbName, origTableName, max);
-  }
-
-  @Override
-  public void alterIndex(String dbname, String baseTblName, String name,
-      Index newIndex) throws InvalidObjectException, MetaException {
-    rawStore.alterIndex(dbname, baseTblName, name, newIndex);
-  }
-
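-  /**
-   * Collects the cached partition names of the table into result and prunes them with the
-   * given partition expression; returns true if the expression may also match unknown partitions.
-   */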
-  private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr,
-      String defaultPartName, short maxParts, List<String> result) throws MetaException, NoSuchObjectException {
-    List<Partition> parts = SharedCache.listCachedPartitions(
-        HiveStringUtils.normalizeIdentifier(table.getDbName()),
-        HiveStringUtils.normalizeIdentifier(table.getTableName()), maxParts);
-    for (Partition part : parts) {
-      result.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues()));
-    }
-    List<String> columnNames = new ArrayList<String>();
-    List<PrimitiveTypeInfo> typeInfos = new ArrayList<PrimitiveTypeInfo>();
-    for (FieldSchema fs : table.getPartitionKeys()) {
-      columnNames.add(fs.getName());
-      typeInfos.add(TypeInfoFactory.getPrimitiveTypeInfo(fs.getType()));
-    }
-    if (defaultPartName == null || defaultPartName.isEmpty()) {
-      defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME);
-    }
-    return expressionProxy.filterPartitionsByExpr(
-        columnNames, typeInfos, expr, defaultPartName, result);
-  }
-
-  @Override
-  public List<Partition> getPartitionsByFilter(String dbName, String tblName,
-      String filter, short maxParts)
-      throws MetaException, NoSuchObjectException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
-  public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
-      String defaultPartitionName, short maxParts, List<Partition> result)
-      throws TException {
-    List<String> partNames = new LinkedList<String>();
-    Table table = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), HiveStringUtils.normalizeIdentifier(tblName));
-    boolean hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn(
-        table, expr, defaultPartitionName, maxParts, partNames);
-    for (String partName : partNames) {
-      Partition part = SharedCache.getPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-          HiveStringUtils.normalizeIdentifier(tblName), partNameToVals(partName));
-      result.add(part);
-    }
-    return hasUnknownPartitions;
-  }
-
-  @Override
-  public int getNumPartitionsByFilter(String dbName, String tblName,
-      String filter) throws MetaException, NoSuchObjectException {
-    Table table = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-        HiveStringUtils.normalizeIdentifier(tblName));
-    // TODO filter -> expr
-    return 0;
-  }
-
-  @Override
-  public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr)
-      throws MetaException, NoSuchObjectException {
-    String defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME);
-    List<String> partNames = new LinkedList<String>();
-    Table table = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-        HiveStringUtils.normalizeIdentifier(tblName));
-    getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartName, Short.MAX_VALUE, partNames);
-    return partNames.size();
-  }
-
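-  /** Converts a partition name such as "ds=2017-05-08/hr=12" into its list of unescaped values. */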
-  public static List<String> partNameToVals(String name) {
-    if (name == null) return null;
-    List<String> vals = new ArrayList<String>();
-    String[] kvp = name.split("/");
-    for (String kv : kvp) {
-      vals.add(FileUtils.unescapePathName(kv.substring(kv.indexOf('=') + 1)));
-    }
-    return vals;
-  }
-
-  @Override
-  public List<Partition> getPartitionsByNames(String dbName, String tblName,
-      List<String> partNames) throws MetaException, NoSuchObjectException {
-    List<Partition> partitions = new ArrayList<Partition>();
-    for (String partName : partNames) {
-      Partition part = SharedCache.getPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-          HiveStringUtils.normalizeIdentifier(tblName), partNameToVals(partName));
-      if (part!=null) {
-        partitions.add(part);
-      }
-    }
-    return partitions;
-  }
-
-  @Override
-  public Table markPartitionForEvent(String dbName, String tblName,
-      Map<String, String> partVals, PartitionEventType evtType)
-      throws MetaException, UnknownTableException, InvalidPartitionException,
-      UnknownPartitionException {
-    return rawStore.markPartitionForEvent(dbName, tblName, partVals, evtType);
-  }
-
-  @Override
-  public boolean isPartitionMarkedForEvent(String dbName, String tblName,
-      Map<String, String> partName, PartitionEventType evtType)
-      throws MetaException, UnknownTableException, InvalidPartitionException,
-      UnknownPartitionException {
-    return rawStore.isPartitionMarkedForEvent(dbName, tblName, partName, evtType);
-  }
-
-  @Override
-  public boolean addRole(String rowName, String ownerName)
-      throws InvalidObjectException, MetaException, NoSuchObjectException {
-    return rawStore.addRole(rowName, ownerName);
-  }
-
-  @Override
-  public boolean removeRole(String roleName)
-      throws MetaException, NoSuchObjectException {
-    return rawStore.removeRole(roleName);
-  }
-
-  @Override
-  public boolean grantRole(Role role, String userName,
-      PrincipalType principalType, String grantor, PrincipalType grantorType,
-      boolean grantOption)
-      throws MetaException, NoSuchObjectException, InvalidObjectException {
-    return rawStore.grantRole(role, userName, principalType, grantor, grantorType, grantOption);
-  }
-
-  @Override
-  public boolean revokeRole(Role role, String userName,
-      PrincipalType principalType, boolean grantOption)
-      throws MetaException, NoSuchObjectException {
-    return rawStore.revokeRole(role, userName, principalType, grantOption);
-  }
-
-  @Override
-  public PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
-      List<String> groupNames) throws InvalidObjectException, MetaException {
-    return rawStore.getUserPrivilegeSet(userName, groupNames);
-  }
-
-  @Override
-  public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName,
-      List<String> groupNames) throws InvalidObjectException, MetaException {
-    return rawStore.getDBPrivilegeSet(dbName, userName, groupNames);
-  }
-
-  @Override
-  public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName,
-      String tableName, String userName, List<String> groupNames)
-      throws InvalidObjectException, MetaException {
-    return rawStore.getTablePrivilegeSet(dbName, tableName, userName, groupNames);
-  }
-
-  @Override
-  public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName,
-      String tableName, String partition, String userName,
-      List<String> groupNames) throws InvalidObjectException, MetaException {
-    return rawStore.getPartitionPrivilegeSet(dbName, tableName, partition, userName, groupNames);
-  }
-
-  @Override
-  public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName,
-      String tableName, String partitionName, String columnName,
-      String userName, List<String> groupNames)
-      throws InvalidObjectException, MetaException {
-    return rawStore.getColumnPrivilegeSet(dbName, tableName, partitionName, columnName, userName, groupNames);
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listPrincipalGlobalGrants(
-      String principalName, PrincipalType principalType) {
-    return rawStore.listPrincipalGlobalGrants(principalName, principalType);
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
-      PrincipalType principalType, String dbName) {
-    return rawStore.listPrincipalDBGrants(principalName, principalType, dbName);
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
-      PrincipalType principalType, String dbName, String tableName) {
-    return rawStore.listAllTableGrants(principalName, principalType, dbName, tableName);
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listPrincipalPartitionGrants(
-      String principalName, PrincipalType principalType, String dbName,
-      String tableName, List<String> partValues, String partName) {
-    return rawStore.listPrincipalPartitionGrants(principalName, principalType, dbName, tableName, partValues, partName);
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(
-      String principalName, PrincipalType principalType, String dbName,
-      String tableName, String columnName) {
-    return rawStore.listPrincipalTableColumnGrants(principalName, principalType, dbName, tableName, columnName);
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(
-      String principalName, PrincipalType principalType, String dbName,
-      String tableName, List<String> partValues, String partName,
-      String columnName) {
-    return rawStore.listPrincipalPartitionColumnGrants(principalName, principalType, dbName, tableName, partValues, partName, columnName);
-  }
-
-  @Override
-  public boolean grantPrivileges(PrivilegeBag privileges)
-      throws InvalidObjectException, MetaException, NoSuchObjectException {
-    return rawStore.grantPrivileges(privileges);
-  }
-
-  @Override
-  public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
-      throws InvalidObjectException, MetaException, NoSuchObjectException {
-    return rawStore.revokePrivileges(privileges, grantOption);
-  }
-
-  @Override
-  public Role getRole(String roleName) throws NoSuchObjectException {
-    return rawStore.getRole(roleName);
-  }
-
-  @Override
-  public List<String> listRoleNames() {
-    return rawStore.listRoleNames();
-  }
-
-  @Override
-  public List<Role> listRoles(String principalName,
-      PrincipalType principalType) {
-    return rawStore.listRoles(principalName, principalType);
-  }
-
-  @Override
-  public List<RolePrincipalGrant> listRolesWithGrants(String principalName,
-      PrincipalType principalType) {
-    return rawStore.listRolesWithGrants(principalName, principalType);
-  }
-
-  @Override
-  public List<RolePrincipalGrant> listRoleMembers(String roleName) {
-    return rawStore.listRoleMembers(roleName);
-  }
-
-  @Override
-  public Partition getPartitionWithAuth(String dbName, String tblName,
-      List<String> partVals, String userName, List<String> groupNames)
-      throws MetaException, NoSuchObjectException, InvalidObjectException {
-    Partition p = SharedCache.getPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-        HiveStringUtils.normalizeIdentifier(tblName), partVals);
-    if (p!=null) {
-      Table t = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-          HiveStringUtils.normalizeIdentifier(tblName));
-      String partName = Warehouse.makePartName(t.getPartitionKeys(), partVals);
-      PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(dbName, tblName, partName,
-          userName, groupNames);
-      p.setPrivileges(privs);
-    }
-    return p;
-  }
-
-  @Override
-  public List<Partition> getPartitionsWithAuth(String dbName, String tblName,
-      short maxParts, String userName, List<String> groupNames)
-      throws MetaException, NoSuchObjectException, InvalidObjectException {
-    Table t = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-        HiveStringUtils.normalizeIdentifier(tblName));
-    List<Partition> partitions = new ArrayList<Partition>();
-    int count = 0;
-    for (Partition part : SharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName),
-        HiveStringUtils.normalizeIdentifier(tblName), maxParts)) {
-      if (maxParts == -1 || count < maxParts) {
-        String partName = Warehouse.makePartName(t.getPartitionKeys(), part.getValues());
-        PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(dbName, tblName, partName,
-            userName, groupNames);
-        part.setPrivileges(privs);
-        partitions.add(part);
-        count++;
-      }
-    }
-    return partitions;
-  }
-
-  @Override
-  public List<String> listPartitionNamesPs(String dbName, String tblName,
-      List<String> partVals, short maxParts)
-      throws MetaException, NoSuchObjectException {
-    List<String> partNames = new ArrayList<String>();
-    int count = 0;
-    Table t = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-        HiveStringUtils.normalizeIdentifier(tblName));
-    for (Partition part : SharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName),
-        HiveStringUtils.normalizeIdentifier(tblName), maxParts)) {
-      boolean psMatch = true;
-      for (int i=0;i<partVals.size();i++) {
-        String psVal = partVals.get(i);
-        String partVal = part.getValues().get(i);
-        if (psVal!=null && !psVal.isEmpty() && !psVal.equals(partVal)) {
-          psMatch = false;
-          break;
-        }
-      }
-      if (!psMatch) {
-        continue;
-      }
-      if (maxParts == -1 || count < maxParts) {
-        partNames.add(Warehouse.makePartName(t.getPartitionKeys(), part.getValues()));
-        count++;
-      }
-    }
-    return partNames;
-  }
-
-  @Override
-  public List<Partition> listPartitionsPsWithAuth(String dbName,
-      String tblName, List<String> partVals, short maxParts, String userName,
-      List<String> groupNames)
-      throws MetaException, InvalidObjectException, NoSuchObjectException {
-    List<Partition> partitions = new ArrayList<Partition>();
-    Table t = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-        HiveStringUtils.normalizeIdentifier(tblName));
-    int count = 0;
-    for (Partition part : SharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName),
-        HiveStringUtils.normalizeIdentifier(tblName), maxParts)) {
-      boolean psMatch = true;
-      for (int i=0;i<partVals.size();i++) {
-        String psVal = partVals.get(i);
-        String partVal = part.getValues().get(i);
-        if (psVal!=null && !psVal.isEmpty() && !psVal.equals(partVal)) {
-          psMatch = false;
-          break;
-        }
-      }
-      if (!psMatch) {
-        continue;
-      }
-      if (maxParts == -1 || count < maxParts) {
-        String partName = Warehouse.makePartName(t.getPartitionKeys(), part.getValues());
-        PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(dbName, tblName, partName,
-            userName, groupNames);
-        part.setPrivileges(privs);
-        partitions.add(part);
-        count++;
-      }
-    }
-    return partitions;
-  }
-
-  @Override
-  public boolean updateTableColumnStatistics(ColumnStatistics colStats)
-      throws NoSuchObjectException, MetaException, InvalidObjectException,
-      InvalidInputException {
-    boolean succ = rawStore.updateTableColumnStatistics(colStats);
-    if (succ) {
-      SharedCache.updateTableColumnStatistics(HiveStringUtils.normalizeIdentifier(colStats.getStatsDesc().getDbName()),
-          HiveStringUtils.normalizeIdentifier(colStats.getStatsDesc().getTableName()), colStats.getStatsObj());
-    }
-    return succ;
-  }
-
-  @Override
-  public boolean updatePartitionColumnStatistics(ColumnStatistics colStats,
-      List<String> partVals) throws NoSuchObjectException, MetaException,
-      InvalidObjectException, InvalidInputException {
-    boolean succ = rawStore.updatePartitionColumnStatistics(colStats, partVals);
-    if (succ) {
-      SharedCache.updatePartitionColumnStatistics(HiveStringUtils.normalizeIdentifier(colStats.getStatsDesc().getDbName()),
-          HiveStringUtils.normalizeIdentifier(colStats.getStatsDesc().getTableName()), partVals, colStats.getStatsObj());
-    }
-    return succ;
-  }
-
-  @Override
-  public ColumnStatistics getTableColumnStatistics(String dbName,
-      String tableName, List<String> colName)
-      throws MetaException, NoSuchObjectException {
-    return rawStore.getTableColumnStatistics(dbName, tableName, colName);
-  }
-
-  @Override
-  public List<ColumnStatistics> getPartitionColumnStatistics(String dbName,
-      String tblName, List<String> partNames, List<String> colNames)
-      throws MetaException, NoSuchObjectException {
-    return rawStore.getPartitionColumnStatistics(dbName, tblName, partNames, colNames);
-  }
-
-  @Override
-  public boolean deletePartitionColumnStatistics(String dbName,
-      String tableName, String partName, List<String> partVals, String colName)
-      throws NoSuchObjectException, MetaException, InvalidObjectException,
-      InvalidInputException {
-    return rawStore.deletePartitionColumnStatistics(dbName, tableName, partName, partVals, colName);
-  }
-
-  @Override
-  public boolean deleteTableColumnStatistics(String dbName, String tableName,
-      String colName) throws NoSuchObjectException, MetaException,
-      InvalidObjectException, InvalidInputException {
-    return rawStore.deleteTableColumnStatistics(dbName, tableName, colName);
-  }
-
-  @Override
-  public long cleanupEvents() {
-    return rawStore.cleanupEvents();
-  }
-
-  @Override
-  public boolean addToken(String tokenIdentifier, String delegationToken) {
-    return rawStore.addToken(tokenIdentifier, delegationToken);
-  }
-
-  @Override
-  public boolean removeToken(String tokenIdentifier) {
-    return rawStore.removeToken(tokenIdentifier);
-  }
-
-  @Override
-  public String getToken(String tokenIdentifier) {
-    return rawStore.getToken(tokenIdentifier);
-  }
-
-  @Override
-  public List<String> getAllTokenIdentifiers() {
-    return rawStore.getAllTokenIdentifiers();
-  }
-
-  @Override
-  public int addMasterKey(String key) throws MetaException {
-    return rawStore.addMasterKey(key);
-  }
-
-  @Override
-  public void updateMasterKey(Integer seqNo, String key)
-      throws NoSuchObjectException, MetaException {
-    rawStore.updateMasterKey(seqNo, key);
-  }
-
-  @Override
-  public boolean removeMasterKey(Integer keySeq) {
-    return rawStore.removeMasterKey(keySeq);
-  }
-
-  @Override
-  public String[] getMasterKeys() {
-    return rawStore.getMasterKeys();
-  }
-
-  @Override
-  public void verifySchema() throws MetaException {
-    rawStore.verifySchema();
-  }
-
-  @Override
-  public String getMetaStoreSchemaVersion() throws MetaException {
-    return rawStore.getMetaStoreSchemaVersion();
-  }
-
-  @Override
-  public void setMetaStoreSchemaVersion(String version, String comment)
-      throws MetaException {
-    rawStore.setMetaStoreSchemaVersion(version, comment);
-  }
-
-  @Override
-  public void dropPartitions(String dbName, String tblName,
-      List<String> partNames) throws MetaException, NoSuchObjectException {
-    rawStore.dropPartitions(dbName, tblName, partNames);
-    interruptCacheUpdateMaster();
-    for (String partName : partNames) {
-      List<String> vals = partNameToVals(partName);
-      SharedCache.removePartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-          HiveStringUtils.normalizeIdentifier(tblName), vals);
-    }
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
-      String principalName, PrincipalType principalType) {
-    return rawStore.listPrincipalDBGrantsAll(principalName, principalType);
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(
-      String principalName, PrincipalType principalType) {
-    return rawStore.listPrincipalTableGrantsAll(principalName, principalType);
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(
-      String principalName, PrincipalType principalType) {
-    return rawStore.listPrincipalPartitionGrantsAll(principalName, principalType);
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(
-      String principalName, PrincipalType principalType) {
-    return rawStore.listPrincipalTableColumnGrantsAll(principalName, principalType);
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(
-      String principalName, PrincipalType principalType) {
-    return rawStore.listPrincipalPartitionColumnGrantsAll(principalName, principalType);
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listGlobalGrantsAll() {
-    return rawStore.listGlobalGrantsAll();
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listDBGrantsAll(String dbName) {
-    return rawStore.listDBGrantsAll(dbName);
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String dbName,
-      String tableName, String partitionName, String columnName) {
-    return rawStore.listPartitionColumnGrantsAll(dbName, tableName, partitionName, columnName);
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listTableGrantsAll(String dbName,
-      String tableName) {
-    return rawStore.listTableGrantsAll(dbName, tableName);
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listPartitionGrantsAll(String dbName,
-      String tableName, String partitionName) {
-    return rawStore.listPartitionGrantsAll(dbName, tableName, partitionName);
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listTableColumnGrantsAll(String dbName,
-      String tableName, String columnName) {
-    return rawStore.listTableColumnGrantsAll(dbName, tableName, columnName);
-  }
-
-  @Override
-  public void createFunction(Function func)
-      throws InvalidObjectException, MetaException {
-    // TODO functionCache
-    rawStore.createFunction(func);
-  }
-
-  @Override
-  public void alterFunction(String dbName, String funcName,
-      Function newFunction) throws InvalidObjectException, MetaException {
-    // TODO functionCache
-    rawStore.alterFunction(dbName, funcName, newFunction);
-  }
-
-  @Override
-  public void dropFunction(String dbName, String funcName) throws MetaException,
-      NoSuchObjectException, InvalidObjectException, InvalidInputException {
-    // TODO functionCache
-    rawStore.dropFunction(dbName, funcName);
-  }
-
-  @Override
-  public Function getFunction(String dbName, String funcName)
-      throws MetaException {
-    // TODO functionCache
-    return rawStore.getFunction(dbName, funcName);
-  }
-
-  @Override
-  public List<Function> getAllFunctions() throws MetaException {
-    // TODO functionCache
-    return rawStore.getAllFunctions();
-  }
-
-  @Override
-  public List<String> getFunctions(String dbName, String pattern)
-      throws MetaException {
-    // TODO functionCache
-    return rawStore.getFunctions(dbName, pattern);
-  }
-
-  @Override
-  public AggrStats get_aggr_stats_for(String dbName, String tblName,
-      List<String> partNames, List<String> colNames)
-      throws MetaException, NoSuchObjectException {
-    List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>(colNames.size());
-    for (String colName : colNames) {
-      colStats.add(mergeColStatsForPartitions(HiveStringUtils.normalizeIdentifier(dbName),
-          HiveStringUtils.normalizeIdentifier(tblName), partNames, colName));
-    }
-    // TODO: revisit the partitions not found case for extrapolation
-    return new AggrStats(colStats, partNames.size());
-  }
-
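-  /** Folds the cached per-partition statistics of colName across partNames into one aggregate object. */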
-  private ColumnStatisticsObj mergeColStatsForPartitions(String dbName, String tblName,
-      List<String> partNames, String colName) throws MetaException {
-    ColumnStatisticsObj colStats = null;
-    for (String partName : partNames) {
-      String colStatsCacheKey = CacheUtils.buildKey(dbName, tblName, partNameToVals(partName), colName);
-      ColumnStatisticsObj colStatsForPart = SharedCache.getCachedPartitionColStats(
-          colStatsCacheKey);
-      if (colStats == null) {
-        colStats = colStatsForPart;
-      } else {
-        colStats = mergeColStatsObj(colStats, colStatsForPart);
-      }
-    }
-    return colStats;
-  }
-
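-  /** Merges two column statistics objects for the same column, dispatching on the column type. */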
-  private ColumnStatisticsObj mergeColStatsObj(ColumnStatisticsObj colStats1,
-      ColumnStatisticsObj colStats2) throws MetaException {
-    if ((!colStats1.getColType().equalsIgnoreCase(colStats2.getColType()))
-        || (!colStats1.getColName().equalsIgnoreCase(colStats2.getColName()))) {
-      throw new MetaException("Cannot merge partition column stats when the column name or type differs.");
-    }
-    ColumnStatisticsData csd = new ColumnStatisticsData();
-    ColumnStatisticsObj cso = new ColumnStatisticsObj(colStats1.getColName(),
-        colStats1.getColType(), csd);
-    ColumnStatisticsData csData1 = colStats1.getStatsData();
-    ColumnStatisticsData csData2 = colStats2.getStatsData();
-    String colType = colStats1.getColType().toLowerCase();
-    if (colType.equals("boolean")) {
-      BooleanColumnStatsData boolStats = new BooleanColumnStatsData();
-      boolStats.setNumFalses(csData1.getBooleanStats().getNumFalses()
-          + csData2.getBooleanStats().getNumFalses());
-      boolStats.setNumTrues(csData1.getBooleanStats().getNumTrues()
-          + csData2.getBooleanStats().getNumTrues());
-      boolStats.setNumNulls(csData1.getBooleanStats().getNumNulls()
-          + csData2.getBooleanStats().getNumNulls());
-      csd.setBooleanStats(boolStats);
-    } else if (colType.equals("string") || colType.startsWith("varchar")
-        || colType.startsWith("char")) {
-      StringColumnStatsData stringStats = new StringColumnStatsData();
-      stringStats.setNumNulls(csData1.getStringStats().getNumNulls()
-          + csData2.getStringStats().getNumNulls());
-      stringStats.setAvgColLen(Math.max(csData1.getStringStats().getAvgColLen(), csData2
-          .getStringStats().getAvgColLen()));
-      stringStats.setMaxColLen(Math.max(csData1.getStringStats().getMaxColLen(), csData2
-          .getStringStats().getMaxColLen()));
-      stringStats.setNumDVs(Math.max(csData1.getStringStats().getNumDVs(), csData2.getStringStats()
-          .getNumDVs()));
-      csd.setStringStats(stringStats);
-    } else if (colType.equals("binary")) {
-      BinaryColumnStatsData binaryStats = new BinaryColumnStatsData();
-      binaryStats.setNumNulls(csData1.getBinaryStats().getNumNulls()
-          + csData2.getBinaryStats().getNumNulls());
-      binaryStats.setAvgColLen(Math.max(csData1.getBinaryStats().getAvgColLen(), csData2
-          .getBinaryStats().getAvgColLen()));
-      binaryStats.setMaxColLen(Math.max(csData1.getBinaryStats().getMaxColLen(), csData2
-          .getBinaryStats().getMaxColLen()));
-      csd.setBinaryStats(binaryStats);
-    } else if (colType.equals("bigint") || colType.equals("int") || colType.equals("smallint")
-        || colType.equals("tinyint") || colType.equals("timestamp")) {
-      LongColumnStatsData longStats = new LongColumnStatsData();
-      longStats.setNumNulls(csData1.getLongStats().getNumNulls()
-          + csData2.getLongStats().getNumNulls());
-      longStats.setHighValue(Math.max(csData1.getLongStats().getHighValue(), csData2.getLongStats()
-          .getHighValue()));
-      longStats.setLowValue(Math.min(csData1.getLongStats().getLowValue(), csData2.getLongStats()
-          .getLowValue()));
-      longStats.setNumDVs(Math.max(csData1.getLongStats().getNumDVs(), csData2.getLongStats()
-          .getNumDVs()));
-      csd.setLongStats(longStats);
-    } else if (colType.equals("date")) {
-      DateColumnStatsData dateStats = new DateColumnStatsData();
-      dateStats.setNumNulls(csData1.getDateStats().getNumNulls()
-          + csData2.getDateStats().getNumNulls());
-      dateStats.setHighValue(new Date(Math.max(csData1.getDateStats().getHighValue()
-          .getDaysSinceEpoch(), csData2.getDateStats().getHighValue().getDaysSinceEpoch())));
-      dateStats.setLowValue(new Date(Math.min(csData1.getDateStats().getLowValue()
-          .getDaysSinceEpoch(), csData2.getDateStats().getLowValue().getDaysSinceEpoch())));
-      dateStats.setNumDVs(Math.max(csData1.getDateStats().getNumDVs(), csData2.getDateStats()
-          .getNumDVs()));
-      csd.setDateStats(dateStats);
-    } else if (colType.equals("double") || colType.equals("float")) {
-      DoubleColumnStatsData doubleStats = new DoubleColumnStatsData();
-      doubleStats.setNumNulls(csData1.getDoubleStats().getNumNulls()
-          + csData2.getDoubleStats().getNumNulls());
-      doubleStats.setHighValue(Math.max(csData1.getDoubleStats().getHighValue(), csData2
-          .getDoubleStats().getHighValue()));
-      doubleStats.setLowValue(Math.min(csData1.getDoubleStats().getLowValue(), csData2
-          .getDoubleStats().getLowValue()));
-      doubleStats.setNumDVs(Math.max(csData1.getDoubleStats().getNumDVs(), csData2.getDoubleStats()
-          .getNumDVs()));
-      csd.setDoubleStats(doubleStats);
-    } else if (colType.startsWith("decimal")) {
-      DecimalColumnStatsData decimalStats = new DecimalColumnStatsData();
-      decimalStats.setNumNulls(csData1.getDecimalStats().getNumNulls()
-          + csData2.getDecimalStats().getNumNulls());
-      Decimal high = (csData1.getDecimalStats().getHighValue()
-          .compareTo(csData2.getDecimalStats().getHighValue()) > 0) ? csData1.getDecimalStats()
-          .getHighValue() : csData2.getDecimalStats().getHighValue();
-      decimalStats.setHighValue(high);
-      Decimal low = (csData1.getDecimalStats().getLowValue()
-          .compareTo(csData2.getDecimalStats().getLowValue()) < 0) ? csData1.getDecimalStats()
-          .getLowValue() : csData2.getDecimalStats().getLowValue();
-      decimalStats.setLowValue(low);
-      decimalStats.setNumDVs(Math.max(csData1.getDecimalStats().getNumDVs(), csData2
-          .getDecimalStats().getNumDVs()));
-      csd.setDecimalStats(decimalStats);
-    }
-    return cso;
-  }
-
-  @Override
-  public NotificationEventResponse getNextNotification(
-      NotificationEventRequest rqst) {
-    return rawStore.getNextNotification(rqst);
-  }
-
-  @Override
-  public void addNotificationEvent(NotificationEvent event) {
-    rawStore.addNotificationEvent(event);
-  }
-
-  @Override
-  public void cleanNotificationEvents(int olderThan) {
-    rawStore.cleanNotificationEvents(olderThan);
-  }
-
-  @Override
-  public CurrentNotificationEventId getCurrentNotificationEventId() {
-    return rawStore.getCurrentNotificationEventId();
-  }
-
-  @Override
-  public void flushCache() {
-    rawStore.flushCache();
-  }
-
-  @Override
-  public ByteBuffer[] getFileMetadata(List<Long> fileIds) throws MetaException {
-    return rawStore.getFileMetadata(fileIds);
-  }
-
-  @Override
-  public void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata,
-      FileMetadataExprType type) throws MetaException {
-    rawStore.putFileMetadata(fileIds, metadata, type);
-  }
-
-  @Override
-  public boolean isFileMetadataSupported() {
-    return rawStore.isFileMetadataSupported();
-  }
-
-  @Override
-  public void getFileMetadataByExpr(List<Long> fileIds,
-      FileMetadataExprType type, byte[] expr, ByteBuffer[] metadatas,
-      ByteBuffer[] exprResults, boolean[] eliminated) throws MetaException {
-    rawStore.getFileMetadataByExpr(fileIds, type, expr, metadatas, exprResults, eliminated);
-  }
-
-  @Override
-  public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
-    return rawStore.getFileMetadataHandler(type);
-  }
-
-  @Override
-  public int getTableCount() throws MetaException {
-    return SharedCache.getCachedTableCount();
-  }
-
-  @Override
-  public int getPartitionCount() throws MetaException {
-    return SharedCache.getCachedPartitionCount();
-  }
-
-  @Override
-  public int getDatabaseCount() throws MetaException {
-    return SharedCache.getCachedDatabaseCount();
-  }
-
-  @Override
-  public List<SQLPrimaryKey> getPrimaryKeys(String db_name, String tbl_name)
-      throws MetaException {
-    // TODO constraintCache
-    return rawStore.getPrimaryKeys(db_name, tbl_name);
-  }
-
-  @Override
-  public List<SQLForeignKey> getForeignKeys(String parent_db_name,
-      String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
-      throws MetaException {
-    // TODO constraintCache
-    return rawStore.getForeignKeys(parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name);
-  }
-
-  @Override
-  public void createTableWithConstraints(Table tbl,
-      List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys)
-      throws InvalidObjectException, MetaException {
-    // TODO constraintCache
-    rawStore.createTableWithConstraints(tbl, primaryKeys, foreignKeys);
-    SharedCache.addTableToCache(HiveStringUtils.normalizeIdentifier(tbl.getDbName()),
-        HiveStringUtils.normalizeIdentifier(tbl.getTableName()), tbl);
-  }
-
-  @Override
-  public void dropConstraint(String dbName, String tableName,
-      String constraintName) throws NoSuchObjectException {
-    // TODO constraintCache
-    rawStore.dropConstraint(dbName, tableName, constraintName);
-  }
-
-  @Override
-  public void addPrimaryKeys(List<SQLPrimaryKey> pks)
-      throws InvalidObjectException, MetaException {
-    // TODO constraintCache
-    rawStore.addPrimaryKeys(pks);
-  }
-
-  @Override
-  public void addForeignKeys(List<SQLForeignKey> fks)
-      throws InvalidObjectException, MetaException {
-    // TODO constraintCache
-    rawStore.addForeignKeys(fks);
-  }
-
-  @Override
-  public void updateTableWrite(MTableWrite tw) {
-
-  }
-
-  @Override
-  public MTableWrite getTableWrite(String dbName, String tblName, long writeId) throws MetaException {
-    return null;
-  }
-
-  @Override
-  public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) {
-
-  }
-
-  @Override
-  public List<Long> getTableWriteIds(String dbName, String tblName, long watermarkId, long nextWriteId, char state) throws MetaException {
-    return null;
-  }
-
-  @Override
-  public List<FullTableName> getAllMmTablesForCleanup() throws MetaException {
-    return null;
-  }
-
-  @Override
-  public List<MTableWrite> getTableWrites(String dbName, String tblName, long from, long to) throws MetaException {
-    return null;
-  }
-
-  @Override
-  public Collection<String> getAllPartitionLocations(String dbName, String tblName) {
-    return null;
-  }
-
-  @Override
-  public void deleteTableWrites(String dbName, String tblName, long from, long to) throws MetaException {
-
-  }
-
-  @Override
-  public Map<String, ColumnStatisticsObj> getAggrColStatsForTablePartitions(
-      String dbName, String tableName)
-      throws MetaException, NoSuchObjectException {
-    return rawStore.getAggrColStatsForTablePartitions(dbName, tableName);
-  }
-
-  public RawStore getRawStore() {
-    return rawStore;
-  }
-
-  @VisibleForTesting
-  public void setRawStore(RawStore rawStore) {
-    this.rawStore = rawStore;
-  }
-}


[46/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
new file mode 100644
index 0000000..718791c
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
@@ -0,0 +1,356 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common.jsonexplain.tez;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.hadoop.hive.common.jsonexplain.tez.Vertex.VertexType;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+
+public final class Op {
+  public final String name;
+  // tezJsonParser
+  public final TezJsonParser parser;
+  public final String operatorId;
+  public Op parent;
+  public final List<Op> children;
+  public final Map<String, String> attrs;
+  // the jsonObject for this operator
+  public final JSONObject opObject;
+  // the vertex that this operator belongs to
+  public final Vertex vertex;
+  // the vertex that this operator output to
+  public final String outputVertexName;
+  // the Operator type
+  public final OpType type;
+
+  public enum OpType {
+    MAPJOIN, MERGEJOIN, RS, OTHERS
+  };
+
+  public Op(String name, String id, String outputVertexName, List<Op> children,
+      Map<String, String> attrs, JSONObject opObject, Vertex vertex, TezJsonParser tezJsonParser)
+      throws JSONException {
+    super();
+    this.name = name;
+    this.operatorId = id;
+    this.type = deriveOpType(operatorId);
+    this.outputVertexName = outputVertexName;
+    this.children = children;
+    this.attrs = attrs;
+    this.opObject = opObject;
+    this.vertex = vertex;
+    this.parser = tezJsonParser;
+  }
+
+  private OpType deriveOpType(String operatorId) {
+    if (operatorId != null) {
+      if (operatorId.startsWith(OpType.MAPJOIN.toString())) {
+        return OpType.MAPJOIN;
+      } else if (operatorId.startsWith(OpType.MERGEJOIN.toString())) {
+        return OpType.MERGEJOIN;
+      } else if (operatorId.startsWith(OpType.RS.toString())) {
+        return OpType.RS;
+      } else {
+        return OpType.OTHERS;
+      }
+    } else {
+      return OpType.OTHERS;
+    }
+  }
+
+  private void inlineJoinOp() throws Exception {
+    // inline map join operator
+    if (this.type == OpType.MAPJOIN) {
+      JSONObject joinObj = opObject.getJSONObject(this.name);
+      // get the map for posToVertex
+      JSONObject verticeObj = joinObj.getJSONObject("input vertices:");
+      Map<String, Vertex> posToVertex = new LinkedHashMap<>();
+      for (String pos : JSONObject.getNames(verticeObj)) {
+        String vertexName = verticeObj.getString(pos);
+        // update the connection
+        Connection c = null;
+        for (Connection connection : vertex.parentConnections) {
+          if (connection.from.name.equals(vertexName)) {
+            posToVertex.put(pos, connection.from);
+            c = connection;
+            break;
+          }
+        }
+        if (c != null) {
+          parser.addInline(this, c);
+        }
+      }
+      // update the attrs
+      this.attrs.remove("input vertices:");
+      // update the keys to use operator name
+      JSONObject keys = joinObj.getJSONObject("keys:");
+      // find out the vertex for the big table
+      Set<Vertex> parentVertexes = new HashSet<>();
+      for (Connection connection : vertex.parentConnections) {
+        parentVertexes.add(connection.from);
+      }
+      parentVertexes.removeAll(posToVertex.values());
+      Map<String, String> posToOpId = new LinkedHashMap<>();
+      if (keys.length() != 0) {
+        for (String key : JSONObject.getNames(keys)) {
+          // first search from the posToVertex
+          if (posToVertex.containsKey(key)) {
+            Vertex vertex = posToVertex.get(key);
+            if (vertex.rootOps.size() == 1) {
+              posToOpId.put(key, vertex.rootOps.get(0).operatorId);
+            } else if ((vertex.rootOps.size() == 0 && vertex.vertexType == VertexType.UNION)) {
+              posToOpId.put(key, vertex.name);
+            } else {
+              Op singleRSOp = vertex.getSingleRSOp();
+              if (singleRSOp != null) {
+                posToOpId.put(key, singleRSOp.operatorId);
+              } else {
+                throw new Exception(
+                    "Expected exactly one root operator in vertex "
+                        + vertex.name
+                        + " while hive explain user was trying to identify the operator id.");
+              }
+            }
+          }
+          // then search from parent
+          else if (parent != null) {
+            posToOpId.put(key, parent.operatorId);
+          }
+          // otherwise assume it comes from the remaining parent vertex (the big table)
+          else if (parentVertexes.size() == 1) {
+            Vertex vertex = parentVertexes.iterator().next();
+            parentVertexes.clear();
+            if (vertex.rootOps.size() == 1) {
+              posToOpId.put(key, vertex.rootOps.get(0).operatorId);
+            } else if ((vertex.rootOps.size() == 0 && vertex.vertexType == VertexType.UNION)) {
+              posToOpId.put(key, vertex.name);
+            } else {
+              Op singleRSOp = vertex.getSingleRSOp();
+              if (singleRSOp != null) {
+                posToOpId.put(key, singleRSOp.operatorId);
+              } else {
+                throw new Exception(
+                    "There is no root operator, or more than one, in vertex "
+                        + vertex.name
+                        + " while hive explain user is trying to identify the operator id.");
+              }
+            }
+          }
+          // finally throw an exception
+          else {
+            throw new Exception(
+                "Can not find the source operator on one of the branches of map join.");
+          }
+        }
+      }
+      this.attrs.remove("keys:");
+      StringBuffer sb = new StringBuffer();
+      JSONArray conditionMap = joinObj.getJSONArray("condition map:");
+      for (int index = 0; index < conditionMap.length(); index++) {
+        JSONObject cond = conditionMap.getJSONObject(index);
+        String k = (String) cond.keys().next();
+        JSONObject condObject = new JSONObject((String)cond.get(k));
+        String type = condObject.getString("type");
+        String left = condObject.getString("left");
+        String right = condObject.getString("right");
+        if (keys.length() != 0) {
+          sb.append(posToOpId.get(left) + "." + keys.get(left) + "=" + posToOpId.get(right) + "."
+              + keys.get(right) + "(" + type + "),");
+        } else {
+          // probably a cross product
+          sb.append("(" + type + "),");
+        }
+      }
+      this.attrs.remove("condition map:");
+      this.attrs.put("Conds:", sb.substring(0, sb.length() - 1));
+    }
+    // should be merge join
+    else {
+      Map<String, String> posToOpId = new LinkedHashMap<>();
+      if (vertex.mergeJoinDummyVertexs.size() == 0) {
+        if (vertex.tagToInput.size() != vertex.parentConnections.size()) {
+          throw new Exception("tagToInput size " + vertex.tagToInput.size()
+              + " is different from parentConnections size " + vertex.parentConnections.size());
+        }
+        for (Entry<String, String> entry : vertex.tagToInput.entrySet()) {
+          Connection c = null;
+          for (Connection connection : vertex.parentConnections) {
+            if (connection.from.name.equals(entry.getValue())) {
+              Vertex v = connection.from;
+              if (v.rootOps.size() == 1) {
+                posToOpId.put(entry.getKey(), v.rootOps.get(0).operatorId);
+              } else if ((v.rootOps.size() == 0 && v.vertexType == VertexType.UNION)) {
+                posToOpId.put(entry.getKey(), v.name);
+              } else {
+                Op singleRSOp = v.getSingleRSOp();
+                if (singleRSOp != null) {
+                  posToOpId.put(entry.getKey(), singleRSOp.operatorId);
+                } else {
+                  throw new Exception(
+                      "There is no root operator, or more than one, in vertex " + v.name
+                          + " while hive explain user is trying to identify the operator id.");
+                }
+              }
+              c = connection;
+              break;
+            }
+          }
+          if (c == null) {
+            throw new Exception("Can not find " + entry.getValue()
+                + " while parsing keys of merge join operator");
+          }
+        }
+      } else {
+        posToOpId.put(vertex.tag, this.parent.operatorId);
+        for (Vertex v : vertex.mergeJoinDummyVertexs) {
+          if (v.rootOps.size() != 1) {
+            throw new Exception("Can not find a single root operator in vertex " + v.name
+                + " while hive explain user is trying to identify the operator id.");
+          }
+          posToOpId.put(v.tag, v.rootOps.get(0).operatorId);
+        }
+      }
+      JSONObject joinObj = opObject.getJSONObject(this.name);
+      // update the keys to use operator name
+      JSONObject keys = joinObj.getJSONObject("keys:");
+      if (keys.length() != 0) {
+        for (String key : JSONObject.getNames(keys)) {
+          if (!posToOpId.containsKey(key)) {
+            throw new Exception(
+                "Can not find the source operator on one of the branches of merge join.");
+          }
+        }
+        // inline merge join operator in a self-join
+        if (this.vertex != null) {
+          for (Vertex v : this.vertex.mergeJoinDummyVertexs) {
+            parser.addInline(this, new Connection(null, v));
+          }
+        }
+      }
+      // update the attrs
+      this.attrs.remove("keys:");
+      StringBuffer sb = new StringBuffer();
+      JSONArray conditionMap = joinObj.getJSONArray("condition map:");
+      for (int index = 0; index < conditionMap.length(); index++) {
+        JSONObject cond = conditionMap.getJSONObject(index);
+        String k = (String) cond.keys().next();
+        JSONObject condObject = new JSONObject((String)cond.get(k));
+        String type = condObject.getString("type");
+        String left = condObject.getString("left");
+        String right = condObject.getString("right");
+        if (keys.length() != 0) {
+          sb.append(posToOpId.get(left) + "." + keys.get(left) + "=" + posToOpId.get(right) + "."
+              + keys.get(right) + "(" + type + "),");
+        } else {
+          // probably a cross product
+          sb.append("(" + type + "),");
+        }
+      }
+      this.attrs.remove("condition map:");
+      this.attrs.put("Conds:", sb.substring(0, sb.length() - 1));
+    }
+  }
+
+  private String getNameWithOpIdStats() {
+    StringBuffer sb = new StringBuffer();
+    sb.append(TezJsonParserUtils.renameReduceOutputOperator(name, vertex));
+    if (operatorId != null) {
+      sb.append(" [" + operatorId + "]");
+    }
+    if (!TezJsonParserUtils.OperatorNoStats.contains(name) && attrs.containsKey("Statistics:")) {
+      sb.append(" (" + attrs.get("Statistics:") + ")");
+    }
+    attrs.remove("Statistics:");
+    return sb.toString();
+  }
+
+  /**
+   * @param printer
+   *          the printer that accumulates the output
+   * @param indentFlag
+   *          the current indentation level
+   * @param branchOfJoinOp
+   *          whether this operator is a branch of a Join operator, which
+   *          determines the indentation to use
+   * @throws Exception
+   */
+  public void print(Printer printer, int indentFlag, boolean branchOfJoinOp) throws Exception {
+    // print name
+    if (parser.printSet.contains(this)) {
+      printer.println(TezJsonParser.prefixString(indentFlag) + " Please refer to the previous "
+          + this.getNameWithOpIdStats());
+      return;
+    }
+    parser.printSet.add(this);
+    if (!branchOfJoinOp) {
+      printer.println(TezJsonParser.prefixString(indentFlag) + this.getNameWithOpIdStats());
+    } else {
+      printer.println(TezJsonParser.prefixString(indentFlag, "<-") + this.getNameWithOpIdStats());
+    }
+    branchOfJoinOp = false;
+    // if this operator is a Map Join Operator or a Merge Join Operator
+    if (this.type == OpType.MAPJOIN || this.type == OpType.MERGEJOIN) {
+      inlineJoinOp();
+      branchOfJoinOp = true;
+    }
+    // if this operator is the last operator, we summarize the non-inlined
+    // vertex
+    List<Connection> noninlined = new ArrayList<>();
+    if (this.parent == null) {
+      if (this.vertex != null) {
+        for (Connection connection : this.vertex.parentConnections) {
+          if (!parser.isInline(connection.from)) {
+            noninlined.add(connection);
+          }
+        }
+      }
+    }
+    // print attr
+    indentFlag++;
+    if (!attrs.isEmpty()) {
+      printer.println(TezJsonParser.prefixString(indentFlag)
+          + TezJsonParserUtils.attrsToString(attrs));
+    }
+    // print inline vertex
+    if (parser.inlineMap.containsKey(this)) {
+      for (int index = 0; index < parser.inlineMap.get(this).size(); index++) {
+        Connection connection = parser.inlineMap.get(this).get(index);
+        connection.from.print(printer, indentFlag, connection.type, this.vertex);
+      }
+    }
+    // print parent op, i.e., where data comes from
+    if (this.parent != null) {
+      this.parent.print(printer, indentFlag, branchOfJoinOp);
+    }
+    // print next vertex
+    else {
+      for (int index = 0; index < noninlined.size(); index++) {
+        Vertex v = noninlined.get(index).from;
+        v.print(printer, indentFlag, noninlined.get(index).type, this.vertex);
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Printer.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Printer.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Printer.java
new file mode 100644
index 0000000..d3c91d6
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Printer.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common.jsonexplain.tez;
+
+public final class Printer {
+  public static final String lineSeparator = System.getProperty("line.separator");
+  private final StringBuilder builder = new StringBuilder();
+
+  public void print(String string) {
+    builder.append(string);
+  }
+
+  public void println(String string) {
+    builder.append(string);
+    builder.append(lineSeparator);
+  }
+
+  public void println() {
+    builder.append(lineSeparator);
+  }
+
+  public String toString() {
+    return builder.toString();
+  }
+}
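
For context, the Printer above simply buffers output lines and hands them back as a single string; a minimal usage sketch (the stage and vertex names below are made up for illustration):

  // inside any method; only the Printer class above is needed
  Printer printer = new Printer();
  printer.println("Stage-1");
  printer.print("  Map 1");
  printer.println(" llap");
  printer.println();
  System.out.print(printer.toString());
  // prints "Stage-1", then "  Map 1 llap", then a blank line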

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
new file mode 100644
index 0000000..63937f8
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
@@ -0,0 +1,262 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common.jsonexplain.tez;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.jsonexplain.tez.Vertex.VertexType;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+
+public final class Stage {
+  //external name is used to show at the console
+  String externalName;
+  //internal name is used to track the stages
+  public final String internalName;
+  //tezJsonParser
+  public final TezJsonParser parser;
+  // upstream stages, e.g., root stage
+  public final List<Stage> parentStages = new ArrayList<>();
+  // downstream stages.
+  public final List<Stage> childStages = new ArrayList<>();
+  public final Map<String, Vertex> vertexs = new LinkedHashMap<>();
+  public final Map<String, String> attrs = new TreeMap<>();
+  Map<Vertex, List<Connection>> tezStageDependency;
+  // some stage may contain only a single operator, e.g., create table operator,
+  // fetch operator.
+  Op op;
+
+  public Stage(String name, TezJsonParser tezJsonParser) {
+    super();
+    internalName = name;
+    externalName = name;
+    parser = tezJsonParser;
+  }
+
+  public void addDependency(JSONObject object, Map<String, Stage> stages) throws JSONException {
+    if (object.has("DEPENDENT STAGES")) {
+      String names = object.getString("DEPENDENT STAGES");
+      for (String name : names.split(",")) {
+        Stage parent = stages.get(name.trim());
+        this.parentStages.add(parent);
+        parent.childStages.add(this);
+      }
+    }
+    if (object.has("CONDITIONAL CHILD TASKS")) {
+      String names = object.getString("CONDITIONAL CHILD TASKS");
+      this.externalName = this.internalName + "(CONDITIONAL CHILD TASKS: " + names + ")";
+      for (String name : names.split(",")) {
+        Stage child = stages.get(name.trim());
+        child.externalName = child.internalName + "(CONDITIONAL)";
+        child.parentStages.add(this);
+        this.childStages.add(child);
+      }
+    }
+  }
+
+  /**
+   * If the json object of this stage contains "Tez", extract its vertices and
+   * edges; otherwise extract the operators and/or attributes directly.
+   *
+   * @param object
+   * @throws Exception
+   */
+  public void extractVertex(JSONObject object) throws Exception {
+    if (object.has("Tez")) {
+      this.tezStageDependency = new TreeMap<>();
+      JSONObject tez = (JSONObject) object.get("Tez");
+      JSONObject vertices = tez.getJSONObject("Vertices:");
+      if (tez.has("Edges:")) {
+        JSONObject edges = tez.getJSONObject("Edges:");
+        // iterate for the first time to get all the vertices
+        for (String to : JSONObject.getNames(edges)) {
+          vertexs.put(to, new Vertex(to, vertices.getJSONObject(to), parser));
+        }
+        // iterate for the second time to get all the vertex dependency
+        for (String to : JSONObject.getNames(edges)) {
+          Object o = edges.get(to);
+          Vertex v = vertexs.get(to);
+          // 1 to 1 mapping
+          if (o instanceof JSONObject) {
+            JSONObject obj = (JSONObject) o;
+            String parent = obj.getString("parent");
+            Vertex parentVertex = vertexs.get(parent);
+            if (parentVertex == null) {
+              parentVertex = new Vertex(parent, vertices.getJSONObject(parent), parser);
+              vertexs.put(parent, parentVertex);
+            }
+            String type = obj.getString("type");
+            // for union vertex, we reverse the dependency relationship
+            if (!"CONTAINS".equals(type)) {
+              v.addDependency(new Connection(type, parentVertex));
+              parentVertex.setType(type);
+              parentVertex.children.add(v);
+            } else {
+              parentVertex.addDependency(new Connection(type, v));
+              v.children.add(parentVertex);
+            }
+            this.tezStageDependency.put(v, Arrays.asList(new Connection(type, parentVertex)));
+          } else {
+            // 1 to many mapping
+            JSONArray from = (JSONArray) o;
+            List<Connection> list = new ArrayList<>();
+            for (int index = 0; index < from.length(); index++) {
+              JSONObject obj = from.getJSONObject(index);
+              String parent = obj.getString("parent");
+              Vertex parentVertex = vertexs.get(parent);
+              if (parentVertex == null) {
+                parentVertex = new Vertex(parent, vertices.getJSONObject(parent), parser);
+                vertexs.put(parent, parentVertex);
+              }
+              String type = obj.getString("type");
+              if (!"CONTAINS".equals(type)) {
+                v.addDependency(new Connection(type, parentVertex));
+                parentVertex.setType(type);
+                parentVertex.children.add(v);
+              } else {
+                parentVertex.addDependency(new Connection(type, v));
+                v.children.add(parentVertex);
+              }
+              list.add(new Connection(type, parentVertex));
+            }
+            this.tezStageDependency.put(v, list);
+          }
+        }
+      } else {
+        for (String vertexName : JSONObject.getNames(vertices)) {
+          vertexs.put(vertexName, new Vertex(vertexName, vertices.getJSONObject(vertexName), parser));
+        }
+      }
+      // extract the operator tree of each map/reduce vertex
+      for (Vertex v : vertexs.values()) {
+        if (v.vertexType == VertexType.MAP || v.vertexType == VertexType.REDUCE) {
+          v.extractOpTree();
+          v.checkMultiReduceOperator();
+        }
+      }
+    } else {
+      String[] names = JSONObject.getNames(object);
+      if (names != null) {
+        for (String name : names) {
+          if (name.contains("Operator")) {
+            this.op = extractOp(name, object.getJSONObject(name));
+          } else {
+            if (!object.get(name).toString().isEmpty()) {
+              attrs.put(name, object.get(name).toString());
+            }
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * This method addresses operators that appear directly in a stage, e.g.,
+   * the create table operator and the fetch operator.
+   *
+   * @param opName
+   * @param opObj
+   * @return the extracted operator
+   * @throws Exception
+   */
+  Op extractOp(String opName, JSONObject opObj) throws Exception {
+    Map<String, String> attrs = new TreeMap<>();
+    Vertex v = null;
+    if (opObj.length() > 0) {
+      String[] names = JSONObject.getNames(opObj);
+      for (String name : names) {
+        Object o = opObj.get(name);
+        if (isPrintable(o) && !o.toString().isEmpty()) {
+          attrs.put(name, o.toString());
+        } else if (o instanceof JSONObject) {
+          JSONObject attrObj = (JSONObject) o;
+          if (attrObj.length() > 0) {
+            if (name.equals("Processor Tree:")) {
+              JSONObject object = new JSONObject(new LinkedHashMap<>());
+              object.put(name, attrObj);
+              v = new Vertex(null, object, parser);
+              v.extractOpTree();
+            } else {
+              for (String attrName : JSONObject.getNames(attrObj)) {
+                if (!attrObj.get(attrName).toString().isEmpty()) {
+                  attrs.put(attrName, attrObj.get(attrName).toString());
+                }
+              }
+            }
+          }
+        } else {
+          throw new Exception("Unsupported object in " + this.internalName);
+        }
+      }
+    }
+    Op op = new Op(opName, null, null, null, attrs, null, v, parser);
+    if (v != null) {
+      parser.addInline(op, new Connection(null, v));
+    }
+    return op;
+  }
+
+  private boolean isPrintable(Object val) {
+    if (val instanceof Boolean || val instanceof String || val instanceof Integer
+        || val instanceof Long || val instanceof Byte || val instanceof Float
+        || val instanceof Double || val instanceof Path) {
+      return true;
+    }
+    if (val != null && val.getClass().isPrimitive()) {
+      return true;
+    }
+    return false;
+  }
+
+  public void print(Printer printer, int indentFlag) throws Exception {
+    // print stagename
+    if (parser.printSet.contains(this)) {
+      printer.println(TezJsonParser.prefixString(indentFlag) + " Please refer to the previous "
+          + externalName);
+      return;
+    }
+    parser.printSet.add(this);
+    printer.println(TezJsonParser.prefixString(indentFlag) + externalName);
+    // print vertexes
+    indentFlag++;
+    for (Vertex candidate : this.vertexs.values()) {
+      if (!parser.isInline(candidate) && candidate.children.isEmpty()) {
+        candidate.print(printer, indentFlag, null, null);
+      }
+    }
+    if (!attrs.isEmpty()) {
+      printer.println(TezJsonParser.prefixString(indentFlag)
+          + TezJsonParserUtils.attrsToString(attrs));
+    }
+    if (op != null) {
+      op.print(printer, indentFlag, false);
+    }
+    indentFlag++;
+    // print dependent stages
+    for (Stage stage : this.parentStages) {
+      stage.print(printer, indentFlag);
+    }
+  }
+}
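
As a rough sketch of how Stage.addDependency is driven by the "STAGE DEPENDENCIES" section of the explain output (the stage names and the JSON literal below are illustrative assumptions, not taken from the patch):

  // inside a method that declares "throws JSONException";
  // uses org.json.JSONObject and java.util.LinkedHashMap
  Map<String, Stage> stages = new LinkedHashMap<>();
  TezJsonParser parser = new TezJsonParser();
  for (String name : new String[] { "Stage-1", "Stage-2", "Stage-3" }) {
    stages.put(name, new Stage(name, parser));
  }
  JSONObject dep = new JSONObject("{\"DEPENDENT STAGES\": \"Stage-1, Stage-2\"}");
  stages.get("Stage-3").addDependency(dep, stages);
  // Stage-3.parentStages now holds Stage-1 and Stage-2, and each of them
  // lists Stage-3 in its childStages.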

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java
index 294dc6b..ea86048 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java
@@ -18,29 +18,146 @@
 
 package org.apache.hadoop.hive.common.jsonexplain.tez;
 
-import org.apache.hadoop.hive.common.jsonexplain.DagJsonParser;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
 
+import org.apache.hadoop.hive.common.jsonexplain.JsonParser;
+import org.json.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-public class TezJsonParser extends DagJsonParser {
+public final class TezJsonParser implements JsonParser {
+  public final Map<String, Stage> stages = new LinkedHashMap<>();
+  protected final Logger LOG;
+  // the objects that have been printed.
+  public final Set<Object> printSet = new LinkedHashSet<>();
+  // the vertices that should be inlined: <operator, list of connections
+  // whose source vertex is inlined under that operator>
+  public final Map<Op, List<Connection>> inlineMap = new LinkedHashMap<>();
 
-  @Override
-  public String mapEdgeType(String edgeName) {
-    switch (edgeName) {
-      case "BROADCAST_EDGE":
-        return "BROADCAST";
-      case "SIMPLE_EDGE":
-        return "SHUFFLE";
-      case "CUSTOM_SIMPLE_EDGE":
-        return "PARTITION_ONLY_SHUFFLE";
-      case "CUSTOM_EDGE":
-        return "MULTICAST";
-      default:
-        return "UNKNOWN";
+  public TezJsonParser() {
+    super();
+    LOG = LoggerFactory.getLogger(this.getClass().getName());
+  }
+
+  public void extractStagesAndPlans(JSONObject inputObject) throws Exception {
+    // extract stages
+    JSONObject dependency = inputObject.getJSONObject("STAGE DEPENDENCIES");
+    if (dependency != null && dependency.length() > 0) {
+      // iterate for the first time to get all the names of stages.
+      for (String stageName : JSONObject.getNames(dependency)) {
+        this.stages.put(stageName, new Stage(stageName, this));
+      }
+      // iterate for the second time to get all the dependency.
+      for (String stageName : JSONObject.getNames(dependency)) {
+        JSONObject dependentStageNames = dependency.getJSONObject(stageName);
+        this.stages.get(stageName).addDependency(dependentStageNames, this.stages);
+      }
+    }
+    // extract stage plans
+    JSONObject stagePlans = inputObject.getJSONObject("STAGE PLANS");
+    if (stagePlans != null && stagePlans.length() > 0) {
+      for (String stageName : JSONObject.getNames(stagePlans)) {
+        JSONObject stagePlan = stagePlans.getJSONObject(stageName);
+        this.stages.get(stageName).extractVertex(stagePlan);
+      }
+    }
+  }
+
+  /**
+   * @param indentFlag
+   *          the indentation level
+   * @return the prefix string used to indent one line of output
+   */
+  public static String prefixString(int indentFlag) {
+    StringBuilder sb = new StringBuilder();
+    for (int index = 0; index < indentFlag; index++) {
+      sb.append("  ");
+    }
+    return sb.toString();
+  }
+
+  /**
+   * @param indentFlag
+   *          the indentation level
+   * @param tail
+   *          the tail that overwrites the end of the indent, e.g., "<-"
+   * @return the prefix string ending with the given tail
+   */
+  public static String prefixString(int indentFlag, String tail) {
+    StringBuilder sb = new StringBuilder();
+    for (int index = 0; index < indentFlag; index++) {
+      sb.append("  ");
     }
+    int len = sb.length();
+    return sb.replace(len - tail.length(), len, tail).toString();
   }
 
   @Override
-  public String getFrameworkName() {
-    return "Tez";
+  public void print(JSONObject inputObject, PrintStream outputStream) throws Exception {
+    LOG.info("JsonParser is parsing: " + inputObject.toString());
+    this.extractStagesAndPlans(inputObject);
+    Printer printer = new Printer();
+    // print out the cbo info
+    if (inputObject.has("cboInfo")) {
+      printer.println(inputObject.getString("cboInfo"));
+      printer.println();
+    }
+    // print out the vertex dependency in root stage
+    for (Stage candidate : this.stages.values()) {
+      if (candidate.tezStageDependency != null && candidate.tezStageDependency.size() > 0) {
+        printer.println("Vertex dependency in root stage");
+        for (Entry<Vertex, List<Connection>> entry : candidate.tezStageDependency.entrySet()) {
+          StringBuilder sb = new StringBuilder();
+          sb.append(entry.getKey().name);
+          sb.append(" <- ");
+          boolean printcomma = false;
+          for (Connection connection : entry.getValue()) {
+            if (printcomma) {
+              sb.append(", ");
+            } else {
+              printcomma = true;
+            }
+            sb.append(connection.from.name + " (" + connection.type + ")");
+          }
+          printer.println(sb.toString());
+        }
+        printer.println();
+      }
+    }
+    // print out all the stages that have no childStages.
+    for (Stage candidate : this.stages.values()) {
+      if (candidate.childStages.isEmpty()) {
+        candidate.print(printer, 0);
+      }
+    }
+    outputStream.println(printer.toString());
+  }
+
+  public void addInline(Op op, Connection connection) {
+    List<Connection> list = inlineMap.get(op);
+    if (list == null) {
+      list = new ArrayList<>();
+      list.add(connection);
+      inlineMap.put(op, list);
+    } else {
+      list.add(connection);
+    }
+  }
+
+  public boolean isInline(Vertex v) {
+    for (List<Connection> list : inlineMap.values()) {
+      for (Connection connection : list) {
+        if (connection.from.equals(v)) {
+          return true;
+        }
+      }
+    }
+    return false;
   }
-}
\ No newline at end of file
+}
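
The two prefixString helpers only differ in whether a tail overwrites the end of the indent; a small sketch of the strings they produce (each indent level is two spaces):

  String plain = TezJsonParser.prefixString(2);        // "    "
  String arrow = TezJsonParser.prefixString(2, "<-");  // "  <-"
  // the "<-" form is what Vertex.print and Op.print use to mark a branch
  // that feeds into a join or a referenced vertex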

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParserUtils.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParserUtils.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParserUtils.java
new file mode 100644
index 0000000..363a422
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParserUtils.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common.jsonexplain.tez;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+
+public class TezJsonParserUtils {
+
+  public static List<String> OperatorNoStats = Arrays.asList(new String[] { "File Output Operator",
+      "Reduce Output Operator" });
+
+  public static String renameReduceOutputOperator(String operatorName, Vertex vertex) {
+    if (operatorName.equals("Reduce Output Operator") && vertex.edgeType != null) {
+      return vertex.edgeType.name();
+    } else {
+      return operatorName;
+    }
+  }
+
+  public static String attrsToString(Map<String, String> attrs) {
+    StringBuffer sb = new StringBuffer();
+    boolean first = true;
+    for (Entry<String, String> entry : attrs.entrySet()) {
+      if (first) {
+        first = false;
+      } else {
+        sb.append(",");
+      }
+      sb.append(entry.getKey() + entry.getValue());
+    }
+    return sb.toString();
+  }
+}
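
attrsToString concatenates each key directly with its value (no separator) and joins the entries with commas; a small sketch with made-up attribute values:

  // uses java.util.TreeMap, matching the attrs maps built in Op and Stage
  Map<String, String> attrs = new TreeMap<>();
  attrs.put("Statistics:", "Num rows: 500 Data size: 5312");
  attrs.put("filterExpr:", "(key > 10)");
  String rendered = TezJsonParserUtils.attrsToString(attrs);
  // "Statistics:Num rows: 500 Data size: 5312,filterExpr:(key > 10)"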

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
new file mode 100644
index 0000000..3d559bd
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
@@ -0,0 +1,331 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common.jsonexplain.tez;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.apache.hadoop.hive.common.jsonexplain.tez.Op.OpType;
+import org.apache.hadoop.util.hash.Hash;
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+
+public final class Vertex implements Comparable<Vertex>{
+  public final String name;
+  //tezJsonParser
+  public final TezJsonParser parser;
+  // vertex's parent connections.
+  public final List<Connection> parentConnections = new ArrayList<>();
+  // vertex's children vertex.
+  public final List<Vertex> children = new ArrayList<>();
+  // the jsonObject for this vertex
+  public final JSONObject vertexObject;
+  // whether this vertex is a dummy (one that does not really exist but is created),
+  // e.g., a dummy vertex for a mergejoin branch
+  public boolean dummy;
+  // the rootOps in this vertex
+  public final List<Op> rootOps = new ArrayList<>();
+  // if this vertex is a mergejoin, we create a dummy vertex for each
+  // mergejoin branch (e.g., for a self join)
+  public final List<Vertex> mergeJoinDummyVertexs = new ArrayList<>();
+  // whether this vertex has multiple reduce operators
+  public boolean hasMultiReduceOp = false;
+  // execution mode
+  public String executionMode = "";
+  // tagToInput for reduce work
+  public Map<String, String> tagToInput = new LinkedHashMap<>();
+  // tag
+  public String tag;
+
+  public static enum VertexType {
+    MAP, REDUCE, UNION, UNKNOWN
+  };
+  public VertexType vertexType;
+
+  public static enum EdgeType {
+    BROADCAST, SHUFFLE, MULTICAST, PARTITION_ONLY_SHUFFLE, UNKNOWN
+  };
+  public EdgeType edgeType;
+
+  public Vertex(String name, JSONObject vertexObject, TezJsonParser tezJsonParser) {
+    super();
+    this.name = name;
+    if (this.name != null) {
+      if (this.name.contains("Map")) {
+        this.vertexType = VertexType.MAP;
+      } else if (this.name.contains("Reduce")) {
+        this.vertexType = VertexType.REDUCE;
+      } else if (this.name.contains("Union")) {
+        this.vertexType = VertexType.UNION;
+      } else {
+        this.vertexType = VertexType.UNKNOWN;
+      }
+    } else {
+      this.vertexType = VertexType.UNKNOWN;
+    }
+    this.dummy = false;
+    this.vertexObject = vertexObject;
+    parser = tezJsonParser;
+  }
+
+  public void addDependency(Connection connection) throws JSONException {
+    this.parentConnections.add(connection);
+  }
+
+  /**
+   * We assume that a vertex contains a single top-level Map Operator Tree or
+   * Reduce Operator Tree.
+   *
+   * @throws JSONException
+   * @throws JsonParseException
+   * @throws JsonMappingException
+   * @throws IOException
+   * @throws Exception
+   */
+  public void extractOpTree() throws JSONException, JsonParseException, JsonMappingException,
+      IOException, Exception {
+    if (vertexObject.length() != 0) {
+      for (String key : JSONObject.getNames(vertexObject)) {
+        if (key.equals("Map Operator Tree:")) {
+          extractOp(vertexObject.getJSONArray(key).getJSONObject(0));
+        } else if (key.equals("Reduce Operator Tree:") || key.equals("Processor Tree:")) {
+          extractOp(vertexObject.getJSONObject(key));
+        } else if (key.equals("Join:")) {
+          // this is the case when we have a map-side SMB join
+          // one input of the join is treated as a dummy vertex
+          JSONArray array = vertexObject.getJSONArray(key);
+          for (int index = 0; index < array.length(); index++) {
+            JSONObject mpOpTree = array.getJSONObject(index);
+            Vertex v = new Vertex(null, mpOpTree, parser);
+            v.extractOpTree();
+            v.dummy = true;
+            mergeJoinDummyVertexs.add(v);
+          }
+        } else if (key.equals("Merge File Operator")) {
+          JSONObject opTree = vertexObject.getJSONObject(key);
+          if (opTree.has("Map Operator Tree:")) {
+            extractOp(opTree.getJSONArray("Map Operator Tree:").getJSONObject(0));
+          } else {
+            throw new Exception("Merge File Operator does not have a Map Operator Tree");
+          }
+        } else if (key.equals("Execution mode:")) {
+          executionMode = " " + vertexObject.getString(key);
+        } else if (key.equals("tagToInput:")) {
+          JSONObject tagToInput = vertexObject.getJSONObject(key);
+          for (String tag : JSONObject.getNames(tagToInput)) {
+            this.tagToInput.put(tag, (String) tagToInput.get(tag));
+          }
+        } else if (key.equals("tag:")) {
+          this.tag = vertexObject.getString(key);
+        } else {
+          throw new Exception("Unsupported operator tree in vertex " + this.name);
+        }
+      }
+    }
+  }
+
+  /**
+   * Assumption: each operator has at most one parent but may have many
+   * children.
+   *
+   * @param operator
+   * @return the extracted operator
+   * @throws JSONException
+   * @throws JsonParseException
+   * @throws JsonMappingException
+   * @throws IOException
+   * @throws Exception
+   */
+  Op extractOp(JSONObject operator) throws JSONException, JsonParseException, JsonMappingException,
+      IOException, Exception {
+    String[] names = JSONObject.getNames(operator);
+    if (names.length != 1) {
+      throw new Exception("Expect only one operator in " + operator.toString());
+    } else {
+      String opName = names[0];
+      JSONObject attrObj = (JSONObject) operator.get(opName);
+      Map<String, String> attrs = new TreeMap<>();
+      List<Op> children = new ArrayList<>();
+      String id = null;
+      String outputVertexName = null;
+      for (String attrName : JSONObject.getNames(attrObj)) {
+        if (attrName.equals("children")) {
+          Object childrenObj = attrObj.get(attrName);
+          if (childrenObj instanceof JSONObject) {
+            if (((JSONObject) childrenObj).length() != 0) {
+              children.add(extractOp((JSONObject) childrenObj));
+            }
+          } else if (childrenObj instanceof JSONArray) {
+            if (((JSONArray) childrenObj).length() != 0) {
+              JSONArray array = ((JSONArray) childrenObj);
+              for (int index = 0; index < array.length(); index++) {
+                children.add(extractOp(array.getJSONObject(index)));
+              }
+            }
+          } else {
+            throw new Exception("The children of operator " + opName + " in vertex " + this.name
+                + " are neither a jsonobject nor a jsonarray");
+          }
+        } else {
+          if (attrName.equals("OperatorId:")) {
+            id = attrObj.get(attrName).toString();
+          } else if (attrName.equals("outputname:")) {
+            outputVertexName = attrObj.get(attrName).toString();
+          } else {
+            if (!attrObj.get(attrName).toString().isEmpty()) {
+              attrs.put(attrName, attrObj.get(attrName).toString());
+            }
+          }
+        }
+      }
+      Op op = new Op(opName, id, outputVertexName, children, attrs, operator, this, parser);
+      if (!children.isEmpty()) {
+        for (Op child : children) {
+          child.parent = op;
+        }
+      } else {
+        this.rootOps.add(op);
+      }
+      return op;
+    }
+  }
+
+  public void print(Printer printer, int indentFlag, String type, Vertex callingVertex)
+      throws JSONException, Exception {
+    // print vertexname
+    if (parser.printSet.contains(this) && !hasMultiReduceOp) {
+      if (type != null) {
+        printer.println(TezJsonParser.prefixString(indentFlag, "<-")
+            + " Please refer to the previous " + this.name + " [" + type + "]");
+      } else {
+        printer.println(TezJsonParser.prefixString(indentFlag, "<-")
+            + " Please refer to the previous " + this.name);
+      }
+      return;
+    }
+    parser.printSet.add(this);
+    if (type != null) {
+      printer.println(TezJsonParser.prefixString(indentFlag, "<-") + this.name + " [" + type + "]"
+          + this.executionMode);
+    } else if (this.name != null) {
+      printer.println(TezJsonParser.prefixString(indentFlag) + this.name + this.executionMode);
+    }
+    // print operators
+    if (hasMultiReduceOp && !(callingVertex.vertexType == VertexType.UNION)) {
+      // find the right op
+      Op choose = null;
+      for (Op op : this.rootOps) {
+        if (op.outputVertexName.equals(callingVertex.name)) {
+          choose = op;
+        }
+      }
+      if (choose != null) {
+        choose.print(printer, indentFlag, false);
+      } else {
+        throw new Exception("Can not find the right reduce output operator for vertex " + this.name);
+      }
+    } else {
+      for (Op op : this.rootOps) {
+        // dummy vertex is treated as a branch of a join operator
+        if (this.dummy) {
+          op.print(printer, indentFlag, true);
+        } else {
+          op.print(printer, indentFlag, false);
+        }
+      }
+    }
+    if (vertexType == VertexType.UNION) {
+      // print dependent vertexs
+      indentFlag++;
+      for (int index = 0; index < this.parentConnections.size(); index++) {
+        Connection connection = this.parentConnections.get(index);
+        connection.from.print(printer, indentFlag, connection.type, this);
+      }
+    }
+  }
+
+  /**
+   * We check if a vertex has multiple reduce operators.
+   */
+  public void checkMultiReduceOperator() {
+    // check that this is a reduce vertex with more than one root operator
+    if (!this.name.contains("Reduce") || this.rootOps.size() < 2) {
+      return;
+    }
+    // check if all the child ops are reduce output operators
+    for (Op op : this.rootOps) {
+      if (op.type != OpType.RS) {
+        return;
+      }
+    }
+    this.hasMultiReduceOp = true;
+  }
+
+  public void setType(String type) {
+    switch (type) {
+    case "BROADCAST_EDGE":
+      this.edgeType = EdgeType.BROADCAST;
+      break;
+    case "SIMPLE_EDGE":
+      this.edgeType = EdgeType.SHUFFLE;
+      break;
+    case "CUSTOM_SIMPLE_EDGE":
+      this.edgeType = EdgeType.PARTITION_ONLY_SHUFFLE;
+      break;
+    case "CUSTOM_EDGE":
+      this.edgeType = EdgeType.MULTICAST;
+      break;
+    default:
+      this.edgeType = EdgeType.UNKNOWN;
+    }
+  }
+
+  // this code should be removed once HIVE-11075 switches to topological ordering
+  @Override
+  public int compareTo(Vertex o) {
+    return this.name.compareTo(o.name);
+  }
+
+  public Op getSingleRSOp() {
+    if (rootOps.size() == 0) {
+      return null;
+    } else {
+      Op ret = null;
+      for (Op op : rootOps) {
+        if (op.type == OpType.RS) {
+          if (ret == null) {
+            ret = op;
+          } else {
+            // find more than one RS Op
+            return null;
+          }
+        }
+      }
+      return ret;
+    }
+  }
+}
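
setType maps the Tez edge names from the explain output onto the EdgeType values used when the vertex is printed; a minimal sketch, assuming an empty vertex json object:

  // uses org.json.JSONObject
  Vertex v = new Vertex("Reducer 2", new JSONObject(), new TezJsonParser());
  v.setType("CUSTOM_SIMPLE_EDGE");
  // v.edgeType is now EdgeType.PARTITION_ONLY_SHUFFLE; any unrecognized
  // edge name falls back to EdgeType.UNKNOWN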

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/log/InPlaceUpdate.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/log/InPlaceUpdate.java b/common/src/java/org/apache/hadoop/hive/common/log/InPlaceUpdate.java
index 1e026a7..6db5c18 100644
--- a/common/src/java/org/apache/hadoop/hive/common/log/InPlaceUpdate.java
+++ b/common/src/java/org/apache/hadoop/hive/common/log/InPlaceUpdate.java
@@ -1,20 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 package org.apache.hadoop.hive.common.log;
 
 import com.google.common.base.Function;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/log/ProgressMonitor.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/log/ProgressMonitor.java b/common/src/java/org/apache/hadoop/hive/common/log/ProgressMonitor.java
index e7661b4..ee02ccb 100644
--- a/common/src/java/org/apache/hadoop/hive/common/log/ProgressMonitor.java
+++ b/common/src/java/org/apache/hadoop/hive/common/log/ProgressMonitor.java
@@ -1,20 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 package org.apache.hadoop.hive.common.log;
 
 import java.util.Collections;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleMetrics.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleMetrics.java b/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleMetrics.java
index 2d6c1b4..e8abf6c 100644
--- a/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleMetrics.java
+++ b/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleMetrics.java
@@ -44,8 +44,6 @@ import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
 import com.google.common.collect.Lists;
 
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -192,8 +190,22 @@ public class CodahaleMetrics implements org.apache.hadoop.hive.common.metrics.co
     registerAll("threads", new ThreadStatesGaugeSet());
     registerAll("classLoading", new ClassLoadingGaugeSet());
 
-    //initialize reporters
-    initReporting();
+    //Metrics reporter
+    Set<MetricsReporting> finalReporterList = new HashSet<MetricsReporting>();
+    List<String> metricsReporterNames = Lists.newArrayList(
+      Splitter.on(",").trimResults().omitEmptyStrings().split(conf.getVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER)));
+
+    if(metricsReporterNames != null) {
+      for (String metricsReportingName : metricsReporterNames) {
+        try {
+          MetricsReporting reporter = MetricsReporting.valueOf(metricsReportingName.trim().toUpperCase());
+          finalReporterList.add(reporter);
+        } catch (IllegalArgumentException e) {
+          LOGGER.warn("Metrics reporter skipped due to invalid configured reporter: " + metricsReportingName);
+        }
+      }
+    }
+    initReporting(finalReporterList);
   }
 
 
@@ -373,99 +385,107 @@ public class CodahaleMetrics implements org.apache.hadoop.hive.common.metrics.co
   }
 
   /**
-   * Initializes reporters from HIVE_CODAHALE_METRICS_REPORTER_CLASSES or HIVE_METRICS_REPORTER if the former is not defined.
-   * Note: if both confs are defined, only  HIVE_CODAHALE_METRICS_REPORTER_CLASSES will be used.
+   * Should be only called once to initialize the reporters
    */
-  private void initReporting() {
-
-    if (!(initCodahaleMetricsReporterClasses() || initMetricsReporter())) {
-      LOGGER.warn("Unable to initialize metrics reporting");
-    }
-    if (reporters.isEmpty()) {
-      // log a warning incase no reporters were successfully added
-      LOGGER.warn("No reporters configured for codahale metrics!");
+  private void initReporting(Set<MetricsReporting> reportingSet) {
+    for (MetricsReporting reporting : reportingSet) {
+      switch(reporting) {
+        case CONSOLE:
+          final ConsoleReporter consoleReporter = ConsoleReporter.forRegistry(metricRegistry)
+            .convertRatesTo(TimeUnit.SECONDS)
+            .convertDurationsTo(TimeUnit.MILLISECONDS)
+            .build();
+          consoleReporter.start(1, TimeUnit.SECONDS);
+          reporters.add(consoleReporter);
+          break;
+        case JMX:
+          final JmxReporter jmxReporter = JmxReporter.forRegistry(metricRegistry)
+            .convertRatesTo(TimeUnit.SECONDS)
+            .convertDurationsTo(TimeUnit.MILLISECONDS)
+            .build();
+          jmxReporter.start();
+          reporters.add(jmxReporter);
+          break;
+        case JSON_FILE:
+          final JsonFileReporter jsonFileReporter = new JsonFileReporter();
+          jsonFileReporter.start();
+          reporters.add(jsonFileReporter);
+          break;
+        case HADOOP2:
+          String applicationName = conf.get(HiveConf.ConfVars.HIVE_METRICS_HADOOP2_COMPONENT_NAME.varname);
+          long reportingInterval = HiveConf.toTime(
+              conf.get(HiveConf.ConfVars.HIVE_METRICS_HADOOP2_INTERVAL.varname),
+              TimeUnit.SECONDS, TimeUnit.SECONDS);
+          final HadoopMetrics2Reporter metrics2Reporter = HadoopMetrics2Reporter.forRegistry(metricRegistry)
+              .convertRatesTo(TimeUnit.SECONDS)
+              .convertDurationsTo(TimeUnit.MILLISECONDS)
+              .build(DefaultMetricsSystem.initialize(applicationName), // The application-level name
+                  applicationName, // Component name
+                  applicationName, // Component description
+                  "General"); // Name for each metric record
+          metrics2Reporter.start(reportingInterval, TimeUnit.SECONDS);
+          break;
+      }
     }
   }
 
-  /**
-   * Initializes reporting using HIVE_CODAHALE_METRICS_REPORTER_CLASSES.
-   * @return whether initialization was successful or not
-   */
-  private boolean initCodahaleMetricsReporterClasses() {
+  class JsonFileReporter implements Closeable {
+    private ObjectMapper jsonMapper = null;
+    private java.util.Timer timer = null;
 
-    List<String> reporterClasses = Lists.newArrayList(Splitter.on(",").trimResults().
-        omitEmptyStrings().split(conf.getVar(HiveConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES)));
-    if (reporterClasses.isEmpty()) {
-      return false;
-    }
+    public void start() {
+      this.jsonMapper = new ObjectMapper().registerModule(new MetricsModule(TimeUnit.MILLISECONDS, TimeUnit.MILLISECONDS, false));
+      this.timer = new java.util.Timer(true);
 
-    for (String reporterClass : reporterClasses) {
-      Class name = null;
-      try {
-        name = conf.getClassByName(reporterClass);
-      } catch (ClassNotFoundException e) {
-        LOGGER.error("Unable to instantiate metrics reporter class " + reporterClass +
-            " from conf HIVE_CODAHALE_METRICS_REPORTER_CLASSES", e);
-        throw new IllegalArgumentException(e);
-      }
-      try {
-        Constructor constructor = name.getConstructor(MetricRegistry.class, HiveConf.class);
-        CodahaleReporter reporter = (CodahaleReporter) constructor.newInstance(metricRegistry, conf);
-        reporter.start();
-        reporters.add(reporter);
-      } catch (NoSuchMethodException | InstantiationException |
-          IllegalAccessException | InvocationTargetException e) {
-        LOGGER.error("Unable to instantiate using constructor(MetricRegistry, HiveConf) for"
-            + " reporter " + reporterClass + " from conf HIVE_CODAHALE_METRICS_REPORTER_CLASSES",
-            e);
-        throw new IllegalArgumentException(e);
-      }
-    }
-    return true;
-  }
+      long time = conf.getTimeVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_INTERVAL, TimeUnit.MILLISECONDS);
+      final String pathString = conf.getVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_LOCATION);
+
+      timer.schedule(new TimerTask() {
+        @Override
+        public void run() {
+          BufferedWriter bw = null;
+          try {
+            String json = jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(metricRegistry);
+            Path tmpPath = new Path(pathString + ".tmp");
+            URI tmpPathURI = tmpPath.toUri();
+            FileSystem fs = null;
+            if (tmpPathURI.getScheme() == null && tmpPathURI.getAuthority() == null) {
+              //default local
+              fs = FileSystem.getLocal(conf);
+            } else {
+              fs = FileSystem.get(tmpPathURI, conf);
+            }
+            fs.delete(tmpPath, true);
+            bw = new BufferedWriter(new OutputStreamWriter(fs.create(tmpPath, true)));
+            bw.write(json);
+            bw.close();
+            fs.setPermission(tmpPath, FsPermission.createImmutable((short) 0644));
+
+            Path path = new Path(pathString);
+            fs.rename(tmpPath, path);
+            fs.setPermission(path, FsPermission.createImmutable((short) 0644));
+          } catch (Exception e) {
+            LOGGER.warn("Error writing JSON Metrics to file", e);
+          } finally {
+            try {
+              if (bw != null) {
+                bw.close();
+              }
+            } catch (IOException e) {
+              //Ignore.
+            }
+          }
 
-  /**
-   * Initializes reporting using HIVE_METRICS+REPORTER.
-   * @return whether initialization was successful or not
-   */
-  private boolean initMetricsReporter() {
 
-    List<String> metricsReporterNames = Lists.newArrayList(Splitter.on(",").trimResults().
-        omitEmptyStrings().split(conf.getVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER)));
-    if (metricsReporterNames.isEmpty()) {
-      return false;
+        }
+      }, 0, time);
     }
 
-    MetricsReporting reporter = null;
-    for (String metricsReportingName : metricsReporterNames) {
-      try {
-        reporter = MetricsReporting.valueOf(metricsReportingName.trim().toUpperCase());
-      } catch (IllegalArgumentException e) {
-        LOGGER.error("Invalid reporter name " + metricsReportingName, e);
-        throw e;
-      }
-      CodahaleReporter codahaleReporter = null;
-      switch (reporter) {
-      case CONSOLE:
-        codahaleReporter = new ConsoleMetricsReporter(metricRegistry, conf);
-        break;
-      case JMX:
-        codahaleReporter = new JmxMetricsReporter(metricRegistry, conf);
-        break;
-      case JSON_FILE:
-        codahaleReporter = new JsonFileMetricsReporter(metricRegistry, conf);
-        break;
-      case HADOOP2:
-        codahaleReporter = new Metrics2Reporter(metricRegistry, conf);
-        break;
-      default:
-        LOGGER.warn("Unhandled reporter " + reporter + " provided.");
-      }
-      if (codahaleReporter != null) {
-        codahaleReporter.start();
-        reporters.add(codahaleReporter);
+    @Override
+    public void close() {
+      if (timer != null) {
+        this.timer.cancel();
       }
     }
-    return true;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleReporter.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleReporter.java b/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleReporter.java
deleted file mode 100644
index 9424f28..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleReporter.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.common.metrics.metrics2;
-
-import com.codahale.metrics.Reporter;
-import java.io.Closeable;
-
-public interface CodahaleReporter extends Closeable, Reporter {
-
-  /**
-   * Start the reporter.
-   */
-  public void start();
-}
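
The CodahaleReporter interface removed here was the plug point for the concrete reporters removed below: start() begins emission, close() (inherited from Closeable) stops it, and Reporter is only a marker. As a rough sketch of a custom implementation (this SLF4J-logging reporter is hypothetical and not part of Hive):

    import com.codahale.metrics.MetricRegistry;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    /** Hypothetical reporter that periodically logs how many metrics are registered. */
    public class LoggingMetricsReporter implements CodahaleReporter {

      private static final Logger LOG = LoggerFactory.getLogger(LoggingMetricsReporter.class);
      private final MetricRegistry registry;
      private final ScheduledExecutorService executor =
          Executors.newSingleThreadScheduledExecutor();

      public LoggingMetricsReporter(MetricRegistry registry) {
        this.registry = registry;
      }

      @Override
      public void start() {
        // Poll the registry once a minute; a real reporter would format and emit the values.
        executor.scheduleWithFixedDelay(
            () -> LOG.info("{} metrics currently registered", registry.getMetrics().size()),
            0, 1, TimeUnit.MINUTES);
      }

      @Override
      public void close() {
        executor.shutdown();
      }
    }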

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/ConsoleMetricsReporter.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/ConsoleMetricsReporter.java b/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/ConsoleMetricsReporter.java
deleted file mode 100644
index dea1848..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/ConsoleMetricsReporter.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common.metrics.metrics2;
-
-import com.codahale.metrics.ConsoleReporter;
-import com.codahale.metrics.MetricRegistry;
-import com.codahale.metrics.Reporter;
-import java.io.Closeable;
-import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.hive.conf.HiveConf;
-
-
-/**
- * A wrapper around Codahale ConsoleReporter to make it a pluggable/configurable Hive Metrics reporter.
- */
-public class ConsoleMetricsReporter implements CodahaleReporter {
-
-  private final ConsoleReporter reporter;
-
-  public ConsoleMetricsReporter(MetricRegistry registry, HiveConf conf) {
-
-    reporter = ConsoleReporter.forRegistry(registry)
-        .convertRatesTo(TimeUnit.SECONDS)
-        .convertDurationsTo(TimeUnit.MILLISECONDS)
-        .build();
-
-  }
-
-  @Override
-  public void start() {
-    reporter.start(1, TimeUnit.SECONDS);
-  }
-
-  @Override
-  public void close() {
-    reporter.close();
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/JmxMetricsReporter.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/JmxMetricsReporter.java b/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/JmxMetricsReporter.java
deleted file mode 100644
index f12adf9..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/JmxMetricsReporter.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.common.metrics.metrics2;
-
-import com.codahale.metrics.JmxReporter;
-import com.codahale.metrics.MetricRegistry;
-import com.codahale.metrics.Reporter;
-import java.io.Closeable;
-import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.hive.conf.HiveConf;
-
-/**
- * A wrapper around Codahale JmxReporter to make it a pluggable/configurable Hive Metrics reporter.
- */
-public class JmxMetricsReporter implements CodahaleReporter {
-
-  private final MetricRegistry registry;
-  private final HiveConf conf;
-  private final JmxReporter jmxReporter;
-
-  public JmxMetricsReporter(MetricRegistry registry, HiveConf conf) {
-    this.registry = registry;
-    this.conf = conf;
-
-    jmxReporter = JmxReporter.forRegistry(registry)
-        .convertRatesTo(TimeUnit.SECONDS)
-        .convertDurationsTo(TimeUnit.MILLISECONDS)
-        .build();
-  }
-
-  @Override
-  public void start() {
-    jmxReporter.start();
-  }
-
-  @Override
-  public void close() {
-    jmxReporter.close();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/JsonFileMetricsReporter.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/JsonFileMetricsReporter.java b/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/JsonFileMetricsReporter.java
deleted file mode 100644
index c07517a..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/JsonFileMetricsReporter.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common.metrics.metrics2;
-
-import com.codahale.metrics.MetricRegistry;
-import com.codahale.metrics.json.MetricsModule;
-import com.fasterxml.jackson.core.JsonProcessingException;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import java.io.BufferedWriter;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.net.URI;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * A metrics reporter for CodahaleMetrics that dumps metrics periodically into a file in JSON format.
- */
-
-public class JsonFileMetricsReporter implements CodahaleReporter {
-
-  private final MetricRegistry metricRegistry;
-  private final ObjectWriter jsonWriter;
-  private final ScheduledExecutorService executorService;
-  private final HiveConf conf;
-  private final long interval;
-  private final String pathString;
-  private final Path path;
-
-  private static final Logger LOGGER = LoggerFactory.getLogger(JsonFileMetricsReporter.class);
-
-  public JsonFileMetricsReporter(MetricRegistry registry, HiveConf conf) {
-    this.metricRegistry = registry;
-    this.jsonWriter =
-        new ObjectMapper().registerModule(new MetricsModule(TimeUnit.MILLISECONDS,
-            TimeUnit.MILLISECONDS, false)).writerWithDefaultPrettyPrinter();
-    executorService = Executors.newSingleThreadScheduledExecutor();
-    this.conf = conf;
-
-    interval = conf.getTimeVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_INTERVAL, TimeUnit.MILLISECONDS);
-    pathString = conf.getVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_LOCATION);
-    path = new Path(pathString);
-  }
-
-  @Override
-  public void start() {
-
-    final Path tmpPath = new Path(pathString + ".tmp");
-    URI tmpPathURI = tmpPath.toUri();
-    final FileSystem fs;
-    try {
-      if (tmpPathURI.getScheme() == null && tmpPathURI.getAuthority() == null) {
-        //default local
-        fs = FileSystem.getLocal(conf);
-      } else {
-        fs = FileSystem.get(tmpPathURI, conf);
-      }
-    }
-    catch (IOException e) {
-        LOGGER.error("Unable to access filesystem for path " + tmpPath + ". Aborting reporting", e);
-        return;
-    }
-
-    Runnable task = new Runnable() {
-      public void run() {
-        try {
-          String json = null;
-          try {
-            json = jsonWriter.writeValueAsString(metricRegistry);
-          } catch (JsonProcessingException e) {
-            LOGGER.error("Unable to convert json to string ", e);
-            return;
-          }
-
-          BufferedWriter bw = null;
-          try {
-            fs.delete(tmpPath, true);
-            bw = new BufferedWriter(new OutputStreamWriter(fs.create(tmpPath, true)));
-            bw.write(json);
-            fs.setPermission(tmpPath, FsPermission.createImmutable((short) 0644));
-          } catch (IOException e) {
-            LOGGER.error("Unable to write to temp file " + tmpPath, e);
-            return;
-          } finally {
-            if (bw != null) {
-              bw.close();
-            }
-          }
-
-          try {
-            fs.rename(tmpPath, path);
-            fs.setPermission(path, FsPermission.createImmutable((short) 0644));
-          } catch (IOException e) {
-            LOGGER.error("Unable to rename temp file " + tmpPath + " to " + pathString, e);
-            return;
-          }
-        } catch (Throwable t) {
-          // catch all errors (throwables and exceptions) to prevent subsequent tasks from being suppressed
-          LOGGER.error("Error executing scheduled task ", t);
-        }
-      }
-    };
-
-    executorService.scheduleWithFixedDelay(task, 0, interval, TimeUnit.MILLISECONDS);
-  }
-
-  @Override
-  public void close() {
-    executorService.shutdown();
-  }
-}
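
The deleted JsonFileMetricsReporter serialized the whole MetricRegistry with Jackson's MetricsModule and dropped the result at HIVE_METRICS_JSON_FILE_LOCATION, so monitoring scripts could poll a file instead of calling the service. A hypothetical consumer of that file (the path is a placeholder, and the gauges/value layout assumes the standard Dropwizard MetricsModule output):

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import java.io.File;
    import java.io.IOException;

    /** Hypothetical consumer of the JSON report the deleted reporter used to produce. */
    public class ReadMetricsJson {
      public static void main(String[] args) throws IOException {
        // Path is an assumption; it is whatever HIVE_METRICS_JSON_FILE_LOCATION pointed at.
        JsonNode root = new ObjectMapper().readTree(new File("/tmp/report.json"));
        JsonNode gauges = root.path("gauges");
        gauges.fieldNames().forEachRemaining(
            name -> System.out.println(name + " = " + gauges.path(name).path("value")));
      }
    }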

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/Metrics2Reporter.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/Metrics2Reporter.java b/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/Metrics2Reporter.java
deleted file mode 100644
index 3b402d8..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/Metrics2Reporter.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.common.metrics.metrics2;
-
-import com.codahale.metrics.MetricRegistry;
-import com.github.joshelser.dropwizard.metrics.hadoop.HadoopMetrics2Reporter;
-import java.io.Closeable;
-import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import com.codahale.metrics.Reporter;
-
-/**
- * A wrapper around Codahale HadoopMetrics2Reporter to make it a pluggable/configurable Hive Metrics reporter.
- */
-public class Metrics2Reporter implements CodahaleReporter {
-
-  private final MetricRegistry metricRegistry;
-  private final HiveConf conf;
-  private final HadoopMetrics2Reporter reporter;
-
-  public Metrics2Reporter(MetricRegistry registry, HiveConf conf) {
-    this.metricRegistry = registry;
-    this.conf = conf;
-    String applicationName = conf.get(HiveConf.ConfVars.HIVE_METRICS_HADOOP2_COMPONENT_NAME.varname);
-
-    reporter = HadoopMetrics2Reporter.forRegistry(metricRegistry)
-        .convertRatesTo(TimeUnit.SECONDS)
-        .convertDurationsTo(TimeUnit.MILLISECONDS)
-        .build(DefaultMetricsSystem.initialize(applicationName), // The application-level name
-            applicationName, // Component name
-            applicationName, // Component description
-            "General"); // Name for each metric record
-  }
-
-  @Override
-  public void start() {
-    long reportingInterval =
-        HiveConf.toTime(conf.get(HiveConf.ConfVars.HIVE_METRICS_HADOOP2_INTERVAL.varname), TimeUnit.SECONDS, TimeUnit.SECONDS);
-    reporter.start(reportingInterval, TimeUnit.SECONDS);
-  }
-
-  @Override
-  public void close() {
-    reporter.close();
-  }
-}
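
The initMetricsReporter method deleted at the top of this change selected among CONSOLE, JMX, JSON_FILE and HADOOP2 from the comma-separated value of HIVE_METRICS_REPORTER. A sketch of wiring that up programmatically, assuming the ConfVars named in the deleted code are still present; the concrete values are examples only:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class MetricsConfExample {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Reporter names match the switch in the deleted initMetricsReporter.
        conf.set(HiveConf.ConfVars.HIVE_METRICS_REPORTER.varname, "JSON_FILE,JMX");
        // Location and interval for the JSON_FILE reporter; the values here are made up.
        conf.set(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_LOCATION.varname, "/tmp/hive-metrics.json");
        conf.set(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_INTERVAL.varname, "60000ms");
        System.out.println(conf.get(HiveConf.ConfVars.HIVE_METRICS_REPORTER.varname));
      }
    }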


http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql
deleted file mode 100644
index 7907c59..0000000
--- a/metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql
+++ /dev/null
@@ -1,811 +0,0 @@
--- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
--- NOTE: Some versions of SchemaTool do not automatically generate this table.
--- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
-CREATE TABLE SEQUENCE_TABLE
-(
-   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
-   NEXT_VAL NUMBER NOT NULL
-);
-
-ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
-
--- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
--- This table is required if datanucleus.autoStartMechanism=SchemaTable
--- NOTE: Some versions of SchemaTool do not automatically generate this table.
--- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
-CREATE TABLE NUCLEUS_TABLES
-(
-   CLASS_NAME VARCHAR2(128) NOT NULL,
-   TABLE_NAME VARCHAR2(128) NOT NULL,
-   TYPE VARCHAR2(4) NOT NULL,
-   OWNER VARCHAR2(2) NOT NULL,
-   VERSION VARCHAR2(20) NOT NULL,
-   INTERFACE_NAME VARCHAR2(255) NULL
-);
-
-ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
-
--- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-CREATE TABLE PART_COL_PRIVS
-(
-    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(767) NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PART_ID NUMBER NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    PART_COL_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
-
--- Table CDS.
-CREATE TABLE CDS
-(
-    CD_ID NUMBER NOT NULL
-);
-
-ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
-
--- Table COLUMNS_V2 for join relationship
-CREATE TABLE COLUMNS_V2
-(
-    CD_ID NUMBER NOT NULL,
-    "COMMENT" VARCHAR2(256) NULL,
-    "COLUMN_NAME" VARCHAR2(767) NOT NULL,
-    TYPE_NAME CLOB NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
-
--- Table PARTITION_KEY_VALS for join relationship
-CREATE TABLE PARTITION_KEY_VALS
-(
-    PART_ID NUMBER NOT NULL,
-    PART_KEY_VAL VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
-
--- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE TABLE DBS
-(
-    DB_ID NUMBER NOT NULL,
-    "DESC" VARCHAR2(4000) NULL,
-    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
-    "NAME" VARCHAR2(128) NULL,
-    OWNER_NAME VARCHAR2(128) NULL,
-    OWNER_TYPE VARCHAR2(10) NULL
-);
-
-ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
-
--- Table PARTITION_PARAMS for join relationship
-CREATE TABLE PARTITION_PARAMS
-(
-    PART_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
-
--- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-CREATE TABLE SERDES
-(
-    SERDE_ID NUMBER NOT NULL,
-    "NAME" VARCHAR2(128) NULL,
-    SLIB VARCHAR2(4000) NULL
-);
-
-ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
-
--- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
-CREATE TABLE TYPES
-(
-    TYPES_ID NUMBER NOT NULL,
-    TYPE_NAME VARCHAR2(128) NULL,
-    TYPE1 VARCHAR2(767) NULL,
-    TYPE2 VARCHAR2(767) NULL
-);
-
-ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
-
--- Table PARTITION_KEYS for join relationship
-CREATE TABLE PARTITION_KEYS
-(
-    TBL_ID NUMBER NOT NULL,
-    PKEY_COMMENT VARCHAR2(4000) NULL,
-    PKEY_NAME VARCHAR2(128) NOT NULL,
-    PKEY_TYPE VARCHAR2(767) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
-
--- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE TABLE ROLES
-(
-    ROLE_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    OWNER_NAME VARCHAR2(128) NULL,
-    ROLE_NAME VARCHAR2(128) NULL
-);
-
-ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
-
--- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
-CREATE TABLE PARTITIONS
-(
-    PART_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    PART_NAME VARCHAR2(767) NULL,
-    SD_ID NUMBER NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
-
--- Table INDEX_PARAMS for join relationship
-CREATE TABLE INDEX_PARAMS
-(
-    INDEX_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
-
--- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-CREATE TABLE TBL_COL_PRIVS
-(
-    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(767) NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    TBL_COL_PRIV VARCHAR2(128) NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
-
--- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
-CREATE TABLE IDXS
-(
-    INDEX_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
-    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
-    INDEX_NAME VARCHAR2(128) NULL,
-    INDEX_TBL_ID NUMBER NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    ORIG_TBL_ID NUMBER NULL,
-    SD_ID NUMBER NULL
-);
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
-
--- Table BUCKETING_COLS for join relationship
-CREATE TABLE BUCKETING_COLS
-(
-    SD_ID NUMBER NOT NULL,
-    BUCKET_COL_NAME VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table TYPE_FIELDS for join relationship
-CREATE TABLE TYPE_FIELDS
-(
-    TYPE_NAME NUMBER NOT NULL,
-    "COMMENT" VARCHAR2(256) NULL,
-    FIELD_NAME VARCHAR2(128) NOT NULL,
-    FIELD_TYPE VARCHAR2(767) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
-
--- Table SD_PARAMS for join relationship
-CREATE TABLE SD_PARAMS
-(
-    SD_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE CLOB NULL
-);
-
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
-
--- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE TABLE GLOBAL_PRIVS
-(
-    USER_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    USER_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
-
--- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-CREATE TABLE SDS
-(
-    SD_ID NUMBER NOT NULL,
-    CD_ID NUMBER NULL,
-    INPUT_FORMAT VARCHAR2(4000) NULL,
-    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
-    LOCATION VARCHAR2(4000) NULL,
-    NUM_BUCKETS NUMBER (10) NOT NULL,
-    OUTPUT_FORMAT VARCHAR2(4000) NULL,
-    SERDE_ID NUMBER NULL,
-    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
-);
-
-ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
-
--- Table TABLE_PARAMS for join relationship
-CREATE TABLE TABLE_PARAMS
-(
-    TBL_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE CLOB NULL
-);
-
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
-
--- Table SORT_COLS for join relationship
-CREATE TABLE SORT_COLS
-(
-    SD_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(767) NULL,
-    "ORDER" NUMBER (10) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-CREATE TABLE TBL_PRIVS
-(
-    TBL_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    TBL_PRIV VARCHAR2(128) NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
-
--- Table DATABASE_PARAMS for join relationship
-CREATE TABLE DATABASE_PARAMS
-(
-    DB_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(180) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
-
--- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
-CREATE TABLE ROLE_MAP
-(
-    ROLE_GRANT_ID NUMBER NOT NULL,
-    ADD_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    ROLE_ID NUMBER NULL
-);
-
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
-
--- Table SERDE_PARAMS for join relationship
-CREATE TABLE SERDE_PARAMS
-(
-    SERDE_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE CLOB NULL
-);
-
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
-
--- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-CREATE TABLE PART_PRIVS
-(
-    PART_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PART_ID NUMBER NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    PART_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
-
--- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-CREATE TABLE DB_PRIVS
-(
-    DB_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DB_ID NUMBER NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    DB_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
-
--- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
-CREATE TABLE TBLS
-(
-    TBL_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DB_ID NUMBER NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    OWNER VARCHAR2(767) NULL,
-    RETENTION NUMBER (10) NOT NULL,
-    SD_ID NUMBER NULL,
-    TBL_NAME VARCHAR2(256) NULL,
-    TBL_TYPE VARCHAR2(128) NULL,
-    VIEW_EXPANDED_TEXT CLOB NULL,
-    VIEW_ORIGINAL_TEXT CLOB NULL,
-    IS_REWRITE_ENABLED NUMBER(1) NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0))
-);
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
-
--- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE TABLE PARTITION_EVENTS
-(
-    PART_NAME_ID NUMBER NOT NULL,
-    DB_NAME VARCHAR2(128) NULL,
-    EVENT_TIME NUMBER NOT NULL,
-    EVENT_TYPE NUMBER (10) NOT NULL,
-    PARTITION_NAME VARCHAR2(767) NULL,
-    TBL_NAME VARCHAR2(256) NULL
-);
-
-ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
-
--- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
-CREATE TABLE SKEWED_STRING_LIST
-(
-    STRING_LIST_ID NUMBER NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
-
-CREATE TABLE SKEWED_STRING_LIST_VALUES
-(
-    STRING_LIST_ID NUMBER NOT NULL,
-    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_COL_NAMES
-(
-    SD_ID NUMBER NOT NULL,
-    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
-(
-    SD_ID NUMBER NOT NULL,
-    STRING_LIST_ID_KID NUMBER NOT NULL,
-    "LOCATION" VARCHAR2(4000) NULL
-);
-
-CREATE TABLE MASTER_KEYS
-(
-    KEY_ID NUMBER (10) NOT NULL,
-    MASTER_KEY VARCHAR2(767) NULL
-);
-
-CREATE TABLE DELEGATION_TOKENS
-(
-    TOKEN_IDENT VARCHAR2(767) NOT NULL,
-    TOKEN VARCHAR2(767) NULL
-);
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_VALUES
-(
-    SD_ID_OID NUMBER NOT NULL,
-    STRING_LIST_ID_EID NUMBER NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
--- column statistics
-
-CREATE TABLE TAB_COL_STATS (
- CS_ID NUMBER NOT NULL,
- DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(256) NOT NULL,
- COLUMN_NAME VARCHAR2(767) NOT NULL,
- COLUMN_TYPE VARCHAR2(128) NOT NULL,
- TBL_ID NUMBER NOT NULL,
- LONG_LOW_VALUE NUMBER,
- LONG_HIGH_VALUE NUMBER,
- DOUBLE_LOW_VALUE NUMBER,
- DOUBLE_HIGH_VALUE NUMBER,
- BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
- BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
- NUM_NULLS NUMBER NOT NULL,
- NUM_DISTINCTS NUMBER,
- AVG_COL_LEN NUMBER,
- MAX_COL_LEN NUMBER,
- NUM_TRUES NUMBER,
- NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
-);
-
-CREATE TABLE VERSION (
-  VER_ID NUMBER NOT NULL,
-  SCHEMA_VERSION VARCHAR(127) NOT NULL,
-  VERSION_COMMENT VARCHAR(255)
-);
-ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
-
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
-
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
-
-CREATE TABLE PART_COL_STATS (
- CS_ID NUMBER NOT NULL,
- DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(256) NOT NULL,
- PARTITION_NAME VARCHAR2(767) NOT NULL,
- COLUMN_NAME VARCHAR2(767) NOT NULL,
- COLUMN_TYPE VARCHAR2(128) NOT NULL,
- PART_ID NUMBER NOT NULL,
- LONG_LOW_VALUE NUMBER,
- LONG_HIGH_VALUE NUMBER,
- DOUBLE_LOW_VALUE NUMBER,
- DOUBLE_HIGH_VALUE NUMBER,
- BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
- BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
- NUM_NULLS NUMBER NOT NULL,
- NUM_DISTINCTS NUMBER,
- AVG_COL_LEN NUMBER,
- MAX_COL_LEN NUMBER,
- NUM_TRUES NUMBER,
- NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
-);
-
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
-
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
-
-CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
-
-CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
-
-CREATE TABLE FUNCS (
-  FUNC_ID NUMBER NOT NULL,
-  CLASS_NAME VARCHAR2(4000),
-  CREATE_TIME NUMBER(10) NOT NULL,
-  DB_ID NUMBER,
-  FUNC_NAME VARCHAR2(128),
-  FUNC_TYPE NUMBER(10) NOT NULL,
-  OWNER_NAME VARCHAR2(128),
-  OWNER_TYPE VARCHAR2(10)
-);
-
-ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
-
-CREATE TABLE FUNC_RU (
-  FUNC_ID NUMBER NOT NULL,
-  RESOURCE_TYPE NUMBER(10) NOT NULL,
-  RESOURCE_URI VARCHAR2(4000),
-  INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
-
-CREATE TABLE NOTIFICATION_LOG
-(
-    NL_ID NUMBER NOT NULL,
-    EVENT_ID NUMBER NOT NULL,
-    EVENT_TIME NUMBER(10) NOT NULL,
-    EVENT_TYPE VARCHAR2(32) NOT NULL,
-    DB_NAME VARCHAR2(128),
-    TBL_NAME VARCHAR2(256),
-    MESSAGE CLOB NULL,
-    MESSAGE_FORMAT VARCHAR(16) NULL
-);
-
-ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID);
-
-CREATE TABLE NOTIFICATION_SEQUENCE
-(
-    NNI_ID NUMBER NOT NULL,
-    NEXT_EVENT_ID NUMBER NOT NULL
-);
-
-ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);
-
-
-
--- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
-
-CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table COLUMNS_V2
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
-
-
--- Constraints for table PARTITION_KEY_VALS
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
-
-
--- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
-
-
--- Constraints for table PARTITION_PARAMS
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
-
-
--- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-
--- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
-CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
-
-
--- Constraints for table PARTITION_KEYS
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
-
-
--- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
-
-
--- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
-
-CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
-
-CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
-
-
--- Constraints for table INDEX_PARAMS
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
-
-
--- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
-
-
--- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
-
-CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
-
-CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
-
-CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
-
-
--- Constraints for table BUCKETING_COLS
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
-
-
--- Constraints for table TYPE_FIELDS
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
-
-
--- Constraints for table SD_PARAMS
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
-
-
--- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
-CREATE INDEX SDS_N50 ON SDS (CD_ID);
-
-
--- Constraints for table TABLE_PARAMS
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
-
-
--- Constraints for table SORT_COLS
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
-
-
--- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
-
-CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table DATABASE_PARAMS
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
-
-
--- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
-
-CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table SERDE_PARAMS
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
-
-
--- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
-
-
--- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
-
-
--- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
-
-CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
-
-CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
-
-
--- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
-
-
--- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
-ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
-
-CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
-
-CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
-
-
--- Constraints for table FUNC_RU for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
-ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
-
-CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
-
-CREATE TABLE KEY_CONSTRAINTS
-(
-  CHILD_CD_ID NUMBER,
-  CHILD_INTEGER_IDX NUMBER,
-  CHILD_TBL_ID NUMBER,
-  PARENT_CD_ID NUMBER NOT NULL,
-  PARENT_INTEGER_IDX NUMBER NOT NULL,
-  PARENT_TBL_ID NUMBER NOT NULL,
-  POSITION NUMBER NOT NULL,
-  CONSTRAINT_NAME VARCHAR(400) NOT NULL,
-  CONSTRAINT_TYPE NUMBER NOT NULL,
-  UPDATE_RULE NUMBER,
-  DELETE_RULE NUMBER,
-  ENABLE_VALIDATE_RELY NUMBER NOT NULL
-) ;
-
-ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
-
-CREATE INDEX CONSTRAINTS_PT_INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID);
-
-
-------------------------------
--- Transaction and lock tables
-------------------------------
-@hive-txn-schema-3.0.0.oracle.sql;
-
--- -----------------------------------------------------------------
--- Record schema version. Should be the last step in the init script
--- -----------------------------------------------------------------
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '3.0.0', 'Hive release version 3.0.0');
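
The final INSERT above is what the schema version check reads back after initialization. A hedged JDBC sketch of verifying the recorded version against an Oracle metastore database (the connection URL and credentials are placeholders; use the metastore's javax.jdo.option.* settings in practice):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class CheckMetastoreSchemaVersion {
      public static void main(String[] args) throws Exception {
        // Placeholder connection details for an Oracle-backed metastore.
        try (Connection conn = DriverManager.getConnection(
                 "jdbc:oracle:thin:@dbhost:1521/orcl", "hiveuser", "hivepass");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                 "SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION WHERE VER_ID = 1")) {
          if (rs.next()) {
            System.out.println(rs.getString(1) + " - " + rs.getString(2));
          }
        }
      }
    }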

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/oracle/hive-txn-schema-2.2.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/hive-txn-schema-2.2.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-txn-schema-2.2.0.oracle.sql
index 80adfc2..57d3abd 100644
--- a/metastore/scripts/upgrade/oracle/hive-txn-schema-2.2.0.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/hive-txn-schema-2.2.0.oracle.sql
@@ -32,7 +32,7 @@ CREATE TABLE TXNS (
 CREATE TABLE TXN_COMPONENTS (
   TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID),
   TC_DATABASE VARCHAR2(128) NOT NULL,
-  TC_TABLE VARCHAR2(256),
+  TC_TABLE VARCHAR2(128),
   TC_PARTITION VARCHAR2(767) NULL,
   TC_OPERATION_TYPE char(1) NOT NULL
 ) ROWDEPENDENCIES;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/oracle/hive-txn-schema-2.3.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/hive-txn-schema-2.3.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-txn-schema-2.3.0.oracle.sql
deleted file mode 100644
index 12c24a5..0000000
--- a/metastore/scripts/upgrade/oracle/hive-txn-schema-2.3.0.oracle.sql
+++ /dev/null
@@ -1,133 +0,0 @@
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements.  See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the License); you may not use this file except in compliance with
--- the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an AS IS BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
---
--- Tables for transaction management
--- 
-
-CREATE TABLE TXNS (
-  TXN_ID NUMBER(19) PRIMARY KEY,
-  TXN_STATE char(1) NOT NULL,
-  TXN_STARTED NUMBER(19) NOT NULL,
-  TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
-  TXN_USER varchar(128) NOT NULL,
-  TXN_HOST varchar(128) NOT NULL,
-  TXN_AGENT_INFO varchar2(128),
-  TXN_META_INFO varchar2(128),
-  TXN_HEARTBEAT_COUNT number(10)
-) ROWDEPENDENCIES;
-
-CREATE TABLE TXN_COMPONENTS (
-  TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID),
-  TC_DATABASE VARCHAR2(128) NOT NULL,
-  TC_TABLE VARCHAR2(256),
-  TC_PARTITION VARCHAR2(767) NULL,
-  TC_OPERATION_TYPE char(1) NOT NULL
-) ROWDEPENDENCIES;
-
-CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
-
-CREATE TABLE COMPLETED_TXN_COMPONENTS (
-  CTC_TXNID NUMBER(19),
-  CTC_DATABASE varchar(128) NOT NULL,
-  CTC_TABLE varchar(128),
-  CTC_PARTITION varchar(767)
-) ROWDEPENDENCIES;
-
-CREATE TABLE NEXT_TXN_ID (
-  NTXN_NEXT NUMBER(19) NOT NULL
-);
-INSERT INTO NEXT_TXN_ID VALUES(1);
-
-CREATE TABLE HIVE_LOCKS (
-  HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
-  HL_LOCK_INT_ID NUMBER(19) NOT NULL,
-  HL_TXNID NUMBER(19),
-  HL_DB VARCHAR2(128) NOT NULL,
-  HL_TABLE VARCHAR2(128),
-  HL_PARTITION VARCHAR2(767),
-  HL_LOCK_STATE CHAR(1) NOT NULL,
-  HL_LOCK_TYPE CHAR(1) NOT NULL,
-  HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
-  HL_ACQUIRED_AT NUMBER(19),
-  HL_USER varchar(128) NOT NULL,
-  HL_HOST varchar(128) NOT NULL,
-  HL_HEARTBEAT_COUNT number(10),
-  HL_AGENT_INFO varchar2(128),
-  HL_BLOCKEDBY_EXT_ID number(19),
-  HL_BLOCKEDBY_INT_ID number(19),
-  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
-) ROWDEPENDENCIES;
-
-CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
-
-CREATE TABLE NEXT_LOCK_ID (
-  NL_NEXT NUMBER(19) NOT NULL
-);
-INSERT INTO NEXT_LOCK_ID VALUES(1);
-
-CREATE TABLE COMPACTION_QUEUE (
-  CQ_ID NUMBER(19) PRIMARY KEY,
-  CQ_DATABASE varchar(128) NOT NULL,
-  CQ_TABLE varchar(128) NOT NULL,
-  CQ_PARTITION varchar(767),
-  CQ_STATE char(1) NOT NULL,
-  CQ_TYPE char(1) NOT NULL,
-  CQ_TBLPROPERTIES varchar(2048),
-  CQ_WORKER_ID varchar(128),
-  CQ_START NUMBER(19),
-  CQ_RUN_AS varchar(128),
-  CQ_HIGHEST_TXN_ID NUMBER(19),
-  CQ_META_INFO BLOB,
-  CQ_HADOOP_JOB_ID varchar2(32)
-) ROWDEPENDENCIES;
-
-CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
-  NCQ_NEXT NUMBER(19) NOT NULL
-);
-INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
-
-CREATE TABLE COMPLETED_COMPACTIONS (
-  CC_ID NUMBER(19) PRIMARY KEY,
-  CC_DATABASE varchar(128) NOT NULL,
-  CC_TABLE varchar(128) NOT NULL,
-  CC_PARTITION varchar(767),
-  CC_STATE char(1) NOT NULL,
-  CC_TYPE char(1) NOT NULL,
-  CC_TBLPROPERTIES varchar(2048),
-  CC_WORKER_ID varchar(128),
-  CC_START NUMBER(19),
-  CC_END NUMBER(19),
-  CC_RUN_AS varchar(128),
-  CC_HIGHEST_TXN_ID NUMBER(19),
-  CC_META_INFO BLOB,
-  CC_HADOOP_JOB_ID varchar2(32)
-) ROWDEPENDENCIES;
-
-CREATE TABLE AUX_TABLE (
-  MT_KEY1 varchar2(128) NOT NULL,
-  MT_KEY2 number(19) NOT NULL,
-  MT_COMMENT varchar2(255),
-  PRIMARY KEY(MT_KEY1, MT_KEY2)
-);
-
-CREATE TABLE WRITE_SET (
-  WS_DATABASE varchar2(128) NOT NULL,
-  WS_TABLE varchar2(128) NOT NULL,
-  WS_PARTITION varchar2(767),
-  WS_TXNID number(19) NOT NULL,
-  WS_COMMIT_ID number(19) NOT NULL,
-  WS_OPERATION_TYPE char(1) NOT NULL
-);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/oracle/hive-txn-schema-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/hive-txn-schema-3.0.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-txn-schema-3.0.0.oracle.sql
deleted file mode 100644
index 12c24a5..0000000
--- a/metastore/scripts/upgrade/oracle/hive-txn-schema-3.0.0.oracle.sql
+++ /dev/null
@@ -1,133 +0,0 @@
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements.  See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the License); you may not use this file except in compliance with
--- the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an AS IS BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
---
--- Tables for transaction management
--- 
-
-CREATE TABLE TXNS (
-  TXN_ID NUMBER(19) PRIMARY KEY,
-  TXN_STATE char(1) NOT NULL,
-  TXN_STARTED NUMBER(19) NOT NULL,
-  TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
-  TXN_USER varchar(128) NOT NULL,
-  TXN_HOST varchar(128) NOT NULL,
-  TXN_AGENT_INFO varchar2(128),
-  TXN_META_INFO varchar2(128),
-  TXN_HEARTBEAT_COUNT number(10)
-) ROWDEPENDENCIES;
-
-CREATE TABLE TXN_COMPONENTS (
-  TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID),
-  TC_DATABASE VARCHAR2(128) NOT NULL,
-  TC_TABLE VARCHAR2(256),
-  TC_PARTITION VARCHAR2(767) NULL,
-  TC_OPERATION_TYPE char(1) NOT NULL
-) ROWDEPENDENCIES;
-
-CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
-
-CREATE TABLE COMPLETED_TXN_COMPONENTS (
-  CTC_TXNID NUMBER(19),
-  CTC_DATABASE varchar(128) NOT NULL,
-  CTC_TABLE varchar(128),
-  CTC_PARTITION varchar(767)
-) ROWDEPENDENCIES;
-
-CREATE TABLE NEXT_TXN_ID (
-  NTXN_NEXT NUMBER(19) NOT NULL
-);
-INSERT INTO NEXT_TXN_ID VALUES(1);
-
-CREATE TABLE HIVE_LOCKS (
-  HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
-  HL_LOCK_INT_ID NUMBER(19) NOT NULL,
-  HL_TXNID NUMBER(19),
-  HL_DB VARCHAR2(128) NOT NULL,
-  HL_TABLE VARCHAR2(128),
-  HL_PARTITION VARCHAR2(767),
-  HL_LOCK_STATE CHAR(1) NOT NULL,
-  HL_LOCK_TYPE CHAR(1) NOT NULL,
-  HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
-  HL_ACQUIRED_AT NUMBER(19),
-  HL_USER varchar(128) NOT NULL,
-  HL_HOST varchar(128) NOT NULL,
-  HL_HEARTBEAT_COUNT number(10),
-  HL_AGENT_INFO varchar2(128),
-  HL_BLOCKEDBY_EXT_ID number(19),
-  HL_BLOCKEDBY_INT_ID number(19),
-  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
-) ROWDEPENDENCIES;
-
-CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
-
-CREATE TABLE NEXT_LOCK_ID (
-  NL_NEXT NUMBER(19) NOT NULL
-);
-INSERT INTO NEXT_LOCK_ID VALUES(1);
-
-CREATE TABLE COMPACTION_QUEUE (
-  CQ_ID NUMBER(19) PRIMARY KEY,
-  CQ_DATABASE varchar(128) NOT NULL,
-  CQ_TABLE varchar(128) NOT NULL,
-  CQ_PARTITION varchar(767),
-  CQ_STATE char(1) NOT NULL,
-  CQ_TYPE char(1) NOT NULL,
-  CQ_TBLPROPERTIES varchar(2048),
-  CQ_WORKER_ID varchar(128),
-  CQ_START NUMBER(19),
-  CQ_RUN_AS varchar(128),
-  CQ_HIGHEST_TXN_ID NUMBER(19),
-  CQ_META_INFO BLOB,
-  CQ_HADOOP_JOB_ID varchar2(32)
-) ROWDEPENDENCIES;
-
-CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
-  NCQ_NEXT NUMBER(19) NOT NULL
-);
-INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
-
-CREATE TABLE COMPLETED_COMPACTIONS (
-  CC_ID NUMBER(19) PRIMARY KEY,
-  CC_DATABASE varchar(128) NOT NULL,
-  CC_TABLE varchar(128) NOT NULL,
-  CC_PARTITION varchar(767),
-  CC_STATE char(1) NOT NULL,
-  CC_TYPE char(1) NOT NULL,
-  CC_TBLPROPERTIES varchar(2048),
-  CC_WORKER_ID varchar(128),
-  CC_START NUMBER(19),
-  CC_END NUMBER(19),
-  CC_RUN_AS varchar(128),
-  CC_HIGHEST_TXN_ID NUMBER(19),
-  CC_META_INFO BLOB,
-  CC_HADOOP_JOB_ID varchar2(32)
-) ROWDEPENDENCIES;
-
-CREATE TABLE AUX_TABLE (
-  MT_KEY1 varchar2(128) NOT NULL,
-  MT_KEY2 number(19) NOT NULL,
-  MT_COMMENT varchar2(255),
-  PRIMARY KEY(MT_KEY1, MT_KEY2)
-);
-
-CREATE TABLE WRITE_SET (
-  WS_DATABASE varchar2(128) NOT NULL,
-  WS_TABLE varchar2(128) NOT NULL,
-  WS_PARTITION varchar2(767),
-  WS_TXNID number(19) NOT NULL,
-  WS_COMMIT_ID number(19) NOT NULL,
-  WS_OPERATION_TYPE char(1) NOT NULL
-);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql b/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql
index b2f35de..058c0d5 100644
--- a/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql
@@ -3,7 +3,6 @@ SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS Status from dual;
 @037-HIVE-14496.oracle.sql;
 @038-HIVE-14637.oracle.sql;
 @038-HIVE-10562.oracle.sql;
-@039-HIVE-12274.oracle.sql;
 
 UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS Status from dual;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/oracle/upgrade-2.2.0-to-2.3.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/upgrade-2.2.0-to-2.3.0.oracle.sql b/metastore/scripts/upgrade/oracle/upgrade-2.2.0-to-2.3.0.oracle.sql
deleted file mode 100644
index 64a3313..0000000
--- a/metastore/scripts/upgrade/oracle/upgrade-2.2.0-to-2.3.0.oracle.sql
+++ /dev/null
@@ -1,6 +0,0 @@
-SELECT 'Upgrading MetaStore schema from 2.2.0 to 2.3.0' AS Status from dual;
-
-@040-HIVE-16399.oracle.sql;
-
-UPDATE VERSION SET SCHEMA_VERSION='2.3.0', VERSION_COMMENT='Hive release version 2.3.0' where VER_ID=1;
-SELECT 'Finished upgrading MetaStore schema from 2.2.0 to 2.3.0' AS Status from dual;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql b/metastore/scripts/upgrade/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
deleted file mode 100644
index 31c4f5d..0000000
--- a/metastore/scripts/upgrade/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
+++ /dev/null
@@ -1,4 +0,0 @@
-SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual;
-
-UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
-SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/oracle/upgrade.order.oracle
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/upgrade.order.oracle b/metastore/scripts/upgrade/oracle/upgrade.order.oracle
index a18b062..28a453f 100644
--- a/metastore/scripts/upgrade/oracle/upgrade.order.oracle
+++ b/metastore/scripts/upgrade/oracle/upgrade.order.oracle
@@ -8,5 +8,3 @@
 1.2.0-to-2.0.0
 2.0.0-to-2.1.0
 2.1.0-to-2.2.0
-2.2.0-to-2.3.0
-2.3.0-to-3.0.0

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/postgres/038-HIVE-12274.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/038-HIVE-12274.postgres.sql b/metastore/scripts/upgrade/postgres/038-HIVE-12274.postgres.sql
deleted file mode 100644
index c3c6692..0000000
--- a/metastore/scripts/upgrade/postgres/038-HIVE-12274.postgres.sql
+++ /dev/null
@@ -1,18 +0,0 @@
-alter table "SERDE_PARAMS" alter column "PARAM_VALUE" type text using cast("PARAM_VALUE" as text);
-alter table "TABLE_PARAMS" alter column "PARAM_VALUE" type text using cast("PARAM_VALUE" as text);
-alter table "SD_PARAMS" alter column "PARAM_VALUE" type text using cast("PARAM_VALUE" as text);
-alter table "COLUMNS_V2" alter column "TYPE_NAME" type text using cast("TYPE_NAME" as text);
-
-alter table "TBLS" ALTER COLUMN "TBL_NAME" TYPE varchar(256);
-alter table "NOTIFICATION_LOG" alter column "TBL_NAME" TYPE varchar(256);
-alter table "PARTITION_EVENTS" alter column "TBL_NAME" TYPE varchar(256);
-alter table "TAB_COL_STATS" alter column "TABLE_NAME" TYPE varchar(256);
-alter table "PART_COL_STATS" alter column "TABLE_NAME" TYPE varchar(256);
-alter table COMPLETED_TXN_COMPONENTS alter column CTC_TABLE TYPE varchar(256);
-
-alter table "COLUMNS_V2" alter column "COLUMN_NAME" TYPE varchar(767);
-alter table "PART_COL_PRIVS" alter column "COLUMN_NAME" TYPE varchar(767);
-alter table "TBL_COL_PRIVS" alter column "COLUMN_NAME" TYPE varchar(767);
-alter table "SORT_COLS" alter column "COLUMN_NAME" TYPE varchar(767);
-alter table "TAB_COL_STATS" alter column "COLUMN_NAME" TYPE varchar(767);
-alter table "PART_COL_STATS" alter column "COLUMN_NAME" TYPE varchar(767);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/postgres/039-HIVE-16399.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/039-HIVE-16399.postgres.sql b/metastore/scripts/upgrade/postgres/039-HIVE-16399.postgres.sql
deleted file mode 100644
index 18157bf..0000000
--- a/metastore/scripts/upgrade/postgres/039-HIVE-16399.postgres.sql
+++ /dev/null
@@ -1 +0,0 @@
-CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS USING hash (TC_TXNID);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql
index 424c6a1..c2e0a77 100644
--- a/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql
+++ b/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql
@@ -42,8 +42,8 @@ CREATE TABLE "CDS" (
 CREATE TABLE "COLUMNS_V2" (
     "CD_ID" bigint NOT NULL,
     "COMMENT" character varying(4000),
-    "COLUMN_NAME" character varying(767) NOT NULL,
-    "TYPE_NAME" text,
+    "COLUMN_NAME" character varying(1000) NOT NULL,
+    "TYPE_NAME" character varying(4000),
     "INTEGER_IDX" integer NOT NULL
 );
 
@@ -172,7 +172,7 @@ CREATE TABLE "PARTITION_EVENTS" (
     "EVENT_TIME" bigint NOT NULL,
     "EVENT_TYPE" integer NOT NULL,
     "PARTITION_NAME" character varying(767),
-    "TBL_NAME" character varying(256)
+    "TBL_NAME" character varying(128)
 );
 
 
@@ -217,7 +217,7 @@ CREATE TABLE "PARTITION_PARAMS" (
 
 CREATE TABLE "PART_COL_PRIVS" (
     "PART_COLUMN_GRANT_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+    "COLUMN_NAME" character varying(1000) DEFAULT NULL::character varying,
     "CREATE_TIME" bigint NOT NULL,
     "GRANT_OPTION" smallint NOT NULL,
     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
@@ -298,7 +298,7 @@ CREATE TABLE "SDS" (
 CREATE TABLE "SD_PARAMS" (
     "SD_ID" bigint NOT NULL,
     "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" text DEFAULT NULL
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
 );
 
 
@@ -330,7 +330,7 @@ CREATE TABLE "SERDES" (
 CREATE TABLE "SERDE_PARAMS" (
     "SERDE_ID" bigint NOT NULL,
     "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" text DEFAULT NULL
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
 );
 
 
@@ -340,7 +340,7 @@ CREATE TABLE "SERDE_PARAMS" (
 
 CREATE TABLE "SORT_COLS" (
     "SD_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+    "COLUMN_NAME" character varying(1000) DEFAULT NULL::character varying,
     "ORDER" bigint NOT NULL,
     "INTEGER_IDX" bigint NOT NULL
 );
@@ -353,7 +353,7 @@ CREATE TABLE "SORT_COLS" (
 CREATE TABLE "TABLE_PARAMS" (
     "TBL_ID" bigint NOT NULL,
     "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" text DEFAULT NULL
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
 );
 
 
@@ -369,7 +369,7 @@ CREATE TABLE "TBLS" (
     "OWNER" character varying(767) DEFAULT NULL::character varying,
     "RETENTION" bigint NOT NULL,
     "SD_ID" bigint,
-    "TBL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
     "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
     "VIEW_EXPANDED_TEXT" text,
     "VIEW_ORIGINAL_TEXT" text,
@@ -385,7 +385,7 @@ CREATE TABLE "TBLS" (
 
 CREATE TABLE "TBL_COL_PRIVS" (
     "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+    "COLUMN_NAME" character varying(1000) DEFAULT NULL::character varying,
     "CREATE_TIME" bigint NOT NULL,
     "GRANT_OPTION" smallint NOT NULL,
     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
@@ -488,8 +488,8 @@ CREATE TABLE  "DELEGATION_TOKENS"
 CREATE TABLE "TAB_COL_STATS" (
  "CS_ID" bigint NOT NULL,
  "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
- "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(1000) DEFAULT NULL::character varying,
  "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
  "TBL_ID" bigint NOT NULL,
  "LONG_LOW_VALUE" bigint,
@@ -523,9 +523,9 @@ CREATE TABLE "VERSION" (
 CREATE TABLE "PART_COL_STATS" (
  "CS_ID" bigint NOT NULL,
  "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
  "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
- "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(1000) DEFAULT NULL::character varying,
  "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
  "PART_ID" bigint NOT NULL,
  "LONG_LOW_VALUE" bigint,
@@ -576,7 +576,7 @@ CREATE TABLE "NOTIFICATION_LOG"
     "EVENT_TIME" INTEGER NOT NULL,
     "EVENT_TYPE" VARCHAR(32) NOT NULL,
     "DB_NAME" VARCHAR(128),
-    "TBL_NAME" VARCHAR(256),
+    "TBL_NAME" VARCHAR(128),
     "MESSAGE" text,
     "MESSAGE_FORMAT" VARCHAR(16),
     PRIMARY KEY ("NL_ID")
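
The hunks above restore the 2.2.0 Postgres schema to TBL_NAME varchar(128), COLUMN_NAME varchar(1000) and PARAM_VALUE varchar(4000). A quick way to see which widths a live metastore database actually carries is to query information_schema; a hedged JDBC sketch with placeholder connection settings (illustrative only, not part of this patch):

  import java.sql.Connection;
  import java.sql.DriverManager;
  import java.sql.PreparedStatement;
  import java.sql.ResultSet;

  public class ShowTblNameWidth {
    public static void main(String[] args) throws Exception {
      // Placeholder URL and credentials for the Postgres metastore database (assumptions).
      try (Connection conn = DriverManager.getConnection(
              "jdbc:postgresql://localhost:5432/metastore", "hiveuser", "hivepass");
          PreparedStatement ps = conn.prepareStatement(
              "SELECT character_maximum_length FROM information_schema.columns "
                  + "WHERE table_name = ? AND column_name = ?")) {
        ps.setString(1, "TBLS");
        ps.setString(2, "TBL_NAME");
        try (ResultSet rs = ps.executeQuery()) {
          System.out.println("TBLS.TBL_NAME max length: " + (rs.next() ? rs.getInt(1) : -1));
        }
      }
    }
  }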


[22/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index 843f4b3..ae78b36 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -46,7 +46,6 @@ class ThriftHiveMetastoreIf : virtual public  ::facebook::fb303::FacebookService
   virtual void add_foreign_key(const AddForeignKeyRequest& req) = 0;
   virtual void drop_table(const std::string& dbname, const std::string& name, const bool deleteData) = 0;
   virtual void drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) = 0;
-  virtual void truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames) = 0;
   virtual void get_tables(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern) = 0;
   virtual void get_tables_by_type(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType) = 0;
   virtual void get_table_meta(std::vector<TableMeta> & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types) = 0;
@@ -283,9 +282,6 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p
   void drop_table_with_environment_context(const std::string& /* dbname */, const std::string& /* name */, const bool /* deleteData */, const EnvironmentContext& /* environment_context */) {
     return;
   }
-  void truncate_table(const std::string& /* dbName */, const std::string& /* tableName */, const std::vector<std::string> & /* partNames */) {
-    return;
-  }
   void get_tables(std::vector<std::string> & /* _return */, const std::string& /* db_name */, const std::string& /* pattern */) {
     return;
   }
@@ -3675,124 +3671,6 @@ class ThriftHiveMetastore_drop_table_with_environment_context_presult {
 
 };
 
-typedef struct _ThriftHiveMetastore_truncate_table_args__isset {
-  _ThriftHiveMetastore_truncate_table_args__isset() : dbName(false), tableName(false), partNames(false) {}
-  bool dbName :1;
-  bool tableName :1;
-  bool partNames :1;
-} _ThriftHiveMetastore_truncate_table_args__isset;
-
-class ThriftHiveMetastore_truncate_table_args {
- public:
-
-  ThriftHiveMetastore_truncate_table_args(const ThriftHiveMetastore_truncate_table_args&);
-  ThriftHiveMetastore_truncate_table_args& operator=(const ThriftHiveMetastore_truncate_table_args&);
-  ThriftHiveMetastore_truncate_table_args() : dbName(), tableName() {
-  }
-
-  virtual ~ThriftHiveMetastore_truncate_table_args() throw();
-  std::string dbName;
-  std::string tableName;
-  std::vector<std::string>  partNames;
-
-  _ThriftHiveMetastore_truncate_table_args__isset __isset;
-
-  void __set_dbName(const std::string& val);
-
-  void __set_tableName(const std::string& val);
-
-  void __set_partNames(const std::vector<std::string> & val);
-
-  bool operator == (const ThriftHiveMetastore_truncate_table_args & rhs) const
-  {
-    if (!(dbName == rhs.dbName))
-      return false;
-    if (!(tableName == rhs.tableName))
-      return false;
-    if (!(partNames == rhs.partNames))
-      return false;
-    return true;
-  }
-  bool operator != (const ThriftHiveMetastore_truncate_table_args &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const ThriftHiveMetastore_truncate_table_args & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-};
-
-
-class ThriftHiveMetastore_truncate_table_pargs {
- public:
-
-
-  virtual ~ThriftHiveMetastore_truncate_table_pargs() throw();
-  const std::string* dbName;
-  const std::string* tableName;
-  const std::vector<std::string> * partNames;
-
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-};
-
-typedef struct _ThriftHiveMetastore_truncate_table_result__isset {
-  _ThriftHiveMetastore_truncate_table_result__isset() : o1(false) {}
-  bool o1 :1;
-} _ThriftHiveMetastore_truncate_table_result__isset;
-
-class ThriftHiveMetastore_truncate_table_result {
- public:
-
-  ThriftHiveMetastore_truncate_table_result(const ThriftHiveMetastore_truncate_table_result&);
-  ThriftHiveMetastore_truncate_table_result& operator=(const ThriftHiveMetastore_truncate_table_result&);
-  ThriftHiveMetastore_truncate_table_result() {
-  }
-
-  virtual ~ThriftHiveMetastore_truncate_table_result() throw();
-  MetaException o1;
-
-  _ThriftHiveMetastore_truncate_table_result__isset __isset;
-
-  void __set_o1(const MetaException& val);
-
-  bool operator == (const ThriftHiveMetastore_truncate_table_result & rhs) const
-  {
-    if (!(o1 == rhs.o1))
-      return false;
-    return true;
-  }
-  bool operator != (const ThriftHiveMetastore_truncate_table_result &rhs) const {
-    return !(*this == rhs);
-  }
-
-  bool operator < (const ThriftHiveMetastore_truncate_table_result & ) const;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
-
-};
-
-typedef struct _ThriftHiveMetastore_truncate_table_presult__isset {
-  _ThriftHiveMetastore_truncate_table_presult__isset() : o1(false) {}
-  bool o1 :1;
-} _ThriftHiveMetastore_truncate_table_presult__isset;
-
-class ThriftHiveMetastore_truncate_table_presult {
- public:
-
-
-  virtual ~ThriftHiveMetastore_truncate_table_presult() throw();
-  MetaException o1;
-
-  _ThriftHiveMetastore_truncate_table_presult__isset __isset;
-
-  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
-
-};
-
 typedef struct _ThriftHiveMetastore_get_tables_args__isset {
   _ThriftHiveMetastore_get_tables_args__isset() : db_name(false), pattern(false) {}
   bool db_name :1;
@@ -20238,9 +20116,6 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public
   void drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context);
   void send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context);
   void recv_drop_table_with_environment_context();
-  void truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames);
-  void send_truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames);
-  void recv_truncate_table();
   void get_tables(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern);
   void send_get_tables(const std::string& db_name, const std::string& pattern);
   void recv_get_tables(std::vector<std::string> & _return);
@@ -20674,7 +20549,6 @@ class ThriftHiveMetastoreProcessor : public  ::facebook::fb303::FacebookServiceP
   void process_add_foreign_key(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_drop_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_drop_table_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
-  void process_truncate_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_get_tables(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_get_tables_by_type(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_get_table_meta(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
@@ -20836,7 +20710,6 @@ class ThriftHiveMetastoreProcessor : public  ::facebook::fb303::FacebookServiceP
     processMap_["add_foreign_key"] = &ThriftHiveMetastoreProcessor::process_add_foreign_key;
     processMap_["drop_table"] = &ThriftHiveMetastoreProcessor::process_drop_table;
     processMap_["drop_table_with_environment_context"] = &ThriftHiveMetastoreProcessor::process_drop_table_with_environment_context;
-    processMap_["truncate_table"] = &ThriftHiveMetastoreProcessor::process_truncate_table;
     processMap_["get_tables"] = &ThriftHiveMetastoreProcessor::process_get_tables;
     processMap_["get_tables_by_type"] = &ThriftHiveMetastoreProcessor::process_get_tables_by_type;
     processMap_["get_table_meta"] = &ThriftHiveMetastoreProcessor::process_get_table_meta;
@@ -21229,15 +21102,6 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
     ifaces_[i]->drop_table_with_environment_context(dbname, name, deleteData, environment_context);
   }
 
-  void truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames) {
-    size_t sz = ifaces_.size();
-    size_t i = 0;
-    for (; i < (sz - 1); ++i) {
-      ifaces_[i]->truncate_table(dbName, tableName, partNames);
-    }
-    ifaces_[i]->truncate_table(dbName, tableName, partNames);
-  }
-
   void get_tables(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern) {
     size_t sz = ifaces_.size();
     size_t i = 0;
@@ -22607,9 +22471,6 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
   void drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context);
   int32_t send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context);
   void recv_drop_table_with_environment_context(const int32_t seqid);
-  void truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames);
-  int32_t send_truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames);
-  void recv_truncate_table(const int32_t seqid);
   void get_tables(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern);
   int32_t send_get_tables(const std::string& db_name, const std::string& pattern);
   void recv_get_tables(std::vector<std::string> & _return, const int32_t seqid);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
index 34c37e9..ced952a 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -142,11 +142,6 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
     printf("drop_table_with_environment_context\n");
   }
 
-  void truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames) {
-    // Your implementation goes here
-    printf("truncate_table\n");
-  }
-
   void get_tables(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern) {
     // Your implementation goes here
     printf("get_tables\n");
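
The removed stubs all belong to the truncate_table Thrift call (db name, table name, partition names, with MetaException as the declared failure). For context, invoking the same call from the generated Java client would look roughly like the sketch below; it assumes the matching Java bindings from the same Thrift definition and an unsecured metastore listening on localhost:9083, so treat it as illustrative rather than as part of this change:

  import java.util.Arrays;

  import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
  import org.apache.thrift.protocol.TBinaryProtocol;
  import org.apache.thrift.transport.TSocket;
  import org.apache.thrift.transport.TTransport;

  public class TruncateTableSketch {
    public static void main(String[] args) throws Exception {
      TTransport transport = new TSocket("localhost", 9083); // assumed host and port
      transport.open();
      try {
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
        // Same argument list as the C++ stubs above.
        client.truncate_table("default", "sample_tbl", Arrays.asList("ds=2017-05-08"));
      } finally {
        transport.close();
      }
    }
  }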


[04/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
index b018adb..0b1ac4b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
@@ -37,7 +37,6 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
 import com.google.common.annotations.VisibleForTesting;
-
 import org.apache.hadoop.hive.common.StringInternUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -46,8 +45,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.ql.Driver.DriverState;
-import org.apache.hadoop.hive.ql.Driver.LockedDriverState;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.log.PerfLogger;
@@ -366,12 +363,8 @@ public class CombineHiveInputFormat<K extends WritableComparable, V extends Writ
     Map<CombinePathInputFormat, CombineFilter> poolMap =
       new HashMap<CombinePathInputFormat, CombineFilter>();
     Set<Path> poolSet = new HashSet<Path>();
-    LockedDriverState lDrvStat = LockedDriverState.getLockedDriverState();
 
     for (Path path : paths) {
-      if (lDrvStat != null && lDrvStat.driverState == DriverState.INTERRUPT)
-        throw new IOException("Operation is Canceled. ");
-
       PartitionDesc part = HiveFileFormatUtils.getFromPathRecursively(
           pathToPartitionInfo, path, IOPrepareCache.get().allocatePartitionDescMap());
       TableDesc tableDesc = part.getTableDesc();

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
index f73a8e3..cc77e4c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
@@ -87,7 +87,7 @@ public final class HiveFileFormatUtils {
   public static class FileChecker {
     // we don't have many file formats that implement InputFormatChecker. We won't be holding
     // multiple instances of such classes
-    private static final int MAX_CACHE_SIZE = 16;
+    private static int MAX_CACHE_SIZE = 16;
 
     // immutable maps
     Map<Class<? extends InputFormat>, Class<? extends InputFormatChecker>> inputFormatCheckerMap;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
index 9b83cb4..c697407 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
@@ -89,13 +89,14 @@ import org.apache.hive.common.util.ReflectionUtil;
  */
 public class HiveInputFormat<K extends WritableComparable, V extends Writable>
     implements InputFormat<K, V>, JobConfigurable {
+
   private static final String CLASS_NAME = HiveInputFormat.class.getName();
   private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
 
   /**
    * A cache of InputFormat instances.
    */
-  private static final Map<Class, InputFormat<WritableComparable, Writable>> inputFormats
+  private static Map<Class, InputFormat<WritableComparable, Writable>> inputFormats
     = new ConcurrentHashMap<Class, InputFormat<WritableComparable, Writable>>();
 
   private JobConf job;
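
The inputFormats field here is a lazily populated, per-class cache of InputFormat instances shared across splits. The same pattern in isolation, as a generic sketch with hypothetical names rather than the Hive code itself:

  import java.util.Map;
  import java.util.concurrent.ConcurrentHashMap;

  public class InstanceCache {
    // One shared instance per class, created on first request and reused afterwards.
    private static final Map<Class<?>, Object> INSTANCES = new ConcurrentHashMap<>();

    @SuppressWarnings("unchecked")
    public static <T> T get(Class<T> clazz) {
      return (T) INSTANCES.computeIfAbsent(clazz, c -> {
        try {
          return c.getDeclaredConstructor().newInstance();
        } catch (ReflectiveOperationException e) {
          throw new IllegalStateException("Cannot instantiate " + c.getName(), e);
        }
      });
    }
  }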

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java b/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
index f41edc4..d391164 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
@@ -839,7 +839,7 @@ public class RCFile {
     // the max size of memory for buffering records before writes them out
     private int columnsBufferSize = 4 * 1024 * 1024; // 4M
     // the conf string for COLUMNS_BUFFER_SIZE
-    public static final String COLUMNS_BUFFER_SIZE_CONF_STR = "hive.io.rcfile.record.buffer.size";
+    public static String COLUMNS_BUFFER_SIZE_CONF_STR = "hive.io.rcfile.record.buffer.size";
 
     // how many records already buffered
     private int bufferedRecords = 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java
index cbd38ed..96ca736 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.orc.FileMetadata;
 import org.apache.orc.PhysicalWriter;
-import org.apache.orc.MemoryManager;
+import org.apache.orc.impl.MemoryManager;
 import org.apache.orc.TypeDescription;
 import org.apache.orc.impl.OrcTail;
 
@@ -258,7 +258,7 @@ public final class OrcFile extends org.apache.orc.OrcFile {
     /**
      * A package local option to set the memory manager.
      */
-    public WriterOptions memory(MemoryManager value) {
+    protected WriterOptions memory(MemoryManager value) {
       super.memory(value);
       return this;
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index 8fb7211..59682db 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -158,7 +158,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
   }
 
   private static final Logger LOG = LoggerFactory.getLogger(OrcInputFormat.class);
-  private static final boolean isDebugEnabled = LOG.isDebugEnabled();
+  private static boolean isDebugEnabled = LOG.isDebugEnabled();
   static final HadoopShims SHIMS = ShimLoader.getHadoopShims();
 
   private static final long DEFAULT_MIN_SPLIT_SIZE = 16 * 1024 * 1024;
@@ -1531,7 +1531,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
       Reader.Options readerOptions = new Reader.Options(context.conf);
       if (readerTypes == null) {
         readerIncluded = genIncludedColumns(fileSchema, context.conf);
-        evolution = new SchemaEvolution(fileSchema, null, readerOptions.include(readerIncluded));
+        evolution = new SchemaEvolution(fileSchema, readerOptions.include(readerIncluded));
       } else {
         // The reader schema always comes in without ACID columns.
         TypeDescription readerSchema = OrcUtils.convertTypeFromProtobuf(readerTypes, 0);
@@ -1913,6 +1913,10 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     }
   }
 
+  // The schema type description does not include the ACID fields (i.e. it is the
+  // non-ACID original schema).
+  private static boolean SCHEMA_TYPES_IS_ORIGINAL = true;
+
   @Override
   public RowReader<OrcStruct> getReader(InputSplit inputSplit,
                                         Options options)

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
index 5b2e9b5..0ac3ec5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
@@ -18,12 +18,9 @@
 package org.apache.hadoop.hive.ql.io.orc.encoded;
 
 import java.io.IOException;
-import java.lang.reflect.Field;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
-import java.util.IdentityHashMap;
 import java.util.List;
 
 import org.slf4j.Logger;
@@ -46,13 +43,11 @@ import org.apache.orc.impl.RecordReaderUtils;
 import org.apache.orc.impl.StreamName;
 import org.apache.orc.StripeInformation;
 import org.apache.orc.impl.BufferChunk;
+import org.apache.hadoop.hive.llap.DebugUtils;
 import org.apache.hadoop.hive.ql.io.orc.encoded.Reader.OrcEncodedColumnBatch;
 import org.apache.hadoop.hive.ql.io.orc.encoded.Reader.PoolFactory;
 import org.apache.orc.OrcProto;
 
-import sun.misc.Cleaner;
-
-
 /**
  * Encoded reader implementation.
  *
@@ -85,17 +80,6 @@ import sun.misc.Cleaner;
  */
 class EncodedReaderImpl implements EncodedReader {
   public static final Logger LOG = LoggerFactory.getLogger(EncodedReaderImpl.class);
-  private static Field cleanerField;
-  static {
-    try {
-      // TODO: To make it work for JDK9 use CleanerUtil from https://issues.apache.org/jira/browse/HADOOP-12760
-      final Class<?> dbClazz = Class.forName("java.nio.DirectByteBuffer");
-      cleanerField = dbClazz.getDeclaredField("cleaner");
-      cleanerField.setAccessible(true);
-    } catch (Throwable t) {
-      cleanerField = null;
-    }
-  }
   private static final Object POOLS_CREATION_LOCK = new Object();
   private static Pools POOLS;
   private static class Pools {
@@ -220,8 +204,8 @@ class EncodedReaderImpl implements EncodedReader {
 
   @Override
   public void readEncodedColumns(int stripeIx, StripeInformation stripe,
-      OrcProto.RowIndex[] indexes, List<OrcProto.ColumnEncoding> encodings,
-      List<OrcProto.Stream> streamList, boolean[] included, boolean[][] colRgs,
+      OrcProto.RowIndex[] indexes, List<OrcProto.ColumnEncoding> encodings, List<OrcProto.Stream> streamList,
+      boolean[] included, boolean[][] colRgs,
       Consumer<OrcEncodedColumnBatch> consumer) throws IOException {
     // Note: for now we don't have to setError here, caller will setError if we throw.
     // We are also not supposed to call setDone, since we are only part of the operation.
@@ -319,35 +303,15 @@ class EncodedReaderImpl implements EncodedReader {
       }
     }
 
-    // TODO: the memory release could be optimized - we could release original buffers after we
-    //       are fully done with each original buffer from disk. For now release all at the end;
-    //       it doesn't increase the total amount of memory we hold, just the duration a bit.
-    //       This is much simpler - we can just remember original ranges after reading them, and
-    //       release them at the end. In a few cases where it's easy to determine that a buffer
-    //       can be freed in advance, we remove it from the map.
-    IdentityHashMap<ByteBuffer, Boolean> toRelease = null;
     if (!isAllInCache.value) {
       if (!isDataReaderOpen) {
         this.dataReader.open();
         isDataReaderOpen = true;
       }
       dataReader.readFileData(toRead.next, stripeOffset, cacheWrapper.getAllocator().isDirectAlloc());
-      toRelease = new IdentityHashMap<>();
-      DiskRangeList drl = toRead.next;
-      while (drl != null) {
-        if (drl instanceof BufferChunk) {
-          toRelease.put(drl.getData(), true);
-        }
-        drl = drl.next;
-      }
     }
 
     // 3. For uncompressed case, we need some special processing before read.
-    //    Basically, we are trying to create artificial, consistent ranges to cache, as there are
-    //    no CBs in an uncompressed file. At the end of this processing, the list would contain
-    //    either cache buffers, or buffers allocated by us and not cached (if we are only reading
-    //    parts of the data for some ranges and don't want to cache it). Both are represented by
-    //    CacheChunks, so the list is just CacheChunk-s from that point on.
     DiskRangeList iter = toRead.next;  // Keep "toRead" list for future use, don't extract().
     if (codec == null) {
       for (int colIx = 0; colIx < colCtxs.length; ++colIx) {
@@ -362,12 +326,6 @@ class EncodedReaderImpl implements EncodedReader {
           }
         }
       }
-      // Release buffers as we are done with all the streams... also see toRelease comment.\
-      // With uncompressed streams, we know we are done earlier.
-      if (toRelease != null) {
-        releaseBuffers(toRelease.keySet(), true);
-        toRelease = null;
-      }
       if (isTracingEnabled) {
         LOG.trace("Disk ranges after pre-read (file " + fileKey + ", base offset "
             + stripeOffset + "): " + RecordReaderUtils.stringifyDiskRanges(toRead.next));
@@ -416,8 +374,8 @@ class EncodedReaderImpl implements EncodedReader {
               if (sctx.stripeLevelStream == null) {
                 sctx.stripeLevelStream = POOLS.csdPool.take();
                 // We will be using this for each RG while also sending RGs to processing.
-                // To avoid buffers being unlocked, run refcount one ahead; so each RG 
-                 // processing will decref once, and the
+                // To avoid buffers being unlocked, run refcount one ahead; we will not increase
+                // it when building the last RG, so each RG processing will decref once, and the
                 // last one will unlock the buffers.
                 sctx.stripeLevelStream.incRef();
                 // For stripe-level streams we don't need the extra refcount on the block.
@@ -425,12 +383,14 @@ class EncodedReaderImpl implements EncodedReader {
                 long unlockUntilCOffset = sctx.offset + sctx.length;
                 DiskRangeList lastCached = readEncodedStream(stripeOffset, iter,
                     sctx.offset, sctx.offset + sctx.length, sctx.stripeLevelStream,
-                    unlockUntilCOffset, sctx.offset, toRelease);
+                    unlockUntilCOffset, sctx.offset);
                 if (lastCached != null) {
                   iter = lastCached;
                 }
               }
-              sctx.stripeLevelStream.incRef();
+              if (!isLastRg) {
+                sctx.stripeLevelStream.incRef();
+              }
               cb = sctx.stripeLevelStream;
             } else {
               // This stream can be separated by RG using index. Let's do that.
@@ -451,7 +411,7 @@ class EncodedReaderImpl implements EncodedReader {
               boolean isStartOfStream = sctx.bufferIter == null;
               DiskRangeList lastCached = readEncodedStream(stripeOffset,
                   (isStartOfStream ? iter : sctx.bufferIter), cOffset, endCOffset, cb,
-                  unlockUntilCOffset, sctx.offset, toRelease);
+                  unlockUntilCOffset, sctx.offset);
               if (lastCached != null) {
                 sctx.bufferIter = iter = lastCached;
               }
@@ -477,27 +437,7 @@ class EncodedReaderImpl implements EncodedReader {
     }
 
     // Release the unreleased buffers. See class comment about refcounts.
-    for (int colIx = 0; colIx < colCtxs.length; ++colIx) {
-      ColumnReadContext ctx = colCtxs[colIx];
-      if (ctx == null) continue; // This column is not included.
-      for (int streamIx = 0; streamIx < ctx.streamCount; ++streamIx) {
-        StreamContext sctx = ctx.streams[streamIx];
-        if (sctx == null || sctx.stripeLevelStream == null) continue;
-        if (0 != sctx.stripeLevelStream.decRef()) continue;
-        for (MemoryBuffer buf : sctx.stripeLevelStream.getCacheBuffers()) {
-          if (LOG.isTraceEnabled()) {
-            LOG.trace("Unlocking {} at the end of processing", buf);
-          }
-          cacheWrapper.releaseBuffer(buf);
-        }
-      }
-    }
-
     releaseInitialRefcounts(toRead.next);
-    // Release buffers as we are done with all the streams... also see toRelease comment.
-    if (toRelease != null) {
-      releaseBuffers(toRelease.keySet(), true);
-    }
     releaseCacheChunksIntoObjectPool(toRead.next);
   }
 
@@ -665,8 +605,8 @@ class EncodedReaderImpl implements EncodedReader {
    *         the master list, so they are safe to keep as iterators for various streams.
    */
   public DiskRangeList readEncodedStream(long baseOffset, DiskRangeList start, long cOffset,
-      long endCOffset, ColumnStreamData csd, long unlockUntilCOffset, long streamOffset,
-      IdentityHashMap<ByteBuffer, Boolean> toRelease) throws IOException {
+      long endCOffset, ColumnStreamData csd, long unlockUntilCOffset, long streamOffset)
+          throws IOException {
     if (csd.getCacheBuffers() == null) {
       csd.setCacheBuffers(new ArrayList<MemoryBuffer>());
     } else {
@@ -675,10 +615,10 @@ class EncodedReaderImpl implements EncodedReader {
     if (cOffset == endCOffset) return null;
     boolean isCompressed = codec != null;
     List<ProcCacheChunk> toDecompress = null;
+    List<ByteBuffer> toRelease = null;
     List<IncompleteCb> badEstimates = null;
-    List<ByteBuffer> toReleaseCopies = null;
     if (isCompressed) {
-      toReleaseCopies = new ArrayList<>();
+      toRelease = !dataReader.isTrackingDiskRanges() ? null : new ArrayList<ByteBuffer>();
       toDecompress = new ArrayList<>();
       badEstimates = new ArrayList<>();
     }
@@ -696,8 +636,8 @@ class EncodedReaderImpl implements EncodedReader {
     // 2. Go thru the blocks; add stuff to results and prepare the decompression work (see below).
     try {
       lastUncompressed = isCompressed ?
-          prepareRangesForCompressedRead(cOffset, endCOffset, streamOffset, unlockUntilCOffset,
-              current, csd, toRelease, toReleaseCopies, toDecompress, badEstimates)
+          prepareRangesForCompressedRead(cOffset, endCOffset, streamOffset,
+              unlockUntilCOffset, current, csd, toRelease, toDecompress, badEstimates)
         : prepareRangesForUncompressedRead(
             cOffset, endCOffset, streamOffset, unlockUntilCOffset, current, csd);
     } catch (Exception ex) {
@@ -717,10 +657,7 @@ class EncodedReaderImpl implements EncodedReader {
       assert result == null; // We don't expect conflicts from bad estimates.
     }
 
-    if (toDecompress == null || toDecompress.isEmpty()) {
-      releaseBuffers(toReleaseCopies, false);
-      return lastUncompressed; // Nothing to do.
-    }
+    if (toDecompress == null || toDecompress.isEmpty()) return lastUncompressed; // Nothing to do.
 
     // 3. Allocate the buffers, prepare cache keys.
     // At this point, we have read all the CBs we need to read. cacheBuffers contains some cache
@@ -753,18 +690,21 @@ class EncodedReaderImpl implements EncodedReader {
       cacheWrapper.reuseBuffer(chunk.getBuffer());
     }
 
-    // 5. Release the copies we made directly to the cleaner.
-    releaseBuffers(toReleaseCopies, false);
+    // 5. Release original compressed buffers to zero-copy reader if needed.
+    if (toRelease != null) {
+      assert dataReader.isTrackingDiskRanges();
+      for (ByteBuffer buffer : toRelease) {
+        dataReader.releaseBuffer(buffer);
+      }
+    }
 
     // 6. Finally, put uncompressed data to cache.
     if (fileKey != null) {
-      long[] collisionMask = cacheWrapper.putFileData(
-          fileKey, cacheKeys, targetBuffers, baseOffset);
+      long[] collisionMask = cacheWrapper.putFileData(fileKey, cacheKeys, targetBuffers, baseOffset);
       processCacheCollisions(collisionMask, toDecompress, targetBuffers, csd.getCacheBuffers());
     }
 
-    // 7. It may happen that we know we won't use some cache buffers anymore (the alternative
-    //    is that we will use the same buffers for other streams in separate calls).
+    // 7. It may happen that we know we won't use some compression buffers anymore.
     //    Release initial refcounts.
     for (ProcCacheChunk chunk : toDecompress) {
       ponderReleaseInitialRefcount(unlockUntilCOffset, streamOffset, chunk);
@@ -773,11 +713,9 @@ class EncodedReaderImpl implements EncodedReader {
     return lastUncompressed;
   }
 
-  /** Subset of readEncodedStream specific to compressed streams, separate to avoid long methods. */
   private CacheChunk prepareRangesForCompressedRead(long cOffset, long endCOffset,
-      long streamOffset, long unlockUntilCOffset, DiskRangeList current,
-      ColumnStreamData columnStreamData, IdentityHashMap<ByteBuffer, Boolean> toRelease,
-      List<ByteBuffer> toReleaseCopies, List<ProcCacheChunk> toDecompress,
+      long streamOffset, long unlockUntilCOffset, DiskRangeList current, ColumnStreamData columnStreamData,
+      List<ByteBuffer> toRelease, List<ProcCacheChunk> toDecompress,
       List<IncompleteCb> badEstimates) throws IOException {
     if (cOffset > current.getOffset()) {
       // Target compression block is in the middle of the range; slice the range in two.
@@ -824,8 +762,8 @@ class EncodedReaderImpl implements EncodedReader {
           throw new RuntimeException(msg);
         }
         BufferChunk bc = (BufferChunk)current;
-        ProcCacheChunk newCached = addOneCompressionBuffer(bc, columnStreamData.getCacheBuffers(),
-            toDecompress, toRelease, toReleaseCopies, badEstimates);
+        ProcCacheChunk newCached = addOneCompressionBuffer(
+            bc, columnStreamData.getCacheBuffers(), toDecompress, toRelease, badEstimates);
         lastUncompressed = (newCached == null) ? lastUncompressed : newCached;
         next = (newCached != null) ? newCached.next : null;
         currentOffset = (next != null) ? next.getOffset() : -1;
@@ -839,12 +777,9 @@ class EncodedReaderImpl implements EncodedReader {
     return lastUncompressed;
   }
 
-  /** Subset of readEncodedStream specific to uncompressed streams, separate to avoid long methods. */
   private CacheChunk prepareRangesForUncompressedRead(long cOffset, long endCOffset,
-      long streamOffset, long unlockUntilCOffset, DiskRangeList current,
-      ColumnStreamData columnStreamData) throws IOException {
-    // Note: we are called after preReadUncompressedStream, so it doesn't have to do nearly as much
-    //       as prepareRangesForCompressedRead does; e.g. every buffer is already a CacheChunk.
+      long streamOffset, long unlockUntilCOffset, DiskRangeList current, ColumnStreamData columnStreamData)
+          throws IOException {
     long currentOffset = cOffset;
     CacheChunk lastUncompressed = null;
     boolean isFirst = true;
@@ -884,10 +819,11 @@ class EncodedReaderImpl implements EncodedReader {
    * We could avoid copy in non-zcr case and manage the buffer that was not allocated by our
    * allocator. Uncompressed case is not mainline though so let's not complicate it.
    */
-  private DiskRangeList preReadUncompressedStream(long baseOffset, DiskRangeList start,
-      long streamOffset, long streamEnd) throws IOException {
+  private DiskRangeList preReadUncompressedStream(long baseOffset,
+      DiskRangeList start, long streamOffset, long streamEnd) throws IOException {
     if (streamOffset == streamEnd) return null;
     List<UncompressedCacheChunk> toCache = null;
+    List<ByteBuffer> toRelease = null;
 
     // 1. Find our bearings in the stream.
     DiskRangeList current = findIntersectingPosition(start, streamOffset, streamEnd);
@@ -924,6 +860,9 @@ class EncodedReaderImpl implements EncodedReader {
       if (current.getOffset() >= partEnd) {
         continue; // We have no data at all for this part of the stream (could be unneeded), skip.
       }
+      if (toRelease == null && dataReader.isTrackingDiskRanges()) {
+        toRelease = new ArrayList<ByteBuffer>();
+      }
       // We have some disk buffers... see if we have entire part, etc.
       UncompressedCacheChunk candidateCached = null; // We will cache if we have the entire part.
       DiskRangeList next = current;
@@ -938,15 +877,21 @@ class EncodedReaderImpl implements EncodedReader {
         current = next;
         if (noMoreDataForPart) break; // Done with this part.
 
+        boolean wasSplit = false;
         if (current.getEnd() > partEnd) {
           // If the current buffer contains multiple parts, split it.
           current = current.split(partEnd);
+          wasSplit = true;
         }
         if (isTracingEnabled) {
           LOG.trace("Processing uncompressed file data at ["
               + current.getOffset() + ", " + current.getEnd() + ")");
         }
         BufferChunk curBc = (BufferChunk)current;
+        if (!wasSplit && toRelease != null) {
+          toRelease.add(curBc.getChunk()); // TODO: is it valid to give zcr the modified 2nd part?
+        }
+
         // Track if we still have the entire part.
         long hadEntirePartTo = hasEntirePartTo;
         // We have data until the end of current block if we had it until the beginning.
@@ -1007,7 +952,15 @@ class EncodedReaderImpl implements EncodedReader {
       ++ix;
     }
 
-    // 5. Put uncompressed data to cache.
+    // 5. Release original compressed buffers to zero-copy reader if needed.
+    if (toRelease != null) {
+      assert dataReader.isTrackingDiskRanges();
+      for (ByteBuffer buf : toRelease) {
+        dataReader.releaseBuffer(buf);
+      }
+    }
+
+    // 6. Finally, put uncompressed data to cache.
     if (fileKey != null) {
       long[] collisionMask = cacheWrapper.putFileData(fileKey, cacheKeys, targetBuffers, baseOffset);
       processCacheCollisions(collisionMask, toCache, targetBuffers, null);
@@ -1016,6 +969,7 @@ class EncodedReaderImpl implements EncodedReader {
     return lastUncompressed;
   }
 
+
   private int determineUncompressedPartSize() {
     // We will break the uncompressed data in the cache in the chunks that are the size
     // of the prevalent ORC compression buffer (the default), or maximum allocation (since we
@@ -1224,8 +1178,7 @@ class EncodedReaderImpl implements EncodedReader {
    */
   private ProcCacheChunk addOneCompressionBuffer(BufferChunk current,
       List<MemoryBuffer> cacheBuffers, List<ProcCacheChunk> toDecompress,
-      IdentityHashMap<ByteBuffer, Boolean> toRelease, List<ByteBuffer> toReleaseCopies,
-      List<IncompleteCb> badEstimates) throws IOException {
+      List<ByteBuffer> toRelease, List<IncompleteCb> badEstimates) throws IOException {
     ByteBuffer slice = null;
     ByteBuffer compressed = current.getChunk();
     long cbStartOffset = current.getOffset();
@@ -1248,8 +1201,12 @@ class EncodedReaderImpl implements EncodedReader {
       // Simple case - CB fits entirely in the disk range.
       slice = compressed.slice();
       slice.limit(chunkLength);
-      return addOneCompressionBlockByteBuffer(slice, isUncompressed,
+      ProcCacheChunk cc = addOneCompressionBlockByteBuffer(slice, isUncompressed,
           cbStartOffset, cbEndOffset, chunkLength, current, toDecompress, cacheBuffers);
+      if (compressed.remaining() <= 0 && dataReader.isTrackingDiskRanges()) {
+        toRelease.add(compressed);
+      }
+      return cc;
     }
     if (current.getEnd() < cbEndOffset && !current.hasContiguousNext()) {
       badEstimates.add(addIncompleteCompressionBuffer(cbStartOffset, current, 0));
@@ -1259,7 +1216,6 @@ class EncodedReaderImpl implements EncodedReader {
     // TODO: we could remove extra copy for isUncompressed case by copying directly to cache.
     // We need to consolidate 2 or more buffers into one to decompress.
     ByteBuffer copy = allocateBuffer(chunkLength, compressed.isDirect());
-    toReleaseCopies.add(copy); // We will always release copies at the end.
     int remaining = chunkLength - compressed.remaining();
     int originalPos = compressed.position();
     copy.put(compressed);
@@ -1268,8 +1224,12 @@ class EncodedReaderImpl implements EncodedReader {
     }
     DiskRangeList next = current.next;
     current.removeSelf();
-    if (originalPos == 0 && toRelease.remove(compressed)) {
-      releaseBuffer(compressed, true);
+    if (dataReader.isTrackingDiskRanges()) {
+      if (originalPos == 0) {
+        dataReader.releaseBuffer(compressed); // We copied the entire buffer.
+      } else {
+        toRelease.add(compressed); // There might be slices depending on this buffer.
+      }
     }
 
     int extraChunkCount = 0;
@@ -1286,15 +1246,15 @@ class EncodedReaderImpl implements EncodedReader {
         copy.put(slice);
         ProcCacheChunk cc = addOneCompressionBlockByteBuffer(copy, isUncompressed,
             cbStartOffset, cbEndOffset, remaining, (BufferChunk)next, toDecompress, cacheBuffers);
-        if (compressed.remaining() <= 0 && toRelease.remove(compressed)) {
-          releaseBuffer(compressed, true); // We copied the entire buffer. 
-        } // else there's more data to process; will be handled in next call.
+        if (compressed.remaining() <= 0 && dataReader.isTrackingDiskRanges()) {
+          dataReader.releaseBuffer(compressed); // We copied the entire buffer.
+        }
         return cc;
       }
       remaining -= compressed.remaining();
-      copy.put(compressed); // TODO: move into the if below; account for release call
-      if (toRelease.remove(compressed)) {
-        releaseBuffer(compressed, true); // We copied the entire buffer.
+      copy.put(compressed);
+      if (dataReader.isTrackingDiskRanges()) {
+        dataReader.releaseBuffer(compressed); // We copied the entire buffer.
       }
       DiskRangeList tmp = next;
       next = next.hasContiguousNext() ? next.next : null;
@@ -1310,38 +1270,6 @@ class EncodedReaderImpl implements EncodedReader {
     }
   }
 
-  private void releaseBuffers(Collection<ByteBuffer> toRelease, boolean isFromDataReader) {
-    if (toRelease == null) return;
-    for (ByteBuffer buf : toRelease) {
-      releaseBuffer(buf, isFromDataReader);
-    }
-  }
-
-  private void releaseBuffer(ByteBuffer bb, boolean isFromDataReader) {
-    if (isTracingEnabled) {
-      LOG.trace("Releasing the buffer " + System.identityHashCode(bb));
-    }
-    if (isFromDataReader && dataReader.isTrackingDiskRanges()) {
-      dataReader.releaseBuffer(bb);
-      return;
-    }
-    Field localCf = cleanerField;
-    if (!bb.isDirect() || localCf == null) return;
-    try {
-      Cleaner cleaner = (Cleaner) localCf.get(bb);
-      if (cleaner != null) {
-        cleaner.clean();
-      } else {
-        LOG.debug("Unable to clean a buffer using cleaner - no cleaner");
-      }
-    } catch (Exception e) {
-      // leave it for GC to clean up
-      LOG.warn("Unable to clean direct buffers using Cleaner.");
-      cleanerField = null;
-    }
-  }
-
-
   private IncompleteCb addIncompleteCompressionBuffer(
       long cbStartOffset, DiskRangeList target, int extraChunkCount) {
     IncompleteCb icb = new IncompleteCb(cbStartOffset, target.getEnd());
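
The cleanerField block and releaseBuffer helper removed above free direct ByteBuffers eagerly through sun.misc.Cleaner instead of waiting for GC. The same JDK 8-only trick in isolation (a sketch; as the removed TODO notes, JDK 9+ would need CleanerUtil from HADOOP-12760 instead):

  import java.lang.reflect.Field;
  import java.nio.ByteBuffer;

  public class DirectBufferFreer {
    private static Field cleanerField;
    static {
      try {
        // JDK 8 only: DirectByteBuffer keeps its Cleaner in a private "cleaner" field.
        Class<?> dbClazz = Class.forName("java.nio.DirectByteBuffer");
        cleanerField = dbClazz.getDeclaredField("cleaner");
        cleanerField.setAccessible(true);
      } catch (Throwable t) {
        cleanerField = null; // fall back to letting GC reclaim the native memory
      }
    }

    public static void free(ByteBuffer bb) {
      if (!bb.isDirect() || cleanerField == null) {
        return;
      }
      try {
        sun.misc.Cleaner cleaner = (sun.misc.Cleaner) cleanerField.get(bb);
        if (cleaner != null) {
          cleaner.clean(); // releases the off-heap allocation immediately
        }
      } catch (Exception e) {
        cleanerField = null; // give up and let GC handle this and future buffers
      }
    }
  }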

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java
index a7bb5ee..26f1e75 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java
@@ -21,7 +21,6 @@ import java.util.Properties;
 import java.util.TimeZone;
 
 import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetTableUtils;
-import org.apache.hadoop.hive.ql.io.parquet.timestamp.NanoTimeUtils;
 import org.apache.parquet.Strings;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -140,11 +139,14 @@ public class MapredParquetOutputFormat extends FileOutputFormat<NullWritable, Pa
     String timeZoneID =
         tableProperties.getProperty(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY);
     if (!Strings.isNullOrEmpty(timeZoneID)) {
-
-      NanoTimeUtils.validateTimeZone(timeZoneID);
+      if (!Arrays.asList(TimeZone.getAvailableIDs()).contains(timeZoneID)) {
+        throw new IllegalStateException("Unexpected timezone id found for parquet int96 conversion: " + timeZoneID);
+      }
       return TimeZone.getTimeZone(timeZoneID);
     }
 
-    return TimeZone.getDefault();
+    // If no timezone is defined in table properties, then adjust timestamps using
+    // PARQUET_INT96_NO_ADJUSTMENT_ZONE timezone
+    return TimeZone.getTimeZone(ParquetTableUtils.PARQUET_INT96_NO_ADJUSTMENT_ZONE);
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ParquetRecordReaderBase.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ParquetRecordReaderBase.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ParquetRecordReaderBase.java
index 2954601..8e33b7d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ParquetRecordReaderBase.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ParquetRecordReaderBase.java
@@ -20,7 +20,6 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.io.parquet.read.DataWritableReadSupport;
 import org.apache.hadoop.hive.ql.io.parquet.read.ParquetFilterPredicateConverter;
 import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetTableUtils;
-import org.apache.hadoop.hive.ql.io.parquet.timestamp.NanoTimeUtils;
 import org.apache.hadoop.hive.ql.io.sarg.ConvertAstToSearchArg;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.serde2.SerDeStats;
@@ -45,6 +44,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.TimeZone;
 
@@ -170,7 +170,7 @@ public class ParquetRecordReaderBase {
     boolean skipConversion = HiveConf.getBoolVar(configuration,
         HiveConf.ConfVars.HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION);
     FileMetaData fileMetaData = parquetMetadata.getFileMetaData();
-    if (!Strings.nullToEmpty(fileMetaData.getCreatedBy()).startsWith("parquet-mr") &&
+    if (!Strings.nullToEmpty(fileMetaData.getCreatedBy()).startsWith("parquet-mr") ||
         skipConversion) {
       // Impala writes timestamp values using GMT only. We should not try to convert Impala
       // files to other type of timezones.
@@ -179,12 +179,16 @@ public class ParquetRecordReaderBase {
       // TABLE_PARQUET_INT96_TIMEZONE is a table property used to detect what timezone conversion
       // to use when reading Parquet timestamps.
       timeZoneID = configuration.get(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY,
-          TimeZone.getDefault().getID());
-      NanoTimeUtils.validateTimeZone(timeZoneID);
+          ParquetTableUtils.PARQUET_INT96_NO_ADJUSTMENT_ZONE);
+
+      if (!Arrays.asList(TimeZone.getAvailableIDs()).contains(timeZoneID)) {
+          throw new IllegalStateException("Unexpected timezone id found for parquet int96 conversion: " + timeZoneID);
+      }
     }
 
     // 'timeZoneID' should be valid, since we did not throw exception above
-    configuration.set(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY,timeZoneID);
+    configuration.set(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY,
+        TimeZone.getTimeZone(timeZoneID).getID());
   }
 
   public FilterCompat.Filter setFilter(final JobConf conf, MessageType schema) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java
index dbd6fb3..5dc8088 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java
@@ -152,26 +152,13 @@ public class NanoTimeUtils {
 
     calendar.setTimeInMillis(utcCalendar.getTimeInMillis());
 
-    Calendar adjusterCalendar = copyToCalendarWithTZ(calendar, getLocalCalendar());
+    Calendar adjusterCalendar = copyToCalendarWithTZ(calendar, Calendar.getInstance());
 
     Timestamp ts = new Timestamp(adjusterCalendar.getTimeInMillis());
     ts.setNanos((int) nanos);
     return ts;
   }
 
-  /**
-   * Check if the string id is a valid java TimeZone id.
-   * TimeZone#getTimeZone will return "GMT" if the id cannot be understood.
-   * @param timeZoneID
-   */
-  public static void validateTimeZone(String timeZoneID) {
-    if (TimeZone.getTimeZone(timeZoneID).getID().equals("GMT")
-        && !"GMT".equals(timeZoneID)) {
-      throw new IllegalStateException(
-          "Unexpected timezone id found for parquet int96 conversion: " + timeZoneID);
-    }
-  }
-
   private static Calendar copyToCalendarWithTZ(Calendar from, Calendar to) {
     if(from.getTimeZone().getID().equals(to.getTimeZone().getID())) {
       return from;
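
The Parquet hunks above trade one timezone-id validation idiom for another: the deleted NanoTimeUtils.validateTimeZone relied on TimeZone.getTimeZone falling back to "GMT" for unknown ids, while the restored call sites check membership in TimeZone.getAvailableIDs(). Both checks side by side, as a standalone sketch rather than the Hive methods themselves:

  import java.util.Arrays;
  import java.util.TimeZone;

  public class TimeZoneIdChecks {
    // Idiom behind the deleted validateTimeZone(): an unknown id silently maps to "GMT".
    static boolean isValidByFallback(String id) {
      return "GMT".equals(id) || !TimeZone.getTimeZone(id).getID().equals("GMT");
    }

    // Idiom used by the restored code: membership in the JVM's known id list.
    static boolean isValidByList(String id) {
      return Arrays.asList(TimeZone.getAvailableIDs()).contains(id);
    }

    public static void main(String[] args) {
      for (String id : new String[] {"GMT", "America/Los_Angeles", "Not/AZone"}) {
        System.out.println(id + " -> fallback=" + isValidByFallback(id)
            + ", list=" + isValidByList(id));
      }
    }
  }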

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java
index 312cdac..6ca1963 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java
@@ -121,9 +121,8 @@ public class VectorizedParquetRecordReader extends ParquetRecordReaderBase
     try {
       serDeStats = new SerDeStats();
       projectionPusher = new ProjectionPusher();
-      ParquetInputSplit inputSplit = getSplit(oldInputSplit, conf);
-      if (inputSplit != null) {
-        initialize(inputSplit, conf);
+      if (oldInputSplit != null) {
+        initialize(getSplit(oldInputSplit, conf), conf);
         setTimeZoneConversion(jobConf, ((FileSplit) oldInputSplit).getPath());
       }
       colsToInclude = ColumnProjectionUtils.getReadColumnIDs(conf);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
index 5401c7b..00c9645 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.common.StatsSetupConst.StatDB;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.Context;
@@ -57,6 +58,7 @@ import org.apache.hadoop.hive.ql.stats.StatsPublisher;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapred.Counters;
 import org.apache.hadoop.mapred.FileInputFormat;
+import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.JobClient;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.RunningJob;
@@ -75,6 +77,8 @@ import org.apache.logging.log4j.core.appender.RollingFileAppender;
 @SuppressWarnings( { "deprecation"})
 public class PartialScanTask extends Task<PartialScanWork> implements
     Serializable, HadoopJobExecHook {
+
+
   private static final long serialVersionUID = 1L;
 
   protected transient JobConf job;
@@ -270,7 +274,7 @@ public class PartialScanTask extends Task<PartialScanWork> implements
     return "RCFile Statistics Partial Scan";
   }
 
-  public static final String INPUT_SEPERATOR = ":";
+  public static String INPUT_SEPERATOR = ":";
 
   public static void main(String[] args) {
     String inputPathStr = null;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 01e8a48..d255265 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -21,11 +21,6 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.apache.hadoop.hive.ql.plan.HiveOperation;
-import org.apache.hadoop.hive.ql.plan.LockDatabaseDesc;
-import org.apache.hadoop.hive.ql.plan.LockTableDesc;
-import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc;
-import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
 import org.apache.hive.common.util.ShutdownHookManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -62,13 +57,6 @@ import java.util.concurrent.atomic.AtomicInteger;
  * with a single thread accessing it at a time, with the exception of {@link #heartbeat()} method.
  * The latter may (usually will) be called from a timer thread.
  * See {@link #getMS()} for more important concurrency/metastore access notes.
- * 
- * Each statement that the TM (transaction manager) should be aware of should belong to a transaction.
- * Effectively, that means any statement that has side effects.  Exceptions are statements like
- * Show Compactions, Show Tables, Use Database foo, etc.  The transaction is started either
- * explicitly ( via Start Transaction SQL statement from end user - not fully supported) or
- * implicitly by the {@link org.apache.hadoop.hive.ql.Driver} (which looks exactly like autoCommit=true
- * from the end user's point of view). See more at {@link #isExplicitTransaction}.
  */
 public final class DbTxnManager extends HiveTxnManagerImpl {
 
@@ -88,47 +76,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
    * to keep apart multiple writes of the same data within the same transaction
    * Also see {@link org.apache.hadoop.hive.ql.io.AcidOutputFormat.Options}
    */
-  private int writeId = -1;
-  /**
-   * counts number of statements in the current transaction
-   */
-  private int numStatements = 0;
-  /**
-   * if {@code true} it means current transaction is started via START TRANSACTION which means it cannot
-   * include any Operations which cannot be rolled back (drop partition; write to  non-acid table).
-   * If false, it's a single statement transaction which can include any statement.  This is not a 
-   * contradiction from the user point of view who doesn't know anything about the implicit txn
-   * and cannot call rollback (the statement of course can fail in which case there is nothing to 
-   * rollback (assuming the statement is well implemented)).
-   *
-   * This is done so that all commands run in a transaction which simplifies implementation and
-   * allows a simple implementation of multi-statement txns which don't require a lock manager
-   * capable of deadlock detection.  (todo: not fully implemented; elaborate on how this LM works)
-   *
-   * Also, critically important, ensuring that everything runs in a transaction assigns an order
-   * to all operations in the system - needed for replication/DR.
-   *
-   * We don't want to allow non-transactional statements in a user demarcated txn because the effect
- * of such a statement is "visible" immediately on statement completion; the user may
- * issue a rollback, but the action of the statement can't be undone (and has possibly already been
-   * seen by another txn).  For example,
-   * start transaction
-   * insert into transactional_table values(1);
-   * insert into non_transactional_table select * from transactional_table;
-   * rollback
-   *
-   * The user would be in for a surprise especially if they are not aware of transactional
-   * properties of the tables involved.
-   *
-   * As a side note: what should the lock manager do with locks for non-transactional resources?
- * Should it release them at the end of the stmt or txn?
-   * Some interesting thoughts: http://mysqlmusings.blogspot.com/2009/02/mixing-engines-in-transactions.html
-   */
-  private boolean isExplicitTransaction = false;
-  /**
-   * To ensure transactions don't nest.
-   */
-  private int startTransactionCount = 0;
+  private int statementId = -1;
 
   // QueryId for the query in current transaction
   private String queryId;
@@ -193,22 +141,15 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
 
   @VisibleForTesting
   long openTxn(Context ctx, String user, long delay) throws LockException {
-    /*Q: why don't we lock the snapshot here???  Instead of having client make an explicit call
-    whenever it chooses
-    A: If we want to rely on locks for transaction scheduling we must get the snapshot after lock
-    acquisition.  Relying on locks is a pessimistic strategy which works better under high
-    contention.*/
+    //todo: why don't we lock the snapshot here???  Instead of having client make an explicit call
+    //whenever it chooses
     init();
-    getLockManager();
     if(isTxnOpen()) {
       throw new LockException("Transaction already opened. " + JavaUtils.txnIdToString(txnId));
     }
     try {
       txnId = getMS().openTxn(user);
-      writeId = 0;
-      numStatements = 0;
-      isExplicitTransaction = false;
-      startTransactionCount = 0;
+      statementId = 0;
       LOG.debug("Opened " + JavaUtils.txnIdToString(txnId));
       ctx.setHeartbeater(startHeartbeat(delay));
       return txnId;
@@ -218,8 +159,8 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
   }
 
   /**
-   * we don't expect multiple threads to call this method concurrently but {@link #lockMgr} will
-   * be read by a different threads than one writing it, thus it's {@code volatile}
+   * we don't expect multiple threads to call this method concurrently, but {@link #lockMgr} will
+   * be read by a different thread than the one writing it, thus it's {@code volatile}
    */
   @Override
   public HiveLockManager getLockManager() throws LockException {
@@ -238,95 +179,24 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
     catch(LockException e) {
       if(e.getCause() instanceof TxnAbortedException) {
         txnId = 0;
-        writeId = -1;
+        statementId = -1;
       }
       throw e;
     }
   }
 
   /**
-   * Watermark to include in error msgs and logs
-   * @param queryPlan
-   * @return
-   */
-  private static String getQueryIdWaterMark(QueryPlan queryPlan) {
-    return "queryId=" + queryPlan.getQueryId();
-  }
-
-  private void markExplicitTransaction(QueryPlan queryPlan) throws LockException {
-    isExplicitTransaction = true;
-    if(++startTransactionCount > 1) {
-      throw new LockException(null, ErrorMsg.OP_NOT_ALLOWED_IN_TXN, queryPlan.getOperationName(),
-        JavaUtils.txnIdToString(getCurrentTxnId()), queryPlan.getQueryId());
-    }
-
-  }
-  /**
-   * Ensures that the current SQL statement is appropriate for the current state of the
-   * Transaction Manager (e.g. can call commit unless you called start transaction)
-   * 
-   * Note that support for multi-statement txns is a work-in-progress so it's only supported in
-   * HiveConf#HIVE_IN_TEST/HiveConf#TEZ_HIVE_IN_TEST.
-   * @param queryPlan
-   * @throws LockException
-   */
-  private void verifyState(QueryPlan queryPlan) throws LockException {
-    if(!isTxnOpen()) {
-      throw new LockException("No transaction context for operation: " + queryPlan.getOperationName() + 
-        " for " + getQueryIdWaterMark(queryPlan));
-    }
-    if(queryPlan.getOperation() == null) {
-      throw new IllegalStateException("Unknown HiveOperation for " + getQueryIdWaterMark(queryPlan));
-    }
-    numStatements++;
-    switch (queryPlan.getOperation()) {
-      case START_TRANSACTION:
-        markExplicitTransaction(queryPlan);
-        break;
-      case COMMIT:
-      case ROLLBACK:
-        if(!isTxnOpen()) {
-          throw new LockException(null, ErrorMsg.OP_NOT_ALLOWED_WITHOUT_TXN, queryPlan.getOperationName());
-        }
-        if(!isExplicitTransaction) {
-          throw new LockException(null, ErrorMsg.OP_NOT_ALLOWED_IN_IMPLICIT_TXN, queryPlan.getOperationName());
-        }
-        break;
-      default:
-        if(!queryPlan.getOperation().isAllowedInTransaction() && isExplicitTransaction) {
-          //for example, drop table in an explicit txn is not allowed
-          //in some cases this requires looking at more than just the operation
-          //for example HiveOperation.LOAD - OK if target is MM table but not OK if non-acid table
-          throw new LockException(null, ErrorMsg.OP_NOT_ALLOWED_IN_TXN, queryPlan.getOperationName(),
-            JavaUtils.txnIdToString(getCurrentTxnId()), queryPlan.getQueryId());
-        }
-    }
-    /*
-    Should we allow writing to non-transactional tables in an explicit transaction?  The user may
-    issue ROLLBACK but these tables won't rollback.
-    Can do this by checking ReadEntity/WriteEntity to determine whether it's reading/writing
-    any non acid and raise an appropriate error
-    * Driver.acidSinks and Driver.acidInQuery can be used if any acid is in the query*/
-  }
-  /**
-   * Normally client should call {@link #acquireLocks(org.apache.hadoop.hive.ql.QueryPlan, org.apache.hadoop.hive.ql.Context, String)}
+   * This is for testing only.  Normally a client should call {@link #acquireLocks(org.apache.hadoop.hive.ql.QueryPlan, org.apache.hadoop.hive.ql.Context, String)}
    * @param isBlocking if false, the method will return immediately; thus the locks may be in LockState.WAITING
    * @return null if no locks were needed
    */
-  @VisibleForTesting
   LockState acquireLocks(QueryPlan plan, Context ctx, String username, boolean isBlocking) throws LockException {
     init();
-    // Make sure we've built the lock manager
+        // Make sure we've built the lock manager
     getLockManager();
-    verifyState(plan);
+
     boolean atLeastOneLock = false;
     queryId = plan.getQueryId();
-    switch (plan.getOperation()) {
-      case SET_AUTOCOMMIT:
-        /**This is here for documentation purposes.  This TM doesn't support this - only has one
-        * mode of operation documented at {@link DbTxnManager#isExplicitTransaction}*/
-        return  null;
-    }
 
     LockRequestBuilder rqstBuilder = new LockRequestBuilder(queryId);
     //link queryId to txnId
@@ -370,8 +240,8 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
           // This is a file or something we don't hold locks for.
           continue;
       }
-      if(t != null) {
-        compBuilder.setIsAcid(AcidUtils.isFullAcidTable(t));
+      if(t != null && AcidUtils.isFullAcidTable(t)) {
+        compBuilder.setIsAcid(true);
       }
       LockComponent comp = compBuilder.build();
       LOG.debug("Adding lock component to lock request " + comp.toString());
@@ -392,33 +262,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
       }
       LockComponentBuilder compBuilder = new LockComponentBuilder();
       Table t = null;
-      switch (output.getType()) {
-        case DATABASE:
-          compBuilder.setDbName(output.getDatabase().getName());
-          break;
-
-        case TABLE:
-        case DUMMYPARTITION:   // in case of dynamic partitioning lock the table
-          t = output.getTable();
-          compBuilder.setDbName(t.getDbName());
-          compBuilder.setTableName(t.getTableName());
-          break;
-
-        case PARTITION:
-          compBuilder.setPartitionName(output.getPartition().getName());
-          t = output.getPartition().getTable();
-          compBuilder.setDbName(t.getDbName());
-          compBuilder.setTableName(t.getTableName());
-          break;
-
-        default:
-          // This is a file or something we don't hold locks for.
-          continue;
-      }
       switch (output.getWriteType()) {
-        /* base this on HiveOperation instead?  this and DDL_NO_LOCK is peppered all over the code...
-         Seems much cleaner if each stmt is identified as a particular HiveOperation (which I'd think
-         makes sense everywhere).  This however would be problematic for merge...*/
         case DDL_EXCLUSIVE:
         case INSERT_OVERWRITE:
           compBuilder.setExclusive();
@@ -426,9 +270,10 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
           break;
 
         case INSERT:
-          assert t != null;
+          t = getTable(output);
           if(AcidUtils.isFullAcidTable(t)) {
             compBuilder.setShared();
+            compBuilder.setIsAcid(true);
           }
           else {
             if (conf.getBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE)) {
@@ -436,6 +281,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
             } else {  // this is backward compatible for non-ACID resources, w/o ACID semantics
               compBuilder.setShared();
             }
+            compBuilder.setIsAcid(false);
           }
           compBuilder.setOperationType(DataOperationType.INSERT);
           break;
@@ -447,10 +293,12 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
         case UPDATE:
           compBuilder.setSemiShared();
           compBuilder.setOperationType(DataOperationType.UPDATE);
+          t = getTable(output);
           break;
         case DELETE:
           compBuilder.setSemiShared();
           compBuilder.setOperationType(DataOperationType.DELETE);
+          t = getTable(output);
           break;
 
         case DDL_NO_LOCK:
@@ -459,11 +307,34 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
         default:
           throw new RuntimeException("Unknown write type " +
               output.getWriteType().toString());
+
       }
-      if(t != null) {
-        compBuilder.setIsAcid(AcidUtils.isFullAcidTable(t));
-      }
+      switch (output.getType()) {
+        case DATABASE:
+          compBuilder.setDbName(output.getDatabase().getName());
+          break;
 
+        case TABLE:
+        case DUMMYPARTITION:   // in case of dynamic partitioning lock the table
+          t = output.getTable();
+          compBuilder.setDbName(t.getDbName());
+          compBuilder.setTableName(t.getTableName());
+          break;
+
+        case PARTITION:
+          compBuilder.setPartitionName(output.getPartition().getName());
+          t = output.getPartition().getTable();
+          compBuilder.setDbName(t.getDbName());
+          compBuilder.setTableName(t.getTableName());
+          break;
+
+        default:
+          // This is a file or something we don't hold locks for.
+          continue;
+      }
+      if(t != null && AcidUtils.isFullAcidTable(t)) {
+        compBuilder.setIsAcid(true);
+      }
       compBuilder.setIsDynamicPartitionWrite(output.isDynamicPartitionWrite());
       LockComponent comp = compBuilder.build();
       LOG.debug("Adding lock component to lock request " + comp.toString());
@@ -534,8 +405,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
           e);
     } finally {
       txnId = 0;
-      writeId = -1;
-      numStatements = 0;
+      statementId = -1;
     }
   }
 
@@ -559,8 +429,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
           e);
     } finally {
       txnId = 0;
-      writeId = -1;
-      numStatements = 0;
+      statementId = -1;
     }
   }
 
@@ -687,26 +556,6 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
   public boolean supportsExplicitLock() {
     return false;
   }
-  @Override
-  public int lockTable(Hive db, LockTableDesc lockTbl) throws HiveException {
-    super.lockTable(db, lockTbl);
-    throw new UnsupportedOperationException();
-  }
-  @Override
-  public int unlockTable(Hive hiveDB, UnlockTableDesc unlockTbl) throws HiveException {
-    super.unlockTable(hiveDB, unlockTbl);
-    throw new UnsupportedOperationException();
-  }
-  @Override
-  public int lockDatabase(Hive hiveDB, LockDatabaseDesc lockDb) throws HiveException {
-    super.lockDatabase(hiveDB, lockDb);
-    throw new UnsupportedOperationException();
-  }
-  @Override
-  public int unlockDatabase(Hive hiveDB, UnlockDatabaseDesc unlockDb) throws HiveException {
-    super.unlockDatabase(hiveDB, unlockDb);
-    throw new UnsupportedOperationException();
-  }
 
   @Override
   public boolean useNewShowLocksFormat() {
@@ -717,44 +566,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
   public boolean supportsAcid() {
     return true;
   }
-  /**
-   * In an explicit txn start_transaction is the 1st statement and we record the snapshot at the
-   * start of the txn for Snapshot Isolation.  For Read Committed (not supported yet) we'd record
-   * it before executing each statement (but after lock acquisition if using lock based concurrency
-   * control).
-   * For implicit txn, the stmt that triggered/started the txn is the first statement
-   */
-  @Override
-  public boolean recordSnapshot(QueryPlan queryPlan) {
-    assert isTxnOpen();
-    assert numStatements > 0 : "was acquireLocks() called already?";
-    if(queryPlan.getOperation() == HiveOperation.START_TRANSACTION) {
-      //here if start of explicit txn
-      assert isExplicitTransaction;
-      assert numStatements == 1;
-      return true;
-    }
-    else if(!isExplicitTransaction) {
-      assert numStatements == 1 : "numStatements=" + numStatements + " in implicit txn";
-      if (queryPlan.hasAcidResourcesInQuery()) {
-        //1st and only stmt in implicit txn and uses acid resource
-        return true;
-      }
-    }
-    return false;
-  }
-  @Override
-  public boolean isImplicitTransactionOpen() {
-    if(!isTxnOpen()) {
-      //some commands like "show databases" don't start implicit transactions
-      return false;
-    }
-    if(!isExplicitTransaction) {
-      assert numStatements == 1 : "numStatements=" + numStatements;
-      return true;
-    }
-    return false;
-  }
+
   @Override
   protected void destruct() {
     try {
@@ -814,7 +626,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
   @Override
   public int getWriteIdAndIncrement() {
     assert isTxnOpen();
-    return writeId++;
+    return statementId++;
   }
 
   private static long getHeartbeatInterval(Configuration conf) throws LockException {
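
The removed fields and verifyState logic above track whether the open transaction was started explicitly (START TRANSACTION) or implicitly by the Driver, and reject statements that do not fit the current state: at most one START TRANSACTION per transaction, COMMIT/ROLLBACK only inside an explicit transaction, and no non-rollbackable operations inside an explicit transaction. A simplified, hypothetical sketch of that bookkeeping in plain Java (not the actual DbTxnManager API; operation names are passed as strings for brevity):

/** Simplified illustration of the explicit/implicit transaction bookkeeping shown above. */
class TxnState {
  private boolean txnOpen = false;
  private boolean explicitTxn = false;
  private int startTxnCount = 0;
  private int numStatements = 0;

  void openTxn() {
    if (txnOpen) {
      throw new IllegalStateException("Transaction already opened");
    }
    txnOpen = true;
    explicitTxn = false;
    startTxnCount = 0;
    numStatements = 0;
  }

  /** Mirrors verifyState(): called once per statement, before locks are acquired. */
  void verifyStatement(String operation, boolean allowedInTxn) {
    if (!txnOpen) {
      throw new IllegalStateException("No transaction context for operation: " + operation);
    }
    numStatements++;
    switch (operation) {
      case "START_TRANSACTION":
        explicitTxn = true;
        if (++startTxnCount > 1) {
          throw new IllegalStateException(operation + " not allowed in an open transaction");
        }
        break;
      case "COMMIT":
      case "ROLLBACK":
        if (!explicitTxn) {
          throw new IllegalStateException(operation + " not allowed in an implicit transaction");
        }
        break;
      default:
        if (!allowedInTxn && explicitTxn) {
          // e.g. DROP TABLE cannot be rolled back, so it is rejected in an explicit transaction
          throw new IllegalStateException(operation + " not allowed in an explicit transaction");
        }
    }
  }
}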

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
index 24df25b..53ee9c8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
@@ -301,8 +301,7 @@ class DummyTxnManager extends HiveTxnManagerImpl {
       new HiveLockObject.HiveLockObjectData(plan.getQueryId(),
                              String.valueOf(System.currentTimeMillis()),
                              "IMPLICIT",
-                             plan.getQueryStr(),
-                             conf);
+                             plan.getQueryStr());
 
     if (db != null) {
       locks.add(new HiveLockObj(new HiveLockObject(db.getName(), lockData),

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java
index a514339..fff03df 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java
@@ -23,7 +23,6 @@ import java.util.Map;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.hive.common.StringInternUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.metadata.DummyPartition;
 import org.apache.hadoop.hive.ql.metadata.Hive;
@@ -49,23 +48,16 @@ public class HiveLockObject {
      * Note: The parameters are used to uniquely identify a HiveLockObject. 
     * The parameters will be stripped of any ':' characters in order not
      * to interfere with the way the data is serialized (':' delimited string).
-     * The query string might be truncated depending on HIVE_LOCK_QUERY_STRING_MAX_LENGTH
-     * @param queryId The query identifier will be added to the object without change
-     * @param lockTime The lock time  will be added to the object without change
-     * @param lockMode The lock mode  will be added to the object without change
-     * @param queryStr The query string might be truncated based on
-     *     HIVE_LOCK_QUERY_STRING_MAX_LENGTH conf variable
-     * @param conf The hive configuration based on which we decide if we should truncate the query
-     *     string or not
      */
-    public HiveLockObjectData(String queryId, String lockTime, String lockMode, String queryStr,
-        HiveConf conf) {
+    public HiveLockObjectData(String queryId,
+        String lockTime,
+        String lockMode,
+        String queryStr) {
       this.queryId = removeDelimiter(queryId);
       this.lockTime = StringInternUtils.internIfNotNull(removeDelimiter(lockTime));
       this.lockMode = removeDelimiter(lockMode);
       this.queryStr = StringInternUtils.internIfNotNull(
-          queryStr == null ? null : StringUtils.substring(removeDelimiter(queryStr.trim()), 0,
-              conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_QUERY_STRING_MAX_LENGTH)));
+          removeDelimiter(queryStr == null ? null : queryStr.trim()));
     }
 
     /**
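
The constructor parameter removed above let HiveLockObjectData truncate the stored query string to HIVE_LOCK_QUERY_STRING_MAX_LENGTH before interning it, bounding the memory retained per lock object. A minimal plain-Java sketch of that truncate-then-intern pattern (the max length is passed directly here instead of being read from HiveConf, and String.intern stands in for StringInternUtils):

/** Truncate-then-intern pattern for strings kept on long-lived lock objects. */
final class QueryStringHolder {
  private final String queryStr;

  QueryStringHolder(String rawQuery, int maxLength) {
    if (rawQuery == null) {
      this.queryStr = null;
    } else {
      String trimmed = rawQuery.trim().replace(":", "");  // ':' is the serialization delimiter
      String bounded = trimmed.substring(0, Math.min(trimmed.length(), maxLength));
      this.queryStr = bounded.intern();
    }
  }

  String getQueryStr() {
    return queryStr;
  }
}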

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
index b24351c..187a658 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
@@ -72,7 +72,7 @@ public interface HiveTxnManager {
 
   /**
    * Acquire all of the locks needed by a query.  If used with a query that
-   * requires transactions, this should be called after {@link #openTxn(Context, String)}.
+   * requires transactions, this should be called after {@link #openTxn(String)}.
    * A list of acquired locks will be stored in the
    * {@link org.apache.hadoop.hive.ql.Context} object and can be retrieved
    * via {@link org.apache.hadoop.hive.ql.Context#getHiveLocks}.
@@ -208,13 +208,17 @@ public interface HiveTxnManager {
   boolean supportsAcid();
 
   /**
-   * For resources that support MVCC, the state of the DB must be recorded for the duration of the
-   * operation/transaction.  Returns {@code true} if the current statement needs to do this.
+   * This behaves exactly as
+   * https://docs.oracle.com/javase/6/docs/api/java/sql/Connection.html#setAutoCommit(boolean)
+   */
+  void setAutoCommit(boolean autoCommit) throws LockException;
+
+  /**
+   * This behaves exactly as
+   * https://docs.oracle.com/javase/6/docs/api/java/sql/Connection.html#getAutoCommit()
    */
-  boolean recordSnapshot(QueryPlan queryPlan);
+  boolean getAutoCommit();
 
-  boolean isImplicitTransactionOpen();
-  
   boolean isTxnOpen();
   /**
    * if {@code isTxnOpen()}, returns the currently active transaction ID
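
Since the reintroduced setAutoCommit/getAutoCommit are documented to behave exactly like java.sql.Connection, the contract they mirror is the standard JDBC one. A generic JDBC sketch of that contract for reference (not Hive-specific; the connection URL and table are placeholders):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class AutoCommitDemo {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:example://host/db")) { // placeholder URL
      conn.setAutoCommit(false);          // statements are now grouped into one transaction
      try (Statement stmt = conn.createStatement()) {
        stmt.executeUpdate("INSERT INTO t VALUES (1)");
        stmt.executeUpdate("INSERT INTO t VALUES (2)");
        conn.commit();                    // both inserts become visible atomically
      } catch (Exception e) {
        conn.rollback();                  // neither insert is applied
        throw e;
      }
      // getAutoCommit() now returns false until setAutoCommit(true) is called again.
    }
  }
}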

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java
index 8dbbf87..a371a5a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
 abstract class HiveTxnManagerImpl implements HiveTxnManager {
 
   protected HiveConf conf;
+  private boolean isAutoCommit = true;//true by default; matches JDBC spec
 
   void setHiveConf(HiveConf c) {
     conf = c;
@@ -67,6 +68,16 @@ abstract class HiveTxnManagerImpl implements HiveTxnManager {
     destruct();
   }
   @Override
+  public void setAutoCommit(boolean autoCommit) throws LockException {
+    isAutoCommit = autoCommit;
+  }
+
+  @Override
+  public boolean getAutoCommit() {
+    return isAutoCommit;
+  }
+
+  @Override
   public int lockTable(Hive db, LockTableDesc lockTbl) throws HiveException {
     HiveLockManager lockMgr = getAndCheckLockManager();
 
@@ -82,8 +93,7 @@ abstract class HiveTxnManagerImpl implements HiveTxnManager {
         new HiveLockObjectData(lockTbl.getQueryId(),
             String.valueOf(System.currentTimeMillis()),
             "EXPLICIT",
-            lockTbl.getQueryStr(),
-            conf);
+            lockTbl.getQueryStr());
 
     if (partSpec == null) {
       HiveLock lck = lockMgr.lock(new HiveLockObject(tbl, lockData), mode, true);
@@ -141,7 +151,7 @@ abstract class HiveTxnManagerImpl implements HiveTxnManager {
     HiveLockObjectData lockData =
         new HiveLockObjectData(lockDb.getQueryId(),
             String.valueOf(System.currentTimeMillis()),
-            "EXPLICIT", lockDb.getQueryStr(), conf);
+            "EXPLICIT", lockDb.getQueryStr());
 
     HiveLock lck = lockMgr.lock(new HiveLockObject(dbObj.getName(), lockData), mode, true);
     if (lck == null) {
@@ -192,13 +202,4 @@ abstract class HiveTxnManagerImpl implements HiveTxnManager {
 
     return lockMgr;
   }
-  @Override
-  public boolean recordSnapshot(QueryPlan queryPlan) {
-    return false;
-  }
-  @Override
-  public boolean isImplicitTransactionOpen() {
-    return true;
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java
index 9b46ae7..c2a4806 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java
@@ -285,10 +285,8 @@ public class ZooKeeperHiveLockManager implements HiveLockManager {
     int tryNum = 0;
     ZooKeeperHiveLock ret = null;
     Set<String> conflictingLocks = new HashSet<String>();
-    Exception lastException = null;
 
     do {
-      lastException = null;
       tryNum++;
       try {
         if (tryNum > 1) {
@@ -300,22 +298,26 @@ public class ZooKeeperHiveLockManager implements HiveLockManager {
           break;
         }
       } catch (Exception e1) {
-        lastException = e1;
         if (e1 instanceof KeeperException) {
           KeeperException e = (KeeperException) e1;
           switch (e.code()) {
           case CONNECTIONLOSS:
           case OPERATIONTIMEOUT:
-          case NONODE:
-          case NODEEXISTS:
             LOG.debug("Possibly transient ZooKeeper exception: ", e);
-            break;
+            continue;
           default:
             LOG.error("Serious Zookeeper exception: ", e);
             break;
           }
-        } else {
-          LOG.error("Other unexpected exception: ", e1);
+        }
+        if (tryNum >= numRetriesForLock) {
+          console.printError("Unable to acquire " + key.getData().getLockMode()
+              + ", " + mode + " lock " + key.getDisplayName() + " after "
+              + tryNum + " attempts.");
+          LOG.error("Exceeds maximum retries with errors: ", e1);
+          printConflictingLocks(key,mode,conflictingLocks);
+          conflictingLocks.clear();
+          throw new LockException(e1);
         }
       }
     } while (tryNum < numRetriesForLock);
@@ -325,11 +327,8 @@ public class ZooKeeperHiveLockManager implements HiveLockManager {
           + ", " + mode + " lock " + key.getDisplayName() + " after "
           + tryNum + " attempts.");
       printConflictingLocks(key,mode,conflictingLocks);
-      if (lastException != null) {
-        LOG.error("Exceeds maximum retries with errors: ", lastException);
-        throw new LockException(lastException);
-      }
     }
+    conflictingLocks.clear();
     return ret;
   }
 
@@ -351,19 +350,6 @@ public class ZooKeeperHiveLockManager implements HiveLockManager {
     }
   }
 
-  /**
-   * Creates a primitive lock object on ZooKeeper.
-   * @param key The lock data
-   * @param mode The lock mode (HiveLockMode - EXCLUSIVE/SHARED/SEMI_SHARED)
-   * @param keepAlive If true, create PERSISTENT ZooKeeper locks, otherwise EPHEMERAL ZooKeeper
-   *                  locks
-   * @param parentCreated If we expect that the parent is already created then true; otherwise
-   *                      we will try to create the parents as well
-   * @param conflictingLocks The set where we should collect the conflicting locks when
-   *                         the logging level is set to DEBUG
-   * @return The created ZooKeeperHiveLock object, null if there was a conflicting lock
-   * @throws Exception If there was an unexpected Exception
-   */
   private ZooKeeperHiveLock lockPrimitive(HiveLockObject key,
       HiveLockMode mode, boolean keepAlive, boolean parentCreated,
       Set<String> conflictingLocks)
@@ -404,7 +390,7 @@ public class ZooKeeperHiveLockManager implements HiveLockManager {
     int seqNo = getSequenceNumber(res, getLockName(lastName, mode));
     if (seqNo == -1) {
       curatorFramework.delete().forPath(res);
-      throw new LockException("The created node does not contain a sequence number: " + res);
+      return null;
     }
 
     List<String> children = curatorFramework.getChildren().forPath(lastName);
@@ -598,6 +584,7 @@ public class ZooKeeperHiveLockManager implements HiveLockManager {
 
   /**
    * @param conf        Hive configuration
+   * @param zkpClient   The ZooKeeper client
    * @param key         The object to be compared against - if key is null, then get all locks
    **/
   private static List<HiveLock> getLocks(HiveConf conf,
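
The hunk above reverts a pattern in which the retry loop remembers the last exception and, only after the final attempt, logs it and rethrows it wrapped in a LockException, instead of failing from inside the catch block. A generic, self-contained sketch of that retry-with-last-exception pattern (plain Java; the Attempt interface is a hypothetical stand-in for lockPrimitive):

/** Retry loop that reports the last underlying failure only after the attempts are exhausted. */
public class RetryWithLastException {
  interface Attempt<T> {
    T run() throws Exception;
  }

  static <T> T retry(Attempt<T> attempt, int maxTries, long sleepMillis) throws Exception {
    Exception lastException = null;
    for (int tryNum = 1; tryNum <= maxTries; tryNum++) {
      lastException = null;
      try {
        T result = attempt.run();
        if (result != null) {
          return result;                 // success
        }
      } catch (Exception e) {
        lastException = e;               // remember it; transient errors simply retry
      }
      if (tryNum < maxTries) {
        Thread.sleep(sleepMillis);       // back off before the next attempt
      }
    }
    if (lastException != null) {
      throw new Exception("Exceeded " + maxTries + " attempts", lastException);
    }
    return null;                         // retries exhausted without an exception (e.g. lock busy)
  }
}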

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppender.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppender.java b/ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppender.java
deleted file mode 100644
index 64ce100..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppender.java
+++ /dev/null
@@ -1,249 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.log;
-
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.hive.common.LogUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.log.PerfLogger;
-import org.apache.hadoop.hive.ql.session.OperationLog;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.core.LogEvent;
-import org.apache.logging.log4j.core.LoggerContext;
-import org.apache.logging.log4j.core.appender.RandomAccessFileAppender;
-import org.apache.logging.log4j.core.appender.routing.Route;
-import org.apache.logging.log4j.core.appender.routing.Routes;
-import org.apache.logging.log4j.core.appender.routing.RoutingAppender;
-import org.apache.logging.log4j.core.config.Configuration;
-import org.apache.logging.log4j.core.config.LoggerConfig;
-import org.apache.logging.log4j.core.config.Node;
-import org.apache.logging.log4j.core.config.plugins.Plugin;
-import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
-import org.apache.logging.log4j.core.config.plugins.PluginFactory;
-import org.apache.logging.log4j.core.config.plugins.processor.PluginEntry;
-import org.apache.logging.log4j.core.config.plugins.util.PluginType;
-import org.apache.logging.log4j.core.filter.AbstractFilter;
-import org.apache.logging.log4j.core.layout.PatternLayout;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-
-/**
- * Divert appender to redirect operation logs to separate files.
- */
-public class LogDivertAppender {
-  private static final org.slf4j.Logger LOG = LoggerFactory.getLogger(LogDivertAppender.class.getName());
-  public static final String verboseLayout = "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n";
-  public static final String nonVerboseLayout = "%-5p : %m%n";
-
-  /**
-   * A log filter that filters messages coming from the loggers with the given names.
-   * It can be used as a white-list filter or a black-list filter.
-   * We apply the black-list filter on the loggers used by the log diversion code, so that
-   * they don't generate more logs for themselves when they process logs.
-   * The white-list filter is used for less verbose log collection.
-   */
-  @Plugin(name = "NameFilter", category = "Core", elementType="filter", printObject = true)
-  private static class NameFilter extends AbstractFilter {
-    private Pattern namePattern;
-    private OperationLog.LoggingLevel loggingMode;
-
-    /* Patterns that are excluded in verbose logging level.
-     * Filter out messages coming from log processing classes, or we'll run an infinite loop.
-     */
-    private static final Pattern verboseExcludeNamePattern = Pattern.compile(Joiner.on("|").
-        join(new String[]{LOG.getName(), OperationLog.class.getName()}));
-
-    /* Patterns that are included in execution logging level.
-     * In execution mode, show only select logger messages.
-     */
-    private static final Pattern executionIncludeNamePattern = Pattern.compile(Joiner.on("|").
-        join(new String[]{"org.apache.hadoop.mapreduce.JobSubmitter",
-            "org.apache.hadoop.mapreduce.Job", "SessionState", Task.class.getName(),
-            Driver.class.getName(), "org.apache.hadoop.hive.ql.exec.spark.status.SparkJobMonitor"}));
-
-    /* Patterns that are included in performance logging level.
-     * In performance mode, show execution and performance logger messages.
-     */
-    private static final Pattern performanceIncludeNamePattern = Pattern.compile(
-        executionIncludeNamePattern.pattern() + "|" + PerfLogger.class.getName());
-
-    private void setCurrentNamePattern(OperationLog.LoggingLevel mode) {
-      if (mode == OperationLog.LoggingLevel.VERBOSE) {
-        this.namePattern = verboseExcludeNamePattern;
-      } else if (mode == OperationLog.LoggingLevel.EXECUTION) {
-        this.namePattern = executionIncludeNamePattern;
-      } else if (mode == OperationLog.LoggingLevel.PERFORMANCE) {
-        this.namePattern = performanceIncludeNamePattern;
-      }
-    }
-
-    public NameFilter(OperationLog.LoggingLevel loggingMode) {
-      this.loggingMode = loggingMode;
-      setCurrentNamePattern(loggingMode);
-    }
-
-    @Override
-    public Result filter(LogEvent event) {
-      boolean excludeMatches = (loggingMode == OperationLog.LoggingLevel.VERBOSE);
-
-      String logLevel = event.getContextMap().get(LogUtils.OPERATIONLOG_LEVEL_KEY);
-      logLevel = logLevel == null ? "" : logLevel;
-      OperationLog.LoggingLevel currentLoggingMode = OperationLog.getLoggingLevel(logLevel);
-      // If logging is disabled, deny everything.
-      if (currentLoggingMode == OperationLog.LoggingLevel.NONE) {
-        return Result.DENY;
-      }
-      // Look at the current session's setting
-      // and set the pattern and excludeMatches accordingly.
-      if (currentLoggingMode != loggingMode) {
-        loggingMode = currentLoggingMode;
-        excludeMatches = (loggingMode == OperationLog.LoggingLevel.VERBOSE);
-        setCurrentNamePattern(loggingMode);
-      }
-
-      boolean isMatch = namePattern.matcher(event.getLoggerName()).matches();
-
-      if (excludeMatches == isMatch) {
-        // Deny if this is black-list filter (excludeMatches = true) and it
-        // matched or if this is whitelist filter and it didn't match
-        return Result.DENY;
-      }
-
-      return Result.NEUTRAL;
-    }
-
-    @PluginFactory
-    public static NameFilter createFilter(
-        @PluginAttribute("loggingLevel") final String loggingLevel) {
-      // Name required for routing. Error out if it is not set.
-      Preconditions.checkNotNull(loggingLevel,
-          "loggingLevel must be specified for " + NameFilter.class.getName());
-
-      return new NameFilter(OperationLog.getLoggingLevel(loggingLevel));
-    }
-  }
-
-  /**
-   * Programmatically register a routing appender to Log4J configuration, which
-   * automatically writes the log of each query to an individual file.
-   * The equivalent property configuration is as follows:
-   * # queryId based routing file appender
-      appender.query-routing.type = Routing
-      appender.query-routing.name = query-routing
-      appender.query-routing.routes.type = Routes
-      appender.query-routing.routes.pattern = $${ctx:queryId}
-      # default route
-      appender.query-routing.routes.route-default.type = Route
-      appender.query-routing.routes.route-default.key = $${ctx:queryId}
-      appender.query-routing.routes.route-default.app.type = null
-      appender.query-routing.routes.route-default.app.name = Null
-      # queryId based route
-      appender.query-routing.routes.route-mdc.type = Route
-      appender.query-routing.routes.route-mdc.name = IrrelevantName-query-routing
-      appender.query-routing.routes.route-mdc.app.type = RandomAccessFile
-      appender.query-routing.routes.route-mdc.app.name = query-file-appender
-      appender.query-routing.routes.route-mdc.app.fileName = ${sys:hive.log.dir}/${ctx:sessionId}/${ctx:queryId}
-      appender.query-routing.routes.route-mdc.app.layout.type = PatternLayout
-      appender.query-routing.routes.route-mdc.app.layout.pattern = %d{ISO8601} %5p %c{2}: %m%n
-   * @param conf  the configuration for HiveServer2 instance
-   */
-  public static void registerRoutingAppender(org.apache.hadoop.conf.Configuration conf) {
-    String loggingLevel = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL);
-    OperationLog.LoggingLevel loggingMode = OperationLog.getLoggingLevel(loggingLevel);
-    String layout = loggingMode == OperationLog.LoggingLevel.VERBOSE ? verboseLayout : nonVerboseLayout;
-    String logLocation = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION);
-
-    // Create NullAppender
-    PluginEntry nullEntry = new PluginEntry();
-    nullEntry.setClassName(NullAppender.class.getName());
-    nullEntry.setKey("null");
-    nullEntry.setName("appender");
-    PluginType<NullAppender> nullChildType = new PluginType<NullAppender>(nullEntry, NullAppender.class, "appender");
-    Node nullChildNode = new Node(null, "Null", nullChildType);
-
-    // Create default route
-    PluginEntry defaultEntry = new PluginEntry();
-    defaultEntry.setClassName(Route.class.getName());
-    defaultEntry.setKey("route");
-    defaultEntry.setName("Route");
-    PluginType<Route> defaultType = new PluginType<Route>(defaultEntry, Route.class, "Route");
-    Node nullNode = new Node(null, "Route", defaultType);
-    nullNode.getChildren().add(nullChildNode);
-    Route defaultRoute = Route.createRoute(null, "${ctx:queryId}", nullNode);
-
-    // Create queryId based route
-    PluginEntry entry = new PluginEntry();
-    entry.setClassName(Route.class.getName());
-    entry.setKey("route");
-    entry.setName("Route");
-    PluginType<Route> type = new PluginType<Route>(entry, Route.class, "Route");
-    Node node = new Node(null, "Route", type);
-
-    PluginEntry childEntry = new PluginEntry();
-    childEntry.setClassName(RandomAccessFileAppender.class.getName());
-    childEntry.setKey("randomaccessfile");
-    childEntry.setName("appender");
-    PluginType<RandomAccessFileAppender> childType = new PluginType<RandomAccessFileAppender>(childEntry, RandomAccessFileAppender.class, "appender");
-    Node childNode = new Node(node, "RandomAccessFile", childType);
-    childNode.getAttributes().put("name", "query-file-appender");
-    childNode.getAttributes().put("fileName", logLocation + "/${ctx:sessionId}/${ctx:queryId}");
-    node.getChildren().add(childNode);
-
-    PluginEntry filterEntry = new PluginEntry();
-    filterEntry.setClassName(NameFilter.class.getName());
-    filterEntry.setKey("namefilter");
-    filterEntry.setName("namefilter");
-    PluginType<NameFilter> filterType = new PluginType<NameFilter>(filterEntry, NameFilter.class, "filter");
-    Node filterNode = new Node(childNode, "NameFilter", filterType);
-    filterNode.getAttributes().put("loggingLevel", loggingMode.name());
-    childNode.getChildren().add(filterNode);
-
-    PluginEntry layoutEntry = new PluginEntry();
-    layoutEntry.setClassName(PatternLayout.class.getName());
-    layoutEntry.setKey("patternlayout");
-    layoutEntry.setName("layout");
-    PluginType<PatternLayout> layoutType = new PluginType<PatternLayout>(layoutEntry, PatternLayout.class, "layout");
-    Node layoutNode = new Node(childNode, "PatternLayout", layoutType);
-    layoutNode.getAttributes().put("pattern", layout);
-    childNode.getChildren().add(layoutNode);
-
-    Route mdcRoute = Route.createRoute(null, null, node);
-    Routes routes = Routes.createRoutes("${ctx:queryId}", defaultRoute, mdcRoute);
-
-    LoggerContext context = (LoggerContext) LogManager.getContext(false);
-    Configuration configuration = context.getConfiguration();
-
-    RoutingAppender routingAppender = RoutingAppender.createAppender("query-routing",
-        "true",
-        routes,
-        configuration,
-        null,
-        null,
-        null);
-
-    LoggerConfig loggerConfig = configuration.getRootLogger();
-    loggerConfig.addAppender(routingAppender, null, null);
-    context.updateLoggers();
-    routingAppender.start();
-  }
-}
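
The deleted NameFilter above decides whether to pass a log event by matching the logger name against a regex, used as a black list in VERBOSE mode (exclude the log-diversion classes themselves) and as a white list in EXECUTION/PERFORMANCE mode (include only progress-reporting loggers). A stripped-down sketch of that matching logic without the Log4j2 plugin machinery (modes and patterns are illustrative, not the real ones):

import java.util.regex.Pattern;

/** White/black-list decision on a logger name, as in the deleted NameFilter. */
public class LoggerNameFilter {
  enum Mode { VERBOSE, EXECUTION }

  // VERBOSE excludes the log-diversion classes to avoid an infinite feedback loop;
  // EXECUTION includes only a small set of progress-reporting loggers.
  private static final Pattern VERBOSE_EXCLUDE =
      Pattern.compile(String.join("|", "org.example.LogDivert", "org.example.OperationLog"));
  private static final Pattern EXECUTION_INCLUDE =
      Pattern.compile(String.join("|", "org.apache.hadoop.mapreduce.Job", "SessionState"));

  static boolean accept(Mode mode, String loggerName) {
    boolean excludeMatches = (mode == Mode.VERBOSE);
    Pattern pattern = excludeMatches ? VERBOSE_EXCLUDE : EXECUTION_INCLUDE;
    boolean isMatch = pattern.matcher(loggerName).matches();
    // Deny when a black-list pattern matches, or when a white-list pattern does not.
    return excludeMatches != isMatch;
  }

  public static void main(String[] args) {
    System.out.println(accept(Mode.EXECUTION, "SessionState"));           // true: whitelisted
    System.out.println(accept(Mode.VERBOSE, "org.example.OperationLog")); // false: blacklisted
  }
}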


[09/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 3230c61..0b615cd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -37,13 +37,10 @@ import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantLock;
 
-import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Iterables;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
-
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.hive.common.JavaUtils;
-import org.apache.hadoop.hive.common.ValidReadTxnList;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidWriteIds;
 import org.apache.hadoop.hive.common.metrics.common.Metrics;
@@ -74,9 +71,12 @@ import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
 import org.apache.hadoop.hive.ql.hooks.Hook;
 import org.apache.hadoop.hive.ql.hooks.HookContext;
 import org.apache.hadoop.hive.ql.hooks.HookUtils;
-import org.apache.hadoop.hive.ql.hooks.HooksLoader;
+import org.apache.hadoop.hive.ql.hooks.MetricsQueryLifeTimeHook;
 import org.apache.hadoop.hive.ql.hooks.PostExecute;
 import org.apache.hadoop.hive.ql.hooks.PreExecute;
+import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHook;
+import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContext;
+import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContextImpl;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
@@ -101,7 +101,7 @@ import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
 import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContextImpl;
 import org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.parse.ParseException;
+import org.apache.hadoop.hive.ql.parse.ParseDriver;
 import org.apache.hadoop.hive.ql.parse.ParseUtils;
 import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
 import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
@@ -119,6 +119,8 @@ import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObje
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivObjectActionType;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext;
+import org.apache.hadoop.hive.ql.session.OperationLog;
+import org.apache.hadoop.hive.ql.session.OperationLog.LoggingLevel;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 import org.apache.hadoop.hive.serde2.ByteStream;
@@ -127,9 +129,7 @@ import org.apache.hadoop.mapred.ClusterStatus;
 import org.apache.hadoop.mapred.JobClient;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.MRJobConfig;
-
 import org.apache.hive.common.util.ShutdownHookManager;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -137,7 +137,6 @@ import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Sets;
 
-
 public class Driver implements CommandProcessor {
 
   static final private String CLASS_NAME = Driver.class.getName();
@@ -163,6 +162,11 @@ public class Driver implements CommandProcessor {
   private FetchTask fetchTask;
   List<HiveLock> hiveLocks = new ArrayList<HiveLock>();
 
+  // A list of FileSinkOperators writing in an ACID compliant manner
+  private Set<FileSinkDesc> acidSinks;
+  // whether any ACID table is involved in a query
+  private boolean acidInQuery;
+
   // A limit on the number of threads that can be launched
   private int maxthreads;
   private int tryCount = Integer.MAX_VALUE;
@@ -180,8 +184,7 @@ public class Driver implements CommandProcessor {
   private QueryState queryState;
 
   // Query hooks that execute before compilation and after execution
-  private QueryLifeTimeHookRunner queryLifeTimeHookRunner;
-  private final HooksLoader hooksLoader;
+  private List<QueryLifeTimeHook> queryHooks;
 
   public enum DriverState {
     INITIALIZED,
@@ -205,25 +208,6 @@ public class Driver implements CommandProcessor {
     // resource releases
     public final ReentrantLock stateLock = new ReentrantLock();
     public DriverState driverState = DriverState.INITIALIZED;
-    private static ThreadLocal<LockedDriverState> lds = new ThreadLocal<LockedDriverState>() {
-      @Override
-      protected LockedDriverState initialValue() {
-        return new LockedDriverState();
-      }
-    };
-
-    public static void setLockedDriverState(LockedDriverState lDrv) {
-      lds.set(lDrv);
-    }
-
-    public static LockedDriverState getLockedDriverState() {
-      return lds.get();
-    }
-
-    public static void removeLockedDriverState() {
-      if (lds != null)
-        lds.remove();
-    }
   }
 
   private boolean checkConcurrency() {
@@ -370,21 +354,11 @@ public class Driver implements CommandProcessor {
   }
 
   public Driver(QueryState queryState, String userName) {
-    this(queryState, userName, new HooksLoader(queryState.getConf()));
-  }
-
-  public Driver(HiveConf conf, HooksLoader hooksLoader) {
-    this(new QueryState(conf), null, hooksLoader);
-  }
-
-  private Driver(QueryState queryState, String userName, HooksLoader hooksLoader) {
     this.queryState = queryState;
     this.conf = queryState.getConf();
     isParallelEnabled = (conf != null)
         && HiveConf.getBoolVar(conf, ConfVars.HIVE_SERVER2_PARALLEL_COMPILATION);
     this.userName = userName;
-    this.hooksLoader = hooksLoader;
-    this.queryLifeTimeHookRunner = new QueryLifeTimeHookRunner(conf, hooksLoader, console);
   }
 
   /**
@@ -412,7 +386,7 @@ public class Driver implements CommandProcessor {
   // deferClose indicates if the close/destroy should be deferred when the process has been
   // interrupted, it should be set to true if the compile is called within another method like
   // runInternal, which defers the close to the called in that method.
-  private int compile(String command, boolean resetTaskIds, boolean deferClose) {
+  public int compile(String command, boolean resetTaskIds, boolean deferClose) {
     PerfLogger perfLogger = SessionState.getPerfLogger(true);
     perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DRIVER_RUN);
     perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.COMPILE);
@@ -452,8 +426,6 @@ public class Driver implements CommandProcessor {
       TaskFactory.resetId();
     }
 
-    LockedDriverState.setLockedDriverState(lDrvState);
-
     String queryId = conf.getVar(HiveConf.ConfVars.HIVEQUERYID);
 
     //save some info for webUI for use after plan is freed
@@ -466,8 +438,6 @@ public class Driver implements CommandProcessor {
 
     // Whether any error occurred during query compilation. Used for query lifetime hook.
     boolean compileError = false;
-    boolean parseError = false;
-
     try {
 
       // Initialize the transaction manager.  This must be done before analyze is called.
@@ -501,27 +471,26 @@ public class Driver implements CommandProcessor {
       ctx.setHDFSCleanup(true);
 
       perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARSE);
+      ASTNode tree = ParseUtils.parse(command, ctx);
+      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARSE);
 
       // Trigger query hook before compilation
-      queryLifeTimeHookRunner.runBeforeParseHook(command);
-
-      ASTNode tree;
-      try {
-        tree = ParseUtils.parse(command, ctx);
-      } catch (ParseException e) {
-        parseError = true;
-        throw e;
-      } finally {
-        queryLifeTimeHookRunner.runAfterParseHook(command, parseError);
+      queryHooks = loadQueryHooks();
+      if (queryHooks != null && !queryHooks.isEmpty()) {
+        QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl();
+        qhc.setHiveConf(conf);
+        qhc.setCommand(command);
+
+        for (QueryLifeTimeHook hook : queryHooks) {
+          hook.beforeCompile(qhc);
+        }
       }
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARSE);
-
-      queryLifeTimeHookRunner.runBeforeCompileHook(command);
 
       perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ANALYZE);
       BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(queryState, tree);
       List<HiveSemanticAnalyzerHook> saHooks =
-          hooksLoader.getHooks(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK, console);
+          getHooks(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK,
+              HiveSemanticAnalyzerHook.class);
 
       // Flush the metastore cache.  This assures that we don't pick up objects from a previous
       // query running in this same thread.  This has to be done after we get our semantic
@@ -529,15 +498,6 @@ public class Driver implements CommandProcessor {
       // because at that point we need access to the objects.
       Hive.get().getMSC().flushCache();
 
-      if(checkConcurrency() && startImplicitTxn(txnManager)) {
-        String userFromUGI = getUserFromUGI();
-        if (!txnManager.isTxnOpen()) {
-          if(userFromUGI == null) {
-            return 10;
-          }
-          long txnid = txnManager.openTxn(ctx, userFromUGI);
-        }
-      }
       // Do semantic analysis and plan generation
       if (saHooks != null && !saHooks.isEmpty()) {
         HiveSemanticAnalyzerHookContext hookCtx = new HiveSemanticAnalyzerHookContextImpl();
@@ -556,10 +516,15 @@ public class Driver implements CommandProcessor {
       } else {
         sem.analyze(tree, ctx);
       }
+      // Record any ACID compliant FileSinkOperators we saw so we can add our transaction ID to
+      // them later.
+      acidSinks = sem.getAcidFileSinks();
+
       LOG.info("Semantic Analysis Completed");
 
       // validate the plan
       sem.validate();
+      acidInQuery = sem.hasAcidInQuery();
       perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ANALYZE);
 
       if (isInterrupted()) {
@@ -602,8 +567,10 @@ public class Driver implements CommandProcessor {
       if (conf.getBoolVar(ConfVars.HIVE_LOG_EXPLAIN_OUTPUT)) {
         String explainOutput = getExplainOutput(sem, plan, tree);
         if (explainOutput != null) {
-          LOG.info("EXPLAIN output for queryid " + queryId + " : "
-            + explainOutput);
+          if (conf.getBoolVar(ConfVars.HIVE_LOG_EXPLAIN_OUTPUT)) {
+            LOG.info("EXPLAIN output for queryid " + queryId + " : "
+              + explainOutput);
+          }
           if (conf.isWebUiQueryInfoCacheEnabled()) {
             queryDisplay.setExplainPlan(explainOutput);
           }
@@ -642,12 +609,17 @@ public class Driver implements CommandProcessor {
     } finally {
       // Trigger post compilation hook. Note that if the compilation fails here then
       // before/after execution hook will never be executed.
-      if (!parseError) {
-        try {
-          queryLifeTimeHookRunner.runAfterCompilationHook(command, compileError);
-        } catch (Exception e) {
-          LOG.warn("Failed when invoking query after-compilation hook.", e);
+      try {
+        if (queryHooks != null && !queryHooks.isEmpty()) {
+          QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl();
+          qhc.setHiveConf(conf);
+          qhc.setCommand(command);
+          for (QueryLifeTimeHook hook : queryHooks) {
+            hook.afterCompile(qhc, compileError);
+          }
         }
+      } catch (Exception e) {
+        LOG.warn("Failed when invoking query after-compilation hook.", e);
       }
 
       double duration = perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.COMPILE)/1000.00;
@@ -677,55 +649,11 @@ public class Driver implements CommandProcessor {
     }
   }
 
-  private boolean startImplicitTxn(HiveTxnManager txnManager) throws LockException {
-    boolean shouldOpenImplicitTxn = !ctx.isExplainPlan();
-    //this is dumb. HiveOperation is not always set. see HIVE-16447/HIVE-16443
-    switch (queryState.getHiveOperation() == null ? HiveOperation.QUERY : queryState.getHiveOperation()) {
-      case COMMIT:
-      case ROLLBACK:
-        if(!txnManager.isTxnOpen()) {
-          throw new LockException(null, ErrorMsg.OP_NOT_ALLOWED_WITHOUT_TXN, queryState.getHiveOperation().getOperationName());
-        }
-      case SWITCHDATABASE:
-      case SET_AUTOCOMMIT:
-        /**
-         * autocommit is here for completeness.  TM doesn't use it.  If we want to support JDBC
-         * semantics (or any other definition of autocommit) it should be done at session level.
-         */
-      case SHOWDATABASES:
-      case SHOWTABLES:
-      case SHOWCOLUMNS:
-      case SHOWFUNCTIONS:
-      case SHOWINDEXES:
-      case SHOWPARTITIONS:
-      case SHOWLOCKS:
-      case SHOWVIEWS:
-      case SHOW_ROLES:
-      case SHOW_ROLE_PRINCIPALS:
-      case SHOW_COMPACTIONS:
-      case SHOW_TRANSACTIONS:
-      case ABORT_TRANSACTIONS:
-        shouldOpenImplicitTxn = false;
-        //this implies that no locks are needed for such a command
-    }
-    return shouldOpenImplicitTxn;
-  }
-  private int handleInterruption(String msg) {
-    return handleInterruptionWithHook(msg, null, null);
-  }
 
-  private int handleInterruptionWithHook(String msg, HookContext hookContext,
-      PerfLogger perfLogger) {
+  private int handleInterruption(String msg) {
     SQLState = "HY008";  //SQLState for cancel operation
     errorMessage = "FAILED: command has been interrupted: " + msg;
     console.printError(errorMessage);
-    if (hookContext != null) {
-      try {
-        invokeFailureHooks(perfLogger, hookContext, errorMessage, null);
-      } catch (Exception e) {
-        LOG.warn("Caught exception attempting to invoke Failure Hooks", e);
-      }
-    }
     return 1000;
   }
 
@@ -742,6 +670,19 @@ public class Driver implements CommandProcessor {
     }
   }
 
+  private List<QueryLifeTimeHook> loadQueryHooks() throws Exception {
+    List<QueryLifeTimeHook> hooks = new ArrayList<>();
+
+    if (conf.getBoolVar(ConfVars.HIVE_SERVER2_METRICS_ENABLED)) {
+      hooks.add(new MetricsQueryLifeTimeHook());
+    }
+    List<QueryLifeTimeHook> propertyDefinedHoooks = getHooks(ConfVars.HIVE_QUERY_LIFETIME_HOOKS, QueryLifeTimeHook.class);
+    if (propertyDefinedHoooks != null) {
+      Iterables.addAll(hooks, propertyDefinedHoooks);
+    }
+    return hooks;
+  }
+
   private ImmutableMap<String, Long> dumpMetaCallTimingWithoutEx(String phase) {
     try {
       return Hive.get().dumpAndClearMetaCallTiming(phase);
@@ -1123,17 +1064,8 @@ public class Driver implements CommandProcessor {
   // Write the current set of valid transactions into the conf file so that it can be read by
   // the input format.
   private void recordValidTxns() throws LockException {
-    ValidTxnList oldList = null;
-    String s = conf.get(ValidTxnList.VALID_TXNS_KEY);
-    if(s != null && s.length() > 0) {
-      oldList = new ValidReadTxnList(s);
-    }
     HiveTxnManager txnMgr = SessionState.get().getTxnMgr();
     ValidTxnList txns = txnMgr.getValidTxns();
-    if(oldList != null) {
-      throw new IllegalStateException("calling recordValidTxn() more than once in the same " +
-        JavaUtils.txnIdToString(txnMgr.getCurrentTxnId()));
-    }
     String txnStr = txns.toString();
     conf.set(ValidTxnList.VALID_TXNS_KEY, txnStr);
     if(plan.getFetchTask() != null) {
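The hunk above keeps the core of recordValidTxns(): the transaction snapshot is serialized to a string and handed to readers through a single conf key (ValidTxnList.VALID_TXNS_KEY). Below is a minimal standalone sketch of that idea; the encoding shown (a high-water mark plus a list of open transaction ids) is only illustrative and is not Hive's actual ValidTxnList format.

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

// Illustrative snapshot codec: "highWatermark:open1,open2,...".
// Hive's real ValidTxnList defines its own string form; this only shows the mechanism of
// passing a snapshot to readers through one string-valued configuration key.
public class TxnSnapshotSketch {
  static final String VALID_TXNS_KEY = "example.valid.txns";   // stand-in for ValidTxnList.VALID_TXNS_KEY

  static String encode(long highWatermark, List<Long> openTxns) {
    StringBuilder sb = new StringBuilder().append(highWatermark).append(':');
    for (int i = 0; i < openTxns.size(); i++) {
      if (i > 0) sb.append(',');
      sb.append(openTxns.get(i));
    }
    return sb.toString();
  }

  static boolean isVisible(String encoded, long txnId) {
    String[] parts = encoded.split(":", 2);
    long highWatermark = Long.parseLong(parts[0]);
    if (txnId > highWatermark) return false;              // started after the snapshot
    if (parts.length > 1 && !parts[1].isEmpty()) {
      for (String open : parts[1].split(",")) {
        if (Long.parseLong(open) == txnId) return false;  // still open at snapshot time
      }
    }
    return true;
  }

  public static void main(String[] args) {
    Properties conf = new Properties();                    // stand-in for the job configuration
    List<Long> open = new ArrayList<>();
    open.add(7L);
    conf.setProperty(VALID_TXNS_KEY, encode(10L, open));   // what recordValidTxns() conceptually does
    String snapshot = conf.getProperty(VALID_TXNS_KEY);    // what an input format would read back
    System.out.println(isVisible(snapshot, 5));   // true: committed below the high-water mark
    System.out.println(isVisible(snapshot, 7));   // false: open when the snapshot was taken
    System.out.println(isVisible(snapshot, 12));  // false: above the high-water mark
  }
}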
@@ -1147,61 +1079,79 @@ public class Driver implements CommandProcessor {
     LOG.debug("Encoding valid txns info " + txnStr + " txnid:" + txnMgr.getCurrentTxnId());
   }
 
-  private String getUserFromUGI() {
-    // Don't use the userName member, as it may or may not have been set.  Get the value from
-    // conf, which calls into getUGI to figure out who the process is running as.
-    try {
-      return conf.getUser();
-    } catch (IOException e) {
-      errorMessage = "FAILED: Error in determining user while acquiring locks: " + e.getMessage();
-      SQLState = ErrorMsg.findSQLState(e.getMessage());
-      downstreamError = e;
-      console.printError(errorMessage,
-        "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
-    }
-    return null;
-  }
   /**
    * Acquire read and write locks needed by the statement. The list of objects to be locked are
-   * obtained from the inputs and outputs populated by the compiler.  Locking strategy depends on
-   * HiveTxnManager and HiveLockManager configured
+   * obtained from the inputs and outputs populated by the compiler. The lock acquisition scheme is
+   * simple: if any of the required locks cannot be obtained, error out. Deadlock is avoided by
+   * making sure the locks are acquired in lexicographic order.
    *
    * This method also records the list of valid transactions.  This must be done after any
-   * transactions have been opened.
+   * transactions have been opened and locks acquired.
+   * @param startTxnImplicitly in AC=false, the 1st DML starts a txn
    **/
-  private int acquireLocks() {
+  private int acquireLocksAndOpenTxn(boolean startTxnImplicitly) {
     PerfLogger perfLogger = SessionState.getPerfLogger();
     perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ACQUIRE_READ_WRITE_LOCKS);
 
     SessionState ss = SessionState.get();
     HiveTxnManager txnMgr = ss.getTxnMgr();
-    if(!txnMgr.isTxnOpen() && txnMgr.supportsAcid()) {
-      /*non acid txn managers don't support txns but fwd lock requests to lock managers
-        acid txn manager requires all locks to be associated with a txn so if we
-        end up here w/o an open txn it's because we are processing something like "use <database>
-        which by definition needs no locks*/
-      return 0;
+    if(startTxnImplicitly) {
+      assert !txnMgr.getAutoCommit();
     }
+
     try {
-      String userFromUGI = getUserFromUGI();
-      if(userFromUGI == null) {
+      // Don't use the userName member, as it may or may not have been set.  Get the value from
+      // conf, which calls into getUGI to figure out who the process is running as.
+      String userFromUGI;
+      try {
+        userFromUGI = conf.getUser();
+      } catch (IOException e) {
+        errorMessage = "FAILED: Error in determining user while acquiring locks: " + e.getMessage();
+        SQLState = ErrorMsg.findSQLState(e.getMessage());
+        downstreamError = e;
+        console.printError(errorMessage,
+            "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
         return 10;
       }
+
+      boolean initiatingTransaction = false;
+      boolean readOnlyQueryInAutoCommit = false;
+      if((txnMgr.getAutoCommit() && haveAcidWrite()) || plan.getOperation() == HiveOperation.START_TRANSACTION ||
+        (!txnMgr.getAutoCommit() && startTxnImplicitly)) {
+        if(txnMgr.isTxnOpen()) {
+          throw new RuntimeException("Already have an open transaction txnid:" + txnMgr.getCurrentTxnId());
+        }
+        // We are writing to tables in an ACID compliant way, so we need to open a transaction
+        txnMgr.openTxn(ctx, userFromUGI);
+        initiatingTransaction = true;
+      }
+      else {
+        readOnlyQueryInAutoCommit = txnMgr.getAutoCommit() && plan.getOperation() == HiveOperation.QUERY && !haveAcidWrite();
+      }
       // Set the transaction id in all of the acid file sinks
       if (haveAcidWrite()) {
-        for (FileSinkDesc desc : plan.getAcidSinks()) {
+        for (FileSinkDesc desc : acidSinks) {
           desc.setTransactionId(txnMgr.getCurrentTxnId());
           //it's possible to have > 1 FileSink writing to the same table/partition
           //e.g. Merge stmt, multi-insert stmt when mixing DP and SP writes
           desc.setStatementId(txnMgr.getWriteIdAndIncrement());
         }
       }
-      /*It's imperative that {@code acquireLocks()} is called for all commands so that 
-      HiveTxnManager can transition its state machine correctly*/
+      /*Note: we have to record the snapshot after lock acquisition to prevent the lost-update problem.
+      Consider 2 concurrent "update table T set x = x + 1" statements: the 1st will get the locks and the
+      2nd will block until the 1st one commits; only then does it lock in its snapshot, i.e. it will
+      see the changes made by the 1st one.  This takes care of the autoCommit=true case.
+      For multi-stmt txns this is not sufficient and will be managed via WriteSet tracking
+      in the lock manager.*/
       txnMgr.acquireLocks(plan, ctx, userFromUGI, lDrvState);
-      if(txnMgr.recordSnapshot(plan)) {
+      if(initiatingTransaction || (readOnlyQueryInAutoCommit && acidInQuery)) {
+        //For multi-stmt txns we should record the snapshot when txn starts but
+        // don't update it after that until txn completes.  Thus the check for {@code initiatingTransaction}
+        //For autoCommit=true, Read-only statements, txn is implicit, i.e. lock in the snapshot
+        //for each statement.
         recordValidTxns();
       }
+
       return 0;
     } catch (Exception e) {
       errorMessage = "FAILED: Error in acquiring locks: " + e.getMessage();
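The javadoc and comments in the hunk above describe two ordering rules: locks are acquired in lexicographic order of the locked objects to avoid deadlock, and the transaction snapshot is only recorded once the locks are held so that a second writer observes the first writer's commit. The following is a minimal standalone sketch of both rules using plain ReentrantLocks keyed by object name; the names and the takeSnapshot() step are illustrative, not Hive's lock manager.

import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;

public class OrderedLockingSketch {
  // One lock per lockable object (e.g. "db/table/partition"), shared by all queries.
  private static final ConcurrentHashMap<String, ReentrantLock> LOCKS = new ConcurrentHashMap<>();

  /** Acquires all requested locks in lexicographic order, then records the snapshot. */
  static Map<String, ReentrantLock> acquireLocksThenSnapshot(Iterable<String> lockNames) {
    // TreeMap iterates keys in sorted order, so every caller locks in the same global order.
    Map<String, ReentrantLock> acquired = new TreeMap<>();
    for (String name : lockNames) {
      acquired.put(name, LOCKS.computeIfAbsent(name, n -> new ReentrantLock()));
    }
    for (ReentrantLock lock : acquired.values()) {
      lock.lock();
    }
    takeSnapshot();  // only now, so we see everything committed by earlier lock holders
    return acquired;
  }

  static void releaseLocks(Map<String, ReentrantLock> acquired) {
    for (ReentrantLock lock : acquired.values()) {
      lock.unlock();
    }
  }

  private static void takeSnapshot() {
    // Placeholder for recordValidTxns(): capture the set of transactions visible to this query.
    System.out.println("snapshot recorded after locks were granted");
  }

  public static void main(String[] args) {
    Map<String, ReentrantLock> held =
        acquireLocksThenSnapshot(java.util.Arrays.asList("default/t", "default/s"));
    try {
      System.out.println("locked: " + held.keySet());  // printed in sorted (acquisition) order
    } finally {
      releaseLocks(held);
    }
  }
}

Sorting before acquiring is what makes the scheme deadlock-free: two statements that need overlapping lock sets always contend on the lexicographically smallest shared name first.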
@@ -1216,7 +1166,7 @@ public class Driver implements CommandProcessor {
   }
 
   private boolean haveAcidWrite() {
-    return !plan.getAcidSinks().isEmpty();
+    return acidSinks != null && !acidSinks.isEmpty();
   }
   /**
    * @param commit if there is an open transaction and if true, commit,
@@ -1224,11 +1174,11 @@ public class Driver implements CommandProcessor {
    * @param txnManager an optional existing transaction manager retrieved earlier from the session
    *
    **/
-  @VisibleForTesting
-  public void releaseLocksAndCommitOrRollback(boolean commit, HiveTxnManager txnManager)
+  private void releaseLocksAndCommitOrRollback(boolean commit, HiveTxnManager txnManager)
       throws LockException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
     perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.RELEASE_LOCKS);
+
     HiveTxnManager txnMgr;
     if (txnManager == null) {
       SessionState ss = SessionState.get();
@@ -1238,7 +1188,6 @@ public class Driver implements CommandProcessor {
     }
     // If we've opened a transaction we need to commit or rollback rather than explicitly
     // releasing the locks.
-    conf.unset(ValidTxnList.VALID_TXNS_KEY);
     if (txnMgr.isTxnOpen()) {
       if (commit) {
         if(conf.getBoolVar(ConfVars.HIVE_IN_TEST) && conf.getBoolVar(ConfVars.HIVETESTMODEROLLBACKTXN)) {
@@ -1360,20 +1309,16 @@ public class Driver implements CommandProcessor {
       metrics.incrementCounter(MetricsConstant.WAITING_COMPILE_OPS, 1);
     }
 
-    PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.WAIT_COMPILE);
     final ReentrantLock compileLock = tryAcquireCompileLock(isParallelEnabled,
       command);
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.WAIT_COMPILE);
-    if (metrics != null) {
-      metrics.decrementCounter(MetricsConstant.WAITING_COMPILE_OPS, 1);
-    }
-
     if (compileLock == null) {
       return ErrorMsg.COMPILE_LOCK_TIMED_OUT.getErrorCode();
     }
 
     try {
+      if (metrics != null) {
+        metrics.decrementCounter(MetricsConstant.WAITING_COMPILE_OPS, 1);
+      }
       ret = compile(command, true, deferClose);
     } finally {
       compileLock.unlock();
@@ -1391,6 +1336,7 @@ public class Driver implements CommandProcessor {
     //Save compile-time PerfLogging for WebUI.
     //Execution-time Perf logs are done by either another thread's PerfLogger
     //or a reset PerfLogger.
+    PerfLogger perfLogger = SessionState.getPerfLogger();
     queryDisplay.setPerfLogStarts(QueryDisplay.Phase.COMPILATION, perfLogger.getStartTimes());
     queryDisplay.setPerfLogEnds(QueryDisplay.Phase.COMPILATION, perfLogger.getEndTimes());
     return ret;
@@ -1433,6 +1379,11 @@ public class Driver implements CommandProcessor {
       LOG.debug("Waiting to acquire compile lock: " + command);
     }
 
+    OperationLog ol = OperationLog.getCurrentOperationLog();
+    if (ol != null) {
+      ol.writeOperationLog(LoggingLevel.EXECUTION, "Waiting to acquire compile lock.\n");
+    }
+
     if (maxCompileLockWaitTime > 0) {
       try {
         if(!compileLock.tryLock(maxCompileLockWaitTime, TimeUnit.SECONDS)) {
@@ -1452,6 +1403,9 @@ public class Driver implements CommandProcessor {
     }
 
     LOG.debug(lockAcquiredMsg);
+    if (ol != null) {
+        ol.writeOperationLog(LoggingLevel.EXECUTION, lockAcquiredMsg + "\n");
+    }
     return compileLock;
   }
 
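The hunks above restore operation-log messages around tryAcquireCompileLock(); the underlying mechanism is a ReentrantLock that is either taken unconditionally or with a bounded wait, returning null on timeout so the caller can map it to COMPILE_LOCK_TIMED_OUT. A self-contained sketch of that pattern follows; the lock, timeout value and log lines are illustrative.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

public class CompileLockSketch {
  private static final ReentrantLock GLOBAL_COMPILE_LOCK = new ReentrantLock();

  /**
   * Returns the lock once acquired, or null if the bounded wait expired.
   * A null return is the caller's cue to fail with a "compile lock timed out" error.
   */
  static ReentrantLock tryAcquireCompileLock(long maxWaitSeconds, String command) {
    System.out.println("Waiting to acquire compile lock: " + command);
    if (maxWaitSeconds > 0) {
      try {
        if (!GLOBAL_COMPILE_LOCK.tryLock(maxWaitSeconds, TimeUnit.SECONDS)) {
          System.out.println("Could not acquire the compile lock within " + maxWaitSeconds + "s");
          return null;
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return null;
      }
    } else {
      GLOBAL_COMPILE_LOCK.lock();  // no timeout configured: wait indefinitely
    }
    System.out.println("Acquired the compile lock");
    return GLOBAL_COMPILE_LOCK;
  }

  public static void main(String[] args) {
    ReentrantLock lock = tryAcquireCompileLock(5, "select 1");
    if (lock == null) {
      System.out.println("FAILED: compile lock timed out");
      return;
    }
    try {
      // compile(command, ...) would run here
    } finally {
      lock.unlock();
    }
  }
}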
@@ -1460,8 +1414,6 @@ public class Driver implements CommandProcessor {
     errorMessage = null;
     SQLState = null;
     downstreamError = null;
-    LockedDriverState.setLockedDriverState(lDrvState);
-
     lDrvState.stateLock.lock();
     try {
       if (alreadyCompiled) {
@@ -1488,7 +1440,8 @@ public class Driver implements CommandProcessor {
       // Get all the driver run hooks and pre-execute them.
       List<HiveDriverRunHook> driverRunHooks;
       try {
-        driverRunHooks = hooksLoader.getHooks(HiveConf.ConfVars.HIVE_DRIVER_RUN_HOOKS, console);
+        driverRunHooks = getHooks(HiveConf.ConfVars.HIVE_DRIVER_RUN_HOOKS,
+            HiveDriverRunHook.class);
         for (HiveDriverRunHook driverRunHook : driverRunHooks) {
             driverRunHook.preDriverRun(hookContext);
         }
@@ -1524,12 +1477,52 @@ public class Driver implements CommandProcessor {
       HiveTxnManager txnManager = SessionState.get().getTxnMgr();
       ctx.setHiveTxnManager(txnManager);
 
+      boolean startTxnImplicitly = false;
+      {
+        //this block ensures op makes sense in given context, e.g. COMMIT is valid only if txn is open
+        //DDL is not allowed in a txn, etc.
+        //an error in an open txn does a rollback of the txn
+        if (txnManager.isTxnOpen() && !plan.getOperation().isAllowedInTransaction()) {
+          assert !txnManager.getAutoCommit() : "didn't expect AC=true";
+          return rollback(new CommandProcessorResponse(12, ErrorMsg.OP_NOT_ALLOWED_IN_TXN, null,
+            plan.getOperationName(), Long.toString(txnManager.getCurrentTxnId())));
+        }
+        if(!txnManager.isTxnOpen() && plan.getOperation().isRequiresOpenTransaction()) {
+          return rollback(new CommandProcessorResponse(12, ErrorMsg.OP_NOT_ALLOWED_WITHOUT_TXN, null, plan.getOperationName()));
+        }
+        if(!txnManager.isTxnOpen() && plan.getOperation() == HiveOperation.QUERY && !txnManager.getAutoCommit()) {
+          //this effectively makes START TRANSACTION optional and supports JDBC setAutoCommit(false) semantics
+          //also, indirectly allows DDL to be executed outside a txn context
+          startTxnImplicitly = true;
+        }
+        if(txnManager.getAutoCommit() && plan.getOperation() == HiveOperation.START_TRANSACTION) {
+          return rollback(new CommandProcessorResponse(12, ErrorMsg.OP_NOT_ALLOWED_IN_AUTOCOMMIT, null, plan.getOperationName()));
+        }
+      }
+      if(plan.getOperation() == HiveOperation.SET_AUTOCOMMIT) {
+        try {
+          if(plan.getAutoCommitValue() && !txnManager.getAutoCommit()) {
+            /*here, if there is an open txn, we want to commit it; this behavior matches
+            * https://docs.oracle.com/javase/6/docs/api/java/sql/Connection.html#setAutoCommit(boolean)*/
+            releaseLocksAndCommitOrRollback(true, null);
+            txnManager.setAutoCommit(true);
+          }
+          else if(!plan.getAutoCommitValue() && txnManager.getAutoCommit()) {
+            txnManager.setAutoCommit(false);
+          }
+          else {/*didn't change autoCommit value - no-op*/}
+        }
+        catch(LockException e) {
+          return handleHiveException(e, 12);
+        }
+      }
+
       if (requiresLock()) {
         // a checkpoint to see if the thread is interrupted or not before an expensive operation
         if (isInterrupted()) {
           ret = handleInterruption("at acquiring the lock.");
         } else {
-          ret = acquireLocks();
+          ret = acquireLocksAndOpenTxn(startTxnImplicitly);
         }
         if (ret != 0) {
           return rollback(createProcessorResponse(ret));
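The block added above enforces a small state machine before locks are taken: an operation that is not allowed inside a transaction is rejected while a txn is open, COMMIT/ROLLBACK are rejected without one, a plain QUERY under autoCommit=false starts a txn implicitly, and switching autoCommit back to true first commits any open txn, mirroring the java.sql.Connection.setAutoCommit contract. A standalone sketch of those rules with a toy transaction manager is shown below; all class and method names here are illustrative, not Hive's.

public class AutoCommitSketch {
  /** Toy stand-in for a transaction manager: tracks one open transaction and an autoCommit flag. */
  static class ToyTxnManager {
    private boolean autoCommit = true;
    private long openTxnId = -1;

    boolean isTxnOpen()     { return openTxnId > 0; }
    boolean getAutoCommit() { return autoCommit; }
    void openTxn(long id)   { openTxnId = id; }
    void commit()           { System.out.println("commit txn " + openTxnId); openTxnId = -1; }
    void rollback()         { System.out.println("rollback txn " + openTxnId); openTxnId = -1; }

    /** Matches the JDBC contract: enabling autoCommit commits the transaction that is open. */
    void setAutoCommit(boolean enable) {
      if (enable && !autoCommit && isTxnOpen()) {
        commit();
      }
      autoCommit = enable;
    }
  }

  enum Op { QUERY, DDL, COMMIT, ROLLBACK, START_TRANSACTION }

  /** Returns an error string, or null if the operation may proceed. */
  static String validate(ToyTxnManager txnMgr, Op op) {
    boolean allowedInTxn = (op == Op.QUERY || op == Op.COMMIT || op == Op.ROLLBACK);
    if (txnMgr.isTxnOpen() && !allowedInTxn) {
      return "Operation " + op + " is not allowed in a transaction";
    }
    if (!txnMgr.isTxnOpen() && (op == Op.COMMIT || op == Op.ROLLBACK)) {
      return "Operation " + op + " is not allowed without an active transaction";
    }
    if (txnMgr.getAutoCommit() && op == Op.START_TRANSACTION) {
      return "Operation " + op + " is not allowed when autoCommit=true";
    }
    return null;
  }

  public static void main(String[] args) {
    ToyTxnManager txnMgr = new ToyTxnManager();
    txnMgr.setAutoCommit(false);
    System.out.println(validate(txnMgr, Op.QUERY));              // null: allowed, would start a txn implicitly
    txnMgr.openTxn(42);
    System.out.println(validate(txnMgr, Op.DDL));                // rejected inside an open txn
    txnMgr.setAutoCommit(true);                                  // commits txn 42 first, per the JDBC contract
    System.out.println(validate(txnMgr, Op.START_TRANSACTION));  // rejected under autoCommit=true
  }
}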
@@ -1550,8 +1543,7 @@ public class Driver implements CommandProcessor {
 
       //if needRequireLock is false, the release here will do nothing because there is no lock
       try {
-        //since set autocommit starts an implicit txn, close it
-        if(txnManager.isImplicitTransactionOpen() || plan.getOperation() == HiveOperation.COMMIT) {
+        if(txnManager.getAutoCommit() || plan.getOperation() == HiveOperation.COMMIT) {
           releaseLocksAndCommitOrRollback(true, null);
         }
         else if(plan.getOperation() == HiveOperation.ROLLBACK) {
@@ -1720,16 +1712,37 @@ public class Driver implements CommandProcessor {
   private CommandProcessorResponse createProcessorResponse(int ret) {
     SessionState.getPerfLogger().cleanupPerfLogMetrics();
     queryDisplay.setErrorMessage(errorMessage);
-    if(downstreamError != null && downstreamError instanceof HiveException) {
-      ErrorMsg em = ((HiveException)downstreamError).getCanonicalErrorMsg();
-      if(em != null) {
-        return new CommandProcessorResponse(ret, errorMessage, SQLState,
-          schema, downstreamError, em.getErrorCode(), null);
-      }
-    }
     return new CommandProcessorResponse(ret, errorMessage, SQLState, downstreamError);
   }
 
+  /**
+   * Returns a set of hooks specified in a configuration variable.
+   * See getHooks(HiveConf.ConfVars hookConfVar, Class<T> clazz)
+   */
+  private List<Hook> getHooks(HiveConf.ConfVars hookConfVar) throws Exception {
+    return getHooks(hookConfVar, Hook.class);
+  }
+
+  /**
+   * Returns the hooks specified in a configuration variable.
+   *
+   * @param hookConfVar The configuration variable specifying a comma separated list of the hook
+   *                    class names.
+   * @param clazz       The super type of the hooks.
+   * @return            A list of the hooks cast as the type specified in clazz, in the order
+   *                    they are listed in the value of hookConfVar
+   * @throws Exception
+   */
+  private <T extends Hook> List<T> getHooks(ConfVars hookConfVar,
+      Class<T> clazz) throws Exception {
+    try {
+      return HookUtils.getHooks(conf, hookConfVar, clazz);
+    } catch (ClassNotFoundException e) {
+      console.printError(hookConfVar.varname + " Class not found:" + e.getMessage());
+      throw e;
+    }
+  }
+
   public int execute() throws CommandNeedRetryException {
     return execute(false);
   }
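getHooks() above turns a comma-separated list of class names from the configuration into hook instances. A minimal standalone version of that loader, using only reflection from the JDK, is sketched below; the property name and Hook interface here are placeholders, not Hive's HookUtils.

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

public class HookLoaderSketch {
  /** Placeholder for the Hook marker interface. */
  public interface Hook { }
  public static class LoggingHook implements Hook { }   // example hook implementation

  /** Instantiates every class named in the comma-separated property, in listed order. */
  static <T extends Hook> List<T> getHooks(Properties conf, String key, Class<T> clazz)
      throws Exception {
    List<T> hooks = new ArrayList<>();
    String names = conf.getProperty(key, "").trim();
    if (names.isEmpty()) {
      return hooks;
    }
    for (String name : names.split(",")) {
      try {
        Class<?> hookClass = Class.forName(name.trim(), true,
            Thread.currentThread().getContextClassLoader());
        hooks.add(clazz.cast(hookClass.getDeclaredConstructor().newInstance()));
      } catch (ClassNotFoundException e) {
        System.err.println(key + " Class not found: " + e.getMessage());
        throw e;   // same shape as the diff: report the bad class name, then rethrow
      }
    }
    return hooks;
  }

  public static void main(String[] args) throws Exception {
    Properties conf = new Properties();
    conf.setProperty("example.pre.hooks", LoggingHook.class.getName());
    List<Hook> hooks = getHooks(conf, "example.pre.hooks", Hook.class);
    System.out.println("loaded " + hooks.size() + " hook(s): " + hooks.get(0).getClass().getName());
  }
}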
@@ -1794,7 +1807,7 @@ public class Driver implements CommandProcessor {
           ss.getSessionId(), Thread.currentThread().getName(), ss.isHiveServerQuery(), perfLogger);
       hookContext.setHookType(HookContext.HookType.PRE_EXEC_HOOK);
 
-      for (Hook peh : hooksLoader.getHooks(HiveConf.ConfVars.PREEXECHOOKS, console)) {
+      for (Hook peh : getHooks(HiveConf.ConfVars.PREEXECHOOKS)) {
         if (peh instanceof ExecuteWithHookContext) {
           perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PRE_HOOK + peh.getClass().getName());
 
@@ -1812,7 +1825,16 @@ public class Driver implements CommandProcessor {
       }
 
       // Trigger query hooks before query execution.
-      queryLifeTimeHookRunner.runBeforeExecutionHook(queryStr, hookContext);
+      if (queryHooks != null && !queryHooks.isEmpty()) {
+        QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl();
+        qhc.setHiveConf(conf);
+        qhc.setCommand(queryStr);
+        qhc.setHookContext(hookContext);
+
+        for (QueryLifeTimeHook hook : queryHooks) {
+          hook.beforeExecution(qhc);
+        }
+      }
 
       setQueryDisplays(plan.getRootTasks());
       int mrJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
@@ -1837,7 +1859,7 @@ public class Driver implements CommandProcessor {
       // The main thread polls the TaskRunners to check if they have finished.
 
       if (isInterrupted()) {
-        return handleInterruptionWithHook("before running tasks.", hookContext, perfLogger);
+        return handleInterruption("before running tasks.");
       }
       DriverContext driverCxt = new DriverContext(ctx);
       driverCxt.prepare(plan);
@@ -1887,7 +1909,7 @@ public class Driver implements CommandProcessor {
 
         int exitVal = result.getExitVal();
         if (isInterrupted()) {
-          return handleInterruptionWithHook("when checking the execution result.", hookContext, perfLogger);
+          return handleInterruption("when checking the execution result.");
         }
         if (exitVal != 0) {
           if (tsk.ifRetryCmdWhenFail()) {
@@ -1912,9 +1934,6 @@ public class Driver implements CommandProcessor {
 
           } else {
             setErrorMsgAndDetail(exitVal, result.getTaskError(), tsk);
-            if (driverCxt.isShutdown()) {
-              errorMessage = "FAILED: Operation cancelled. " + errorMessage;
-            }
             invokeFailureHooks(perfLogger, hookContext,
               errorMessage + Strings.nullToEmpty(tsk.getDiagnosticsMessage()), result.getTaskError());
             SQLState = "08S01";
@@ -1973,7 +1992,7 @@ public class Driver implements CommandProcessor {
 
       hookContext.setHookType(HookContext.HookType.POST_EXEC_HOOK);
       // Get all the post execution hooks and execute them.
-      for (Hook peh : hooksLoader.getHooks(HiveConf.ConfVars.POSTEXECHOOKS, console)) {
+      for (Hook peh : getHooks(HiveConf.ConfVars.POSTEXECHOOKS)) {
         if (peh instanceof ExecuteWithHookContext) {
           perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.POST_HOOK + peh.getClass().getName());
 
@@ -2003,7 +2022,7 @@ public class Driver implements CommandProcessor {
     } catch (Throwable e) {
       executionError = true;
       if (isInterrupted()) {
-        return handleInterruptionWithHook("during query execution: \n" + e.getMessage(), hookContext, perfLogger);
+        return handleInterruption("during query execution: \n" + e.getMessage());
       }
 
       ctx.restoreOriginalTracker();
@@ -2028,7 +2047,16 @@ public class Driver implements CommandProcessor {
     } finally {
       // Trigger query hooks after query completes its execution.
       try {
-        queryLifeTimeHookRunner.runAfterExecutionHook(queryStr, hookContext, executionError);
+        if (queryHooks != null && !queryHooks.isEmpty()) {
+          QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl();
+          qhc.setHiveConf(conf);
+          qhc.setCommand(queryStr);
+          qhc.setHookContext(hookContext);
+
+          for (QueryLifeTimeHook hook : queryHooks) {
+            hook.afterExecution(qhc, executionError);
+          }
+        }
       } catch (Exception e) {
         LOG.warn("Failed when invoking query after execution hook", e);
       }
@@ -2119,6 +2147,13 @@ public class Driver implements CommandProcessor {
     }
     String warning = HiveConf.generateMrDeprecationWarning();
     LOG.warn(warning);
+    warning = "WARNING: " + warning;
+    console.printInfo(warning);
+    // Propagate warning to beeline via operation log.
+    OperationLog ol = OperationLog.getCurrentOperationLog();
+    if (ol != null) {
+      ol.writeOperationLog(LoggingLevel.EXECUTION, warning + "\n");
+    }
   }
 
   private void setErrorMsgAndDetail(int exitVal, Throwable downstreamError, Task tsk) {
@@ -2143,7 +2178,7 @@ public class Driver implements CommandProcessor {
     hookContext.setErrorMessage(errorMessage);
     hookContext.setException(exception);
     // Get all the failure execution hooks and execute them.
-    for (Hook ofh : hooksLoader.getHooks(HiveConf.ConfVars.ONFAILUREHOOKS, console)) {
+    for (Hook ofh : getHooks(HiveConf.ConfVars.ONFAILUREHOOKS)) {
       perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.FAILURE_HOOK + ofh.getClass().getName());
 
       ((ExecuteWithHookContext) ofh).run(hookContext);
@@ -2193,6 +2228,7 @@ public class Driver implements CommandProcessor {
       if (LOG.isInfoEnabled()){
         LOG.info("Starting task [" + tsk + "] in parallel");
       }
+      tskRun.setOperationLog(OperationLog.getCurrentOperationLog());
       tskRun.start();
     } else {
       if (LOG.isInfoEnabled()){
@@ -2409,7 +2445,6 @@ public class Driver implements CommandProcessor {
       lDrvState.driverState = DriverState.CLOSED;
     } finally {
       lDrvState.stateLock.unlock();
-      LockedDriverState.removeLockedDriverState();
     }
     if (SessionState.get() != null) {
       SessionState.get().getLineageState().clear();

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index d01a203..6a43385 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -27,7 +27,6 @@ import java.util.regex.Pattern;
 import org.antlr.runtime.tree.Tree;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.ASTNodeOrigin;
-import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
 
 /**
  * List of all error messages.
@@ -218,7 +217,7 @@ public enum ErrorMsg {
   ALTER_COMMAND_FOR_VIEWS(10131, "To alter a view you need to use the ALTER VIEW command."),
   ALTER_COMMAND_FOR_TABLES(10132, "To alter a base table you need to use the ALTER TABLE command."),
   ALTER_VIEW_DISALLOWED_OP(10133, "Cannot use this form of ALTER on a view"),
-  ALTER_TABLE_NON_NATIVE(10134, "ALTER TABLE can only be used for " + AlterTableTypes.nonNativeTableAllowedTypes + " to a non-native table "),
+  ALTER_TABLE_NON_NATIVE(10134, "ALTER TABLE cannot be used for a non-native table"),
   SORTMERGE_MAPJOIN_FAILED(10135,
       "Sort merge bucketed join could not be performed. " +
       "If you really want to perform the operation, either set " +
@@ -411,8 +410,8 @@ public enum ErrorMsg {
   INSERT_CANNOT_CREATE_TEMP_FILE(10293, "Unable to create temp file for insert values "),
   ACID_OP_ON_NONACID_TXNMGR(10294, "Attempt to do update or delete using transaction manager that" +
       " does not support these operations."),
-  NO_INSERT_OVERWRITE_WITH_ACID(10295, "INSERT OVERWRITE not allowed on table {0} with OutputFormat " +
-      "that implements AcidOutputFormat while transaction manager that supports ACID is in use", true),
+  NO_INSERT_OVERWRITE_WITH_ACID(10295, "INSERT OVERWRITE not allowed on table with OutputFormat " +
+      "that implements AcidOutputFormat while transaction manager that supports ACID is in use"),
   VALUES_TABLE_CONSTRUCTOR_NOT_SUPPORTED(10296,
       "Values clause with table constructor not yet supported"),
   ACID_OP_ON_NONACID_TABLE(10297, "Attempt to do update or delete on table {0} that does not use " +
@@ -482,17 +481,9 @@ public enum ErrorMsg {
       "is controlled by hive.exec.max.dynamic.partitions and hive.exec.max.dynamic.partitions.pernode. "),
   PARTITION_SCAN_LIMIT_EXCEEDED(20005, "Number of partitions scanned (={0}) on table {1} exceeds limit" +
       " (={2}). This is controlled by hive.limit.query.max.table.partition.", true),
-  /**
-   * {1} is the transaction id;
-   * use {@link org.apache.hadoop.hive.common.JavaUtils#txnIdToString(long)} to format
-   */
-  OP_NOT_ALLOWED_IN_IMPLICIT_TXN(20006, "Operation {0} is not allowed in an implicit transaction ({1}).", true),
-  /**
-   * {1} is the transaction id;
-   * use {@link org.apache.hadoop.hive.common.JavaUtils#txnIdToString(long)} to format
-   */
-  OP_NOT_ALLOWED_IN_TXN(20007, "Operation {0} is not allowed in a transaction ({1},queryId={2}).", true),
-  OP_NOT_ALLOWED_WITHOUT_TXN(20008, "Operation {0} is not allowed without an active transaction", true),
+  OP_NOT_ALLOWED_IN_AUTOCOMMIT(20006, "Operation {0} is not allowed when autoCommit=true.", true),//todo: better SQLState?
+  OP_NOT_ALLOWED_IN_TXN(20007, "Operation {0} is not allowed in a transaction.  TransactionID={1}.", true),
+  OP_NOT_ALLOWED_WITHOUT_TXN(20008, "Operation {0} is not allowed since autoCommit=false and there is no active transaction", true),
   //========================== 30000 range starts here ========================//
  STATSPUBLISHER_NOT_OBTAINED(30000, "StatsPublisher cannot be obtained. " +
    "There was an error retrieving the StatsPublisher, and retrying " +

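Several of the ErrorMsg entries above carry {0}/{1} placeholders plus a trailing boolean that appears to mark the message as a parameterized template. The following is a generic illustration of expanding such templates with java.text.MessageFormat; it sketches the idea only and is not Hive's ErrorMsg implementation.

import java.text.MessageFormat;

public class ErrorTemplateSketch {
  /** Mirrors the shape of an entry: numeric code, template text, "has placeholders" flag. */
  enum TemplateError {
    OP_NOT_ALLOWED_IN_TXN(20007, "Operation {0} is not allowed in a transaction.  TransactionID={1}.", true),
    OP_NOT_ALLOWED_WITHOUT_TXN(20008,
        "Operation {0} is not allowed since autoCommit=false and there is no active transaction", true);

    final int errorCode;
    final String template;
    final boolean parameterized;

    TemplateError(int errorCode, String template, boolean parameterized) {
      this.errorCode = errorCode;
      this.template = template;
      this.parameterized = parameterized;
    }

    String format(Object... args) {
      // MessageFormat substitutes {0}, {1}, ... with the supplied arguments.
      return parameterized ? MessageFormat.format(template, args) : template;
    }
  }

  public static void main(String[] args) {
    System.out.println(TemplateError.OP_NOT_ALLOWED_IN_TXN.errorCode + ": "
        + TemplateError.OP_NOT_ALLOWED_IN_TXN.format("ALTER TABLE", 42L));
  }
}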
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/QueryLifeTimeHookRunner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryLifeTimeHookRunner.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryLifeTimeHookRunner.java
deleted file mode 100644
index 85e038c..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryLifeTimeHookRunner.java
+++ /dev/null
@@ -1,186 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.hadoop.hive.ql;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import com.google.common.collect.Iterables;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.hooks.HookContext;
-import org.apache.hadoop.hive.ql.hooks.HooksLoader;
-import org.apache.hadoop.hive.ql.hooks.MetricsQueryLifeTimeHook;
-import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHook;
-import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContext;
-import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContextImpl;
-import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookWithParseHooks;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-
-/**
- * A runner class for {@link QueryLifeTimeHook}s and {@link QueryLifeTimeHookWithParseHooks}. The class has run methods
- * for each phase of a {@link QueryLifeTimeHook} and {@link QueryLifeTimeHookWithParseHooks}. Each run method checks if
- * a list of hooks has been specified, and if so invokes the appropriate callback method of each hook. Each method
- * constructs a {@link QueryLifeTimeHookContext} object and passes it to the callback functions.
- */
-class QueryLifeTimeHookRunner {
-
-  private final HiveConf conf;
-  private final List<QueryLifeTimeHook> queryHooks;
-
-  /**
-   * Constructs a {@link QueryLifeTimeHookRunner} that loads all hooks to be run via a {@link HooksLoader}.
-   *
-   * @param conf the {@link HiveConf} to use when creating {@link QueryLifeTimeHookContext} objects
-   * @param hooksLoader the {@link HooksLoader} to use when loading all hooks to be run
-   * @param console the {@link SessionState.LogHelper} to use when running {@link HooksLoader#getHooks(HiveConf.ConfVars)}
-   */
-  QueryLifeTimeHookRunner(HiveConf conf, HooksLoader hooksLoader, SessionState.LogHelper console) {
-    this.conf = conf;
-    this.queryHooks = new ArrayList<>();
-
-    if (conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED)) {
-      queryHooks.add(new MetricsQueryLifeTimeHook());
-    }
-    List<QueryLifeTimeHook> propertyDefinedHoooks;
-    try {
-      propertyDefinedHoooks = hooksLoader.getHooks(
-              HiveConf.ConfVars.HIVE_QUERY_LIFETIME_HOOKS, console);
-    } catch (IllegalAccessException | InstantiationException | ClassNotFoundException e) {
-      throw new IllegalArgumentException(e);
-    }
-    if (propertyDefinedHoooks != null) {
-      Iterables.addAll(queryHooks, propertyDefinedHoooks);
-    }
-  }
-
-  /**
-   * If {@link QueryLifeTimeHookWithParseHooks} have been loaded via the {@link HooksLoader} then invoke the
-   * {@link QueryLifeTimeHookWithParseHooks#beforeParse(QueryLifeTimeHookContext)} method for each
-   * {@link QueryLifeTimeHookWithParseHooks}.
-   *
-   * @param command the Hive command that is being run
-   */
-  void runBeforeParseHook(String command) {
-    if (containsHooks()) {
-      QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(
-              command).build();
-
-      for (QueryLifeTimeHook hook : queryHooks) {
-        if (hook instanceof QueryLifeTimeHookWithParseHooks) {
-          ((QueryLifeTimeHookWithParseHooks) hook).beforeParse(qhc);
-        }
-      }
-    }
-  }
-
-  /**
-   * If {@link QueryLifeTimeHookWithParseHooks} have been loaded via the {@link HooksLoader} then invoke the
-   * {@link QueryLifeTimeHookWithParseHooks#afterParse(QueryLifeTimeHookContext, boolean)} method for each
-   * {@link QueryLifeTimeHookWithParseHooks}.
-   *
-   * @param command the Hive command that is being run
-   * @param parseError true if there was an error while parsing the command, false otherwise
-   */
-  void runAfterParseHook(String command, boolean parseError) {
-    if (containsHooks()) {
-      QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(
-              command).build();
-
-      for (QueryLifeTimeHook hook : queryHooks) {
-        if (hook instanceof QueryLifeTimeHookWithParseHooks) {
-          ((QueryLifeTimeHookWithParseHooks) hook).afterParse(qhc, parseError);
-        }
-      }
-    }
-  }
-
-  /**
-   * Invoke the {@link QueryLifeTimeHook#beforeCompile(QueryLifeTimeHookContext)} method for each {@link QueryLifeTimeHook}
-   *
-   * @param command the Hive command that is being run
-   */
-  void runBeforeCompileHook(String command) {
-    if (containsHooks()) {
-      QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(
-              command).build();
-
-      for (QueryLifeTimeHook hook : queryHooks) {
-        hook.beforeCompile(qhc);
-      }
-    }
-  }
-
-   /**
-   * Invoke the {@link QueryLifeTimeHook#afterCompile(QueryLifeTimeHookContext, boolean)} method for each {@link QueryLifeTimeHook}
-   *
-   * @param command the Hive command that is being run
-   * @param compileError true if there was an error while compiling the command, false otherwise
-   */
-  void runAfterCompilationHook(String command, boolean compileError) {
-    if (containsHooks()) {
-      QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(
-              command).build();
-
-      for (QueryLifeTimeHook hook : queryHooks) {
-        hook.afterCompile(qhc, compileError);
-      }
-    }
-  }
-
-  /**
-   * Invoke the {@link QueryLifeTimeHook#beforeExecution(QueryLifeTimeHookContext)} method for each {@link QueryLifeTimeHook}
-   *
-   * @param command the Hive command that is being run
-   * @param hookContext the {@link HookContext} of the command being run
-   */
-  void runBeforeExecutionHook(String command, HookContext hookContext) {
-    if (containsHooks()) {
-      QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(
-              command).withHookContext(hookContext).build();
-
-      for (QueryLifeTimeHook hook : queryHooks) {
-        hook.beforeExecution(qhc);
-      }
-    }
-  }
-
-  /**
-   * Invoke the {@link QueryLifeTimeHook#afterExecution(QueryLifeTimeHookContext, boolean)} method for each {@link QueryLifeTimeHook}
-   *
-   * @param command the Hive command that is being run
-   * @param hookContext the {@link HookContext} of the command being run
-   * @param executionError true if there was an error while executing the command, false otherwise
-   */
-  void runAfterExecutionHook(String command, HookContext hookContext, boolean executionError) {
-    if (containsHooks()) {
-      QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(
-              command).withHookContext(hookContext).build();
-
-      for (QueryLifeTimeHook hook : queryHooks) {
-        hook.afterExecution(qhc, executionError);
-      }
-    }
-  }
-
-  private boolean containsHooks() {
-    return queryHooks != null && !queryHooks.isEmpty();
-  }
-}

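The deleted QueryLifeTimeHookRunner above documents a simple pattern: keep one list of hooks, and for each phase build a context object and invoke the matching callback, checking the subtype for the optional parse callbacks. A condensed standalone sketch of that pattern follows; the interfaces below are simplified stand-ins, not Hive's QueryLifeTimeHook classes.

import java.util.ArrayList;
import java.util.List;

public class LifeTimeHookRunnerSketch {
  /** Simplified context passed to every callback. */
  static class HookContext {
    final String command;
    HookContext(String command) { this.command = command; }
  }

  interface LifeTimeHook {
    void beforeCompile(HookContext ctx);
    void afterCompile(HookContext ctx, boolean error);
  }

  /** Optional extension carrying parse callbacks, detected via instanceof at run time. */
  interface LifeTimeHookWithParseHooks extends LifeTimeHook {
    void beforeParse(HookContext ctx);
    void afterParse(HookContext ctx, boolean error);
  }

  static class Runner {
    private final List<LifeTimeHook> hooks = new ArrayList<>();

    void register(LifeTimeHook hook) { hooks.add(hook); }

    void runBeforeParse(String command) {
      HookContext ctx = new HookContext(command);
      for (LifeTimeHook hook : hooks) {
        if (hook instanceof LifeTimeHookWithParseHooks) {   // parse callbacks are optional
          ((LifeTimeHookWithParseHooks) hook).beforeParse(ctx);
        }
      }
    }

    void runBeforeCompile(String command) {
      HookContext ctx = new HookContext(command);
      for (LifeTimeHook hook : hooks) {
        hook.beforeCompile(ctx);
      }
    }

    void runAfterCompile(String command, boolean compileError) {
      HookContext ctx = new HookContext(command);
      for (LifeTimeHook hook : hooks) {
        hook.afterCompile(ctx, compileError);
      }
    }
  }

  public static void main(String[] args) {
    Runner runner = new Runner();
    runner.register(new LifeTimeHook() {
      public void beforeCompile(HookContext ctx) { System.out.println("before compile: " + ctx.command); }
      public void afterCompile(HookContext ctx, boolean error) { System.out.println("after compile, error=" + error); }
    });
    runner.runBeforeParse("select 1");   // no-op: the registered hook has no parse callbacks
    runner.runBeforeCompile("select 1");
    runner.runAfterCompile("select 1", false);
  }
}

Centralizing the loop in a runner is what the deleted class provided; the revert above inlines the same loop at each call site in Driver instead.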
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
index 2ddabd9..e8c8ae6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
@@ -35,7 +35,6 @@ import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hive.metastore.api.Schema;
 import org.apache.hadoop.hive.ql.exec.ConditionalTask;
 import org.apache.hadoop.hive.ql.exec.ExplainTask;
@@ -49,7 +48,6 @@ import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.ColumnAccessInfo;
 import org.apache.hadoop.hive.ql.parse.TableAccessInfo;
-import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.ReducerTimeStatsPerJob;
@@ -107,19 +105,11 @@ public class QueryPlan implements Serializable {
 
   private transient Long queryStartTime;
   private final HiveOperation operation;
-  private final boolean acidResourcesInQuery;
-  private final Set<FileSinkDesc> acidSinks;
   private Boolean autoCommitValue;
 
   public QueryPlan() {
-    this(null);
-  }
-  @VisibleForTesting
-  protected QueryPlan(HiveOperation command) {
-    this.reducerTimeStatsPerJobList = new ArrayList<>();
-    this.operation = command;
-    this.acidResourcesInQuery = false;
-    this.acidSinks = Collections.emptySet();
+    this.reducerTimeStatsPerJobList = new ArrayList<ReducerTimeStatsPerJob>();
+    operation = null;
   }
 
   public QueryPlan(String queryString, BaseSemanticAnalyzer sem, Long startTime, String queryId,
@@ -146,22 +136,8 @@ public class QueryPlan implements Serializable {
     this.operation = operation;
     this.autoCommitValue = sem.getAutoCommitValue();
     this.resultSchema = resultSchema;
-    this.acidResourcesInQuery = sem.hasAcidInQuery();
-    this.acidSinks = sem.getAcidFileSinks();
   }
 
-  /**
-   * @return true if any acid resources are read/written
-   */
-  public boolean hasAcidResourcesInQuery() {
-    return acidResourcesInQuery;
-  }
-  /**
-   * @return Collection of FileSinkDesc representing writes to Acid resources
-   */
-  Set<FileSinkDesc> getAcidSinks() {
-    return acidSinks;
-  }
   public String getQueryStr() {
     return queryString;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
index f7fad94..6381a21 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
@@ -28,6 +28,8 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
@@ -38,6 +40,7 @@ import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.shims.HadoopShims;
 
 /**
  * ArchiveUtils.
@@ -45,7 +48,9 @@ import org.apache.hadoop.hive.ql.metadata.Table;
  */
 @SuppressWarnings("nls")
 public final class ArchiveUtils {
-  public static final String ARCHIVING_LEVEL = "archiving_level";
+  private static final Logger LOG = LoggerFactory.getLogger(ArchiveUtils.class.getName());
+
+  public static String ARCHIVING_LEVEL = "archiving_level";
 
   /**
    * PartSpecInfo keeps fields and values extracted from partial partition info

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnInfo.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnInfo.java
index bb8dcbb..e3da7f0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnInfo.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnInfo.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.exec;
 
 import java.io.Serializable;
 
-import org.apache.hadoop.hive.common.StringInternUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
@@ -97,7 +96,7 @@ public class ColumnInfo implements Serializable {
     this.tabAlias = tabAlias;
     this.isVirtualCol = isVirtualCol;
     this.isHiddenVirtualCol = isHiddenVirtualCol;
-    setTypeName(getType().getTypeName());
+    this.typeName = getType().getTypeName();
   }
 
   public ColumnInfo(ColumnInfo columnInfo) {
@@ -115,7 +114,7 @@ public class ColumnInfo implements Serializable {
   }
 
   public void setTypeName(String typeName) {
-    this.typeName = StringInternUtils.internIfNotNull(typeName);
+    this.typeName = typeName;
   }
 
   public TypeInfo getType() {
@@ -161,7 +160,7 @@ public class ColumnInfo implements Serializable {
   }
 
   public void setAlias(String col_alias) {
-    alias = StringInternUtils.internIfNotNull(col_alias);
+    alias = col_alias;
   }
 
   public String getAlias() {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
index d96f432..a899964 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
@@ -110,12 +110,8 @@ public class ColumnStatsTask extends Task<ColumnStatsWork> implements Serializab
     }
   }
 
-  @SuppressWarnings("serial")
-  class UnsupportedDoubleException extends Exception {
-  }
-
   private void unpackDoubleStats(ObjectInspector oi, Object o, String fName,
-      ColumnStatisticsObj statsObj) throws UnsupportedDoubleException {
+      ColumnStatisticsObj statsObj) {
     if (fName.equals("countnulls")) {
       long v = ((LongObjectInspector) oi).get(o);
       statsObj.getStatsData().getDoubleStats().setNumNulls(v);
@@ -124,15 +120,9 @@ public class ColumnStatsTask extends Task<ColumnStatsWork> implements Serializab
       statsObj.getStatsData().getDoubleStats().setNumDVs(v);
     } else if (fName.equals("max")) {
       double d = ((DoubleObjectInspector) oi).get(o);
-      if (Double.isInfinite(d) || Double.isNaN(d)) {
-        throw new UnsupportedDoubleException();
-      }
       statsObj.getStatsData().getDoubleStats().setHighValue(d);
     } else if (fName.equals("min")) {
       double d = ((DoubleObjectInspector) oi).get(o);
-      if (Double.isInfinite(d) || Double.isNaN(d)) {
-        throw new UnsupportedDoubleException();
-      }
       statsObj.getStatsData().getDoubleStats().setLowValue(d);
     } else if (fName.equals("ndvbitvector")) {
       PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
@@ -244,7 +234,7 @@ public class ColumnStatsTask extends Task<ColumnStatsWork> implements Serializab
   }
 
   private void unpackPrimitiveObject (ObjectInspector oi, Object o, String fieldName,
-      ColumnStatisticsObj statsObj) throws UnsupportedDoubleException {
+      ColumnStatisticsObj statsObj) {
     if (o == null) {
       return;
     }
@@ -304,7 +294,7 @@ public class ColumnStatsTask extends Task<ColumnStatsWork> implements Serializab
   }
 
   private void unpackStructObject(ObjectInspector oi, Object o, String fName,
-      ColumnStatisticsObj cStatsObj) throws UnsupportedDoubleException {
+      ColumnStatisticsObj cStatsObj) {
     if (oi.getCategory() != ObjectInspector.Category.STRUCT) {
       throw new RuntimeException("Invalid object datatype : " + oi.getCategory().toString());
     }
@@ -361,13 +351,8 @@ public class ColumnStatsTask extends Task<ColumnStatsWork> implements Serializab
         ColumnStatisticsObj statsObj = new ColumnStatisticsObj();
         statsObj.setColName(colName.get(i));
         statsObj.setColType(colType.get(i));
-        try {
-          unpackStructObject(foi, f, fieldName, statsObj);
-          statsObjs.add(statsObj);
-        } catch (UnsupportedDoubleException e) {
-          // due to infinity or nan.
-          LOG.info("Because " + colName.get(i) + " is infinite or NaN, we skip stats.");
-        }
+        unpackStructObject(foi, f, fieldName, statsObj);
+        statsObjs.add(statsObj);
       }
 
       if (!isTblLevel) {
@@ -386,9 +371,7 @@ public class ColumnStatsTask extends Task<ColumnStatsWork> implements Serializab
       ColumnStatistics colStats = new ColumnStatistics();
       colStats.setStatsDesc(statsDesc);
       colStats.setStatsObj(statsObjs);
-      if (!statsObjs.isEmpty()) {
-        stats.add(colStats);
-      }
+      stats.add(colStats);
     }
     ftOp.clearFetchContext();
     return stats;
@@ -415,9 +398,6 @@ public class ColumnStatsTask extends Task<ColumnStatsWork> implements Serializab
     List<ColumnStatistics> colStats = constructColumnStatsFromPackedRows(db);
     // Persist the column statistics object to the metastore
     // Note, this function is shared for both table and partition column stats.
-    if (colStats.isEmpty()) {
-      return 0;
-    }
     SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats);
     if (work.getColStats() != null && work.getColStats().getNumBitVector() > 0) {
       request.setNeedMerge(true);

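The ColumnStatsTask hunks above remove a guard that skipped double column statistics whose computed min or max was NaN or infinite. As a rough illustration of that kind of guard, here is a small standalone sketch; the exception and method names are hypothetical and not the removed Hive code verbatim.

public class DoubleStatsGuardSketch {
  @SuppressWarnings("serial")
  static class UnsupportedDoubleException extends Exception { }

  /** Rejects values that cannot be stored as meaningful min/max statistics. */
  static double checkedStatValue(double d) throws UnsupportedDoubleException {
    if (Double.isInfinite(d) || Double.isNaN(d)) {
      throw new UnsupportedDoubleException();
    }
    return d;
  }

  public static void main(String[] args) {
    double[] candidates = {3.14, Double.NaN, Double.POSITIVE_INFINITY};
    for (double d : candidates) {
      try {
        System.out.println("recorded max = " + checkedStatValue(d));
      } catch (UnsupportedDoubleException e) {
        // Mirrors the removed behavior: skip the stats object instead of persisting NaN/Infinity.
        System.out.println("skipping stats for value " + d);
      }
    }
  }
}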
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
index 82f6074..e8526f6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
@@ -80,7 +80,8 @@ public class CopyTask extends Task<CopyWork> implements Serializable {
         }
       }
 
-      if (!FileUtils.mkdir(dstFs, toPath, conf)) {
+      boolean inheritPerms = conf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
+      if (!FileUtils.mkdir(dstFs, toPath, inheritPerms, conf)) {
         console.printError("Cannot make target directory: " + toPath.toString());
         return 2;
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 81e4744..a1a0862 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -3330,30 +3330,20 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         if (tbl.isPartitioned() && part == null) {
           // No partition spec specified for a partitioned table, let's fetch all.
           Map<String,String> tblProps = tbl.getParameters() == null ? new HashMap<String,String>() : tbl.getParameters();
-          Map<String, Long> valueMap = new HashMap<>();
-          Map<String, Boolean> stateMap = new HashMap<>();
-          for (String stat : StatsSetupConst.supportedStats) {
-            valueMap.put(stat, 0L);
-            stateMap.put(stat, true);
-          }
           PartitionIterable parts = new PartitionIterable(db, tbl, null, conf.getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
-          int numParts = 0;
-          for (Partition partition : parts) {
-            Map<String, String> props = partition.getParameters();
-            Boolean state = StatsSetupConst.areBasicStatsUptoDate(props);
-            for (String stat : StatsSetupConst.supportedStats) {
-              stateMap.put(stat, stateMap.get(stat) && state);
+          for (String stat : StatsSetupConst.supportedStats) {
+            boolean state = true;
+            long statVal = 0l;
+            for (Partition partition : parts) {
+              Map<String,String> props = partition.getParameters();
+              state &= StatsSetupConst.areBasicStatsUptoDate(props);
               if (props != null && props.get(stat) != null) {
-                valueMap.put(stat, valueMap.get(stat) + Long.parseLong(props.get(stat)));
+                statVal += Long.parseLong(props.get(stat));
               }
             }
-            numParts++;
+            StatsSetupConst.setBasicStatsState(tblProps, Boolean.toString(state));
+            tblProps.put(stat, String.valueOf(statVal));
           }
-          for (String stat : StatsSetupConst.supportedStats) {
-            StatsSetupConst.setBasicStatsState(tblProps, Boolean.toString(stateMap.get(stat)));
-            tblProps.put(stat, valueMap.get(stat).toString());
-          }
-          tblProps.put(StatsSetupConst.NUM_PARTITIONS, Integer.toString(numParts));
           tbl.setParameters(tblProps);
         }
       } else {
@@ -4876,8 +4866,32 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     String tableName = truncateTableDesc.getTableName();
     Map<String, String> partSpec = truncateTableDesc.getPartSpec();
 
+    Table table = db.getTable(tableName, true);
+
     try {
-      db.truncateTable(tableName, partSpec);
+      // this is not transactional
+      for (Path location : getLocations(db, table, partSpec)) {
+        FileSystem fs = location.getFileSystem(conf);
+        HadoopShims.HdfsEncryptionShim shim
+          = ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, conf);
+        if (!shim.isPathEncrypted(location)) {
+          HdfsUtils.HadoopFileStatus status = new HdfsUtils.HadoopFileStatus(conf, fs, location);
+          FileStatus targetStatus = fs.getFileStatus(location);
+          String targetGroup = targetStatus == null ? null : targetStatus.getGroup();
+          FileUtils.moveToTrash(fs, location, conf);
+          fs.mkdirs(location);
+          HdfsUtils.setFullFileStatus(conf, status, targetGroup, fs, location, false);
+        } else {
+          FileStatus[] statuses = fs.listStatus(location, FileUtils.HIDDEN_FILES_PATH_FILTER);
+          if (statuses == null || statuses.length == 0) {
+            continue;
+          }
+          boolean success = Hive.trashFiles(fs, statuses, conf);
+          if (!success) {
+            throw new HiveException("Error in deleting the contents of " + location.toString());
+          }
+        }
+      }
     } catch (Exception e) {
       throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
     }
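The hunk above reinstates a non-transactional truncate: for each data location the existing contents are moved to the trash (or trashed file by file on encrypted paths) and the directory itself is kept, with its previous ownership restored. Below is a much-simplified local-filesystem analogue of "empty the directory but keep it", using only java.nio; Hive's actual code goes through Hadoop's FileSystem, trash, and HDFS permission APIs.

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.stream.Stream;

public class TruncateLocationSketch {
  /** Moves every child of 'location' into 'trashDir', leaving 'location' itself in place. */
  static void truncateLocation(Path location, Path trashDir) throws IOException {
    Files.createDirectories(trashDir);
    try (DirectoryStream<Path> children = Files.newDirectoryStream(location)) {
      for (Path child : children) {
        // Analogue of moving to trash: keep the data around instead of deleting it outright.
        Files.move(child, trashDir.resolve(child.getFileName()), StandardCopyOption.REPLACE_EXISTING);
      }
    }
    // The directory survives, so readers and subsequent writes still find the expected location.
  }

  public static void main(String[] args) throws IOException {
    Path location = Files.createTempDirectory("table_location");
    Files.createFile(location.resolve("bucket_00000"));
    Path trash = Files.createTempDirectory("trash");
    truncateLocation(location, trash);
    try (Stream<Path> remaining = Files.list(location)) {
      System.out.println("remaining files: " + remaining.count());  // 0
    }
  }
}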
@@ -4908,6 +4922,58 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     return 0;
   }
 
+  private List<Path> getLocations(Hive db, Table table, Map<String, String> partSpec)
+      throws HiveException, InvalidOperationException {
+    List<Path> locations = new ArrayList<Path>();
+    if (partSpec == null) {
+      if (table.isPartitioned()) {
+        for (Partition partition : db.getPartitions(table)) {
+          locations.add(partition.getDataLocation());
+          EnvironmentContext environmentContext = new EnvironmentContext();
+          if (needToUpdateStats(partition.getParameters(), environmentContext)) {
+            db.alterPartition(table.getDbName(), table.getTableName(), partition, environmentContext);
+          }
+        }
+      } else {
+        locations.add(table.getPath());
+        EnvironmentContext environmentContext = new EnvironmentContext();
+        if (needToUpdateStats(table.getParameters(), environmentContext)) {
+          db.alterTable(table.getDbName()+"."+table.getTableName(), table, environmentContext);
+        }
+      }
+    } else {
+      for (Partition partition : db.getPartitionsByNames(table, partSpec)) {
+        locations.add(partition.getDataLocation());
+        EnvironmentContext environmentContext = new EnvironmentContext();
+        if (needToUpdateStats(partition.getParameters(), environmentContext)) {
+          db.alterPartition(table.getDbName(), table.getTableName(), partition, environmentContext);
+        }
+      }
+    }
+    return locations;
+  }
+
+  private boolean needToUpdateStats(Map<String,String> props, EnvironmentContext environmentContext) {
+    if (null == props) {
+      return false;
+    }
+    boolean statsPresent = false;
+    for (String stat : StatsSetupConst.supportedStats) {
+      String statVal = props.get(stat);
+      if (statVal != null && Long.parseLong(statVal) > 0) {
+        statsPresent = true;
+        //In the case of truncate table, we set the stats to be 0.
+        props.put(stat, "0");
+      }
+    }
+    //first set basic stats to true
+    StatsSetupConst.setBasicStatsState(props, StatsSetupConst.TRUE);
+    environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
+    //then invalidate column stats
+    StatsSetupConst.clearColumnStatsState(props);
+    return statsPresent;
+  }
+
   @Override
   public StageType getType() {
     return StageType.DDL;
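
A note on the truncate-table hunks above: needToUpdateStats zeroes any positive basic stat and then invalidates column stats before the table or partition object is altered in the metastore. A minimal standalone sketch of that reset step, using a plain HashMap for the parameter map and illustrative stat keys in place of StatsSetupConst.supportedStats:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class TruncateStatsSketch {
      // Illustrative stand-ins for StatsSetupConst.supportedStats.
      private static final List<String> SUPPORTED_STATS = Arrays.asList("numRows", "totalSize");

      /** Zeroes any positive basic stat in place; returns true if one was found. */
      static boolean resetBasicStats(Map<String, String> props) {
        if (props == null) {
          return false;
        }
        boolean statsPresent = false;
        for (String stat : SUPPORTED_STATS) {
          String val = props.get(stat);
          if (val != null && Long.parseLong(val) > 0) {
            statsPresent = true;
            props.put(stat, "0"); // truncate leaves no rows, so basic stats drop to 0
          }
        }
        return statsPresent;
      }

      public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        params.put("numRows", "42");
        System.out.println(resetBasicStats(params) + " -> " + params); // true -> {numRows=0}
      }
    }
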

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
index 4c24ab4..d35e3ba 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
@@ -54,11 +54,9 @@ import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.exec.spark.SparkTask;
 import org.apache.hadoop.hive.ql.exec.tez.TezTask;
 import org.apache.hadoop.hive.ql.exec.vector.VectorGroupByOperator;
-import org.apache.hadoop.hive.ql.exec.vector.VectorReduceSinkOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression;
-import org.apache.hadoop.hive.ql.exec.vector.reducesink.VectorReduceSinkCommonOperator;
 import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
 import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
@@ -797,12 +795,9 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
           if (jsonOut != null && jsonOut.length() > 0) {
             ((JSONObject) jsonOut.get(JSONObject.getNames(jsonOut)[0])).put("OperatorId:",
                 operator.getOperatorId());
-            if (!this.work.isUserLevelExplain()
-                && this.work.isFormatted()
-                && (operator instanceof ReduceSinkOperator
-                    || operator instanceof VectorReduceSinkOperator || operator instanceof VectorReduceSinkCommonOperator)) {
-              List<String> outputOperators = ((ReduceSinkDesc) operator.getConf())
-                  .getOutputOperators();
+            if (!this.work.isUserLevelExplain() && this.work.isFormatted()
+                && operator instanceof ReduceSinkOperator) {
+              List<String> outputOperators = ((ReduceSinkOperator) operator).getConf().getOutputOperators();
               if (outputOperators != null) {
                 ((JSONObject) jsonOut.get(JSONObject.getNames(jsonOut)[0])).put(OUTPUT_OPERATORS,
                     Arrays.toString(outputOperators.toArray()));
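
For context on the hunk above: both the removed and the restored code attach the extra attributes to the single named child of the operator's JSON output, looked up through JSONObject.getNames; the revert only narrows the operator check back to plain ReduceSinkOperator. A small sketch of that JSON manipulation, assuming the org.json classes ExplainTask already uses; the operator name and id below are made up:

    import org.json.JSONObject;

    public class ExplainJsonSketch {
      public static void main(String[] args) throws Exception {
        JSONObject jsonOut = new JSONObject();
        jsonOut.put("Reduce Output Operator", new JSONObject()); // operator display name (illustrative)

        // Attach an attribute to the first (and only) named child, as the explain formatter does.
        String firstName = JSONObject.getNames(jsonOut)[0];
        ((JSONObject) jsonOut.get(firstName)).put("OperatorId:", "RS_3");

        System.out.println(jsonOut.toString(2));
      }
    }
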

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeConstantDefaultEvaluator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeConstantDefaultEvaluator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeConstantDefaultEvaluator.java
new file mode 100644
index 0000000..f53c3e3
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeConstantDefaultEvaluator.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDefaultDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+
+/**
+ * ExprNodeConstantDefaultEvaluator.
+ *
+ */
+public class ExprNodeConstantDefaultEvaluator extends ExprNodeEvaluator<ExprNodeConstantDefaultDesc> {
+
+  transient ObjectInspector writableObjectInspector;
+
+  public ExprNodeConstantDefaultEvaluator(ExprNodeConstantDefaultDesc expr) {
+    this(expr, null);
+  }
+
+  public ExprNodeConstantDefaultEvaluator(ExprNodeConstantDefaultDesc expr, Configuration conf) {
+    super(expr, conf);
+    writableObjectInspector = expr.getWritableObjectInspector();
+  }
+
+  @Override
+  public ObjectInspector initialize(ObjectInspector rowInspector) throws HiveException {
+    return writableObjectInspector;
+  }
+
+  @Override
+  protected Object _evaluate(Object row, int version) throws HiveException {
+    return expr;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeEvaluatorFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeEvaluatorFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeEvaluatorFactory.java
index cc40cae..34aec55 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeEvaluatorFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeEvaluatorFactory.java
@@ -24,6 +24,7 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDefaultDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDynamicValueDesc;
@@ -49,6 +50,11 @@ public final class ExprNodeEvaluatorFactory {
       return new ExprNodeConstantEvaluator((ExprNodeConstantDesc) desc, conf);
     }
 
+    // Special 'default' constant node
+    if (desc instanceof ExprNodeConstantDefaultDesc) {
+      return new ExprNodeConstantDefaultEvaluator((ExprNodeConstantDefaultDesc) desc);
+    }
+
     // Column-reference node, e.g. a column in the input row
     if (desc instanceof ExprNodeColumnDesc) {
       return new ExprNodeColumnEvaluator((ExprNodeColumnDesc) desc, conf);
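
The hunk above adds a branch for ExprNodeConstantDefaultDesc ahead of the column-reference case; the factory picks an evaluator through a chain of instanceof checks on the concrete descriptor type. A toy, self-contained sketch of that dispatch pattern; every name below is illustrative, not one of Hive's classes:

    interface Desc {}
    class ConstantDesc implements Desc {}
    class ColumnDesc implements Desc {}

    interface Evaluator { Object evaluate(Object row); }

    final class EvaluatorFactorySketch {
      static Evaluator get(Desc desc) {
        if (desc instanceof ConstantDesc) {
          return row -> "constant";  // constants ignore the input row
        }
        if (desc instanceof ColumnDesc) {
          return row -> row;         // columns read from the input row
        }
        throw new IllegalArgumentException("No evaluator for " + desc.getClass());
      }

      public static void main(String[] args) {
        System.out.println(get(new ConstantDesc()).evaluate("row1"));
      }
    }
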

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
index a3e4c9f..4102d02 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
@@ -22,7 +22,6 @@ import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -394,9 +393,6 @@ public class FetchOperator implements Serializable {
         inputSplits = splitSampling(work.getSplitSample(), inputSplits);
       }
       if (inputSplits.length > 0) {
-        if (HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVE_IN_TEST)) {
-          Arrays.sort(inputSplits, new FetchInputFormatSplitComparator());
-        }
         return inputSplits;
       }
     }
@@ -742,18 +738,6 @@ public class FetchOperator implements Serializable {
     }
   }
 
-  private static class FetchInputFormatSplitComparator implements Comparator<FetchInputFormatSplit> {
-    @Override
-    public int compare(FetchInputFormatSplit a, FetchInputFormatSplit b) {
-      final Path ap = a.getPath();
-      final Path bp = b.getPath();
-      if (ap != null) {
-        return (ap.compareTo(bp));
-      }
-      return Long.signum(a.getLength() - b.getLength());
-    }
-  }
-
   public Configuration getJobConf() {
     return job;
   }
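
The hunks above drop the test-only deterministic ordering of fetch splits; the removed comparator ordered splits by path when one was available and fell back to comparing lengths. A standalone sketch of that ordering over made-up values:

    import java.util.Arrays;
    import java.util.Comparator;

    public class SplitOrderSketch {
      // Stand-in for FetchInputFormatSplit: just a path string and a length (illustrative).
      static final class Split {
        final String path; final long length;
        Split(String path, long length) { this.path = path; this.length = length; }
        public String toString() { return path + "(" + length + ")"; }
      }

      public static void main(String[] args) {
        Split[] splits = {
            new Split("part-00002", 10), new Split("part-00000", 5), new Split("part-00001", 20)
        };
        // Same idea as the removed comparator: order by path, fall back to length when paths are missing.
        Comparator<Split> byPathThenLength = (a, b) ->
            a.path != null && b.path != null ? a.path.compareTo(b.path)
                                             : Long.signum(a.length - b.length);
        Arrays.sort(splits, byPathThenLength);
        System.out.println(Arrays.toString(splits));
      }
    }
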

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 8e74b2e..3ad1733 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.common.ValidWriteIds;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConfUtil;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -148,6 +147,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
   protected transient long cntr = 1;
   protected transient long logEveryNRows = 0;
   protected transient int rowIndex = 0;
+  private transient boolean inheritPerms = false;
   /**
    * Counters.
    */
@@ -256,7 +256,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
       if ((bDynParts || isSkewedStoredAsSubDirectories)
           && !fs.exists(finalPaths[idx].getParent())) {
         Utilities.LOG14535.info("commit making path for dyn/skew: " + finalPaths[idx].getParent());
-        FileUtils.mkdir(fs, finalPaths[idx].getParent(), hconf);
+        FileUtils.mkdir(fs, finalPaths[idx].getParent(), inheritPerms, hconf);
       }
       // If we're updating or deleting there may be no file to close.  This can happen
       // because the where clause strained out all of the records for a given bucket.  So
@@ -501,6 +501,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
       serializer = (Serializer) conf.getTableInfo().getDeserializerClass().newInstance();
       serializer.initialize(unsetNestedColumnPaths(hconf), conf.getTableInfo().getProperties());
       outputClass = serializer.getSerializedClass();
+      inheritPerms = HiveConf.getBoolVar(hconf, ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
 
       if (isLogInfoEnabled) {
         LOG.info("Using serializer : " + serializer + " and formatter : " + hiveOutputFormat +
@@ -600,10 +601,13 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
   }
 
   private void logOutputFormatError(Configuration hconf, HiveException ex) {
-    StringBuilder errorWriter = new StringBuilder();
+    StringWriter errorWriter = new StringWriter();
     errorWriter.append("Failed to create output format; configuration: ");
-    // redact sensitive information before logging
-    HiveConfUtil.dumpConfig(hconf, errorWriter);
+    try {
+      Configuration.dumpConfiguration(hconf, errorWriter);
+    } catch (IOException ex2) {
+      errorWriter.append("{ failed to dump configuration: " + ex2.getMessage() + " }");
+    }
     Properties tdp = null;
     if (this.conf.getTableInfo() != null
         && (tdp = this.conf.getTableInfo().getProperties()) != null) {
@@ -735,7 +739,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
           conf.getWriteType() == AcidUtils.Operation.INSERT_ONLY) {
         Path outPath = fsp.outPaths[filesIdx];
         if ((conf.getWriteType() == AcidUtils.Operation.INSERT_ONLY || conf.isMmTable())
-            && !FileUtils.mkdir(fs, outPath.getParent(), hconf)) {
+            && inheritPerms && !FileUtils.mkdir(fs, outPath.getParent(), inheritPerms, hconf)) {
           LOG.warn("Unable to create directory with inheritPerms: " + outPath);
         }
         fsp.outWriters[filesIdx] = HiveFileFormatUtils.getHiveRecordWriter(jc, conf.getTableInfo(),
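
The logOutputFormatError hunk above goes back to dumping the whole Hadoop Configuration into a StringWriter via Configuration.dumpConfiguration (the removed lines had routed this through HiveConfUtil.dumpConfig, which redacts sensitive values first). A minimal sketch of that dump call in isolation, assuming only a Hadoop client on the classpath:

    import java.io.IOException;
    import java.io.StringWriter;

    import org.apache.hadoop.conf.Configuration;

    public class ConfDumpSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        StringWriter out = new StringWriter();
        out.append("Failed to create output format; configuration: ");
        try {
          // Writes every property of the Configuration into the writer.
          Configuration.dumpConfiguration(conf, out);
        } catch (IOException e) {
          out.append("{ failed to dump configuration: " + e.getMessage() + " }");
        }
        String dump = out.toString();
        System.out.println(dump.substring(0, Math.min(200, dump.length())) + "...");
      }
    }
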

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 1b556ac..4ac25c2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -67,7 +67,7 @@ import org.apache.hadoop.hive.ql.udf.UDFFromUnixTime;
 import org.apache.hadoop.hive.ql.udf.UDFHex;
 import org.apache.hadoop.hive.ql.udf.UDFHour;
 import org.apache.hadoop.hive.ql.udf.UDFJson;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFLength;
+import org.apache.hadoop.hive.ql.udf.UDFLength;
 import org.apache.hadoop.hive.ql.udf.UDFLike;
 import org.apache.hadoop.hive.ql.udf.UDFLn;
 import org.apache.hadoop.hive.ql.udf.UDFLog;
@@ -262,18 +262,13 @@ public final class FunctionRegistry {
     system.registerGenericUDF("trim", GenericUDFTrim.class);
     system.registerGenericUDF("ltrim", GenericUDFLTrim.class);
     system.registerGenericUDF("rtrim", GenericUDFRTrim.class);
-    system.registerGenericUDF("length", GenericUDFLength.class);
-    system.registerGenericUDF("character_length", GenericUDFCharacterLength.class);
-    system.registerGenericUDF("char_length", GenericUDFCharacterLength.class);
-    system.registerGenericUDF("octet_length", GenericUDFOctetLength.class);
+    system.registerUDF("length", UDFLength.class, false);
     system.registerUDF("reverse", UDFReverse.class, false);
     system.registerGenericUDF("field", GenericUDFField.class);
     system.registerUDF("find_in_set", UDFFindInSet.class, false);
     system.registerGenericUDF("initcap", GenericUDFInitCap.class);
 
     system.registerUDF("like", UDFLike.class, true);
-    system.registerGenericUDF("likeany", GenericUDFLikeAny.class);
-    system.registerGenericUDF("likeall", GenericUDFLikeAll.class);
     system.registerGenericUDF("rlike", GenericUDFRegExp.class);
     system.registerGenericUDF("regexp", GenericUDFRegExp.class);
     system.registerUDF("regexp_replace", UDFRegExpReplace.class, false);
@@ -423,16 +418,6 @@ public final class FunctionRegistry {
     system.registerGenericUDAF("covar_pop", new GenericUDAFCovariance());
     system.registerGenericUDAF("covar_samp", new GenericUDAFCovarianceSample());
     system.registerGenericUDAF("corr", new GenericUDAFCorrelation());
-    system.registerGenericUDAF("regr_slope", new GenericUDAFBinarySetFunctions.RegrSlope());
-    system.registerGenericUDAF("regr_intercept", new GenericUDAFBinarySetFunctions.RegrIntercept());
-    system.registerGenericUDAF("regr_r2", new GenericUDAFBinarySetFunctions.RegrR2());
-    system.registerGenericUDAF("regr_sxx", new GenericUDAFBinarySetFunctions.RegrSXX());
-    system.registerGenericUDAF("regr_syy", new GenericUDAFBinarySetFunctions.RegrSYY());
-    system.registerGenericUDAF("regr_sxy", new GenericUDAFBinarySetFunctions.RegrSXY());
-    system.registerGenericUDAF("regr_avgx", new GenericUDAFBinarySetFunctions.RegrAvgX());
-    system.registerGenericUDAF("regr_avgy", new GenericUDAFBinarySetFunctions.RegrAvgY());
-    system.registerGenericUDAF("regr_count", new GenericUDAFBinarySetFunctions.RegrCount());
-
     system.registerGenericUDAF("histogram_numeric", new GenericUDAFHistogramNumeric());
     system.registerGenericUDAF("percentile_approx", new GenericUDAFPercentileApprox());
     system.registerGenericUDAF("collect_set", new GenericUDAFCollectSet());
@@ -459,7 +444,6 @@ public final class FunctionRegistry {
     system.registerGenericUDF("struct", GenericUDFStruct.class);
     system.registerGenericUDF("named_struct", GenericUDFNamedStruct.class);
     system.registerGenericUDF("create_union", GenericUDFUnion.class);
-    system.registerGenericUDF("extract_union", GenericUDFExtractUnion.class);
 
     system.registerGenericUDF("case", GenericUDFCase.class);
     system.registerGenericUDF("when", GenericUDFWhen.class);
@@ -483,7 +467,6 @@ public final class FunctionRegistry {
     system.registerGenericUDF("greatest", GenericUDFGreatest.class);
     system.registerGenericUDF("least", GenericUDFLeast.class);
     system.registerGenericUDF("cardinality_violation", GenericUDFCardinalityViolation.class);
-    system.registerGenericUDF("width_bucket", GenericUDFWidthBucket.class);
 
     system.registerGenericUDF("from_utc_timestamp", GenericUDFFromUtcTimestamp.class);
     system.registerGenericUDF("to_utc_timestamp", GenericUDFToUtcTimestamp.class);
@@ -781,7 +764,7 @@ public final class FunctionRegistry {
    *
    * @return null if no common class could be found.
    */
-  public static synchronized TypeInfo getCommonClassForComparison(TypeInfo a, TypeInfo b) {
+  public static TypeInfo getCommonClassForComparison(TypeInfo a, TypeInfo b) {
     // If same return one of them
     if (a.equals(b)) {
       return a;
@@ -1492,20 +1475,6 @@ public final class FunctionRegistry {
   }
 
   /**
-   * Returns whether the fn is an exact equality comparison.
-   */
-  public static boolean isEq(GenericUDF fn) {
-    return fn instanceof GenericUDFOPEqual;
-  }
-
-  /**
-   * Returns whether the fn is an exact non-equality comparison.
-   */
-  public static boolean isNeq(GenericUDF fn) {
-    return fn instanceof GenericUDFOPNotEqual;
-  }
-
-  /**
    * Returns whether the exprNodeDesc is a node of "positive".
    */
   public static boolean isOpPositive(ExprNodeDesc desc) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
index f8b55da..6d6c608 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
@@ -33,7 +33,6 @@ import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.llap.LlapDaemonInfo;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.parse.OpParseContext;
@@ -403,8 +402,8 @@ public class GroupByOperator extends Operator<GroupByDesc> {
 
     newKeys = keyWrapperFactory.getKeyWrapper();
     isTez = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez");
-    isLlap = LlapDaemonInfo.INSTANCE.isLlap();
-    numExecutors = isLlap ? LlapDaemonInfo.INSTANCE.getNumExecutors() : 1;
+    isLlap = isTez && HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_EXECUTION_MODE).equals("llap");
+    numExecutors = isLlap ? HiveConf.getIntVar(hconf, HiveConf.ConfVars.LLAP_DAEMON_NUM_EXECUTORS) : 1;
     firstRow = true;
     // estimate the number of hash table entries based on the size of each
     // entry. Since the size of an entry
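
The restored lines above detect LLAP from the session configuration (execution engine "tez" plus execution mode "llap") and size the hash tables by LLAP_DAEMON_NUM_EXECUTORS, instead of asking the daemon-side LlapDaemonInfo singleton. A minimal sketch of that detection in isolation, assuming hive-exec and its HiveConf are on the classpath:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class LlapDetectSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        boolean isTez = "tez".equals(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE));
        boolean isLlap = isTez
            && "llap".equals(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_MODE));
        int numExecutors = isLlap ? HiveConf.getIntVar(conf, HiveConf.ConfVars.LLAP_DAEMON_NUM_EXECUTORS) : 1;
        System.out.println("isLlap=" + isLlap + ", numExecutors=" + numExecutors);
      }
    }
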

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 56be518..29b72a0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -153,7 +153,7 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
         throw new HiveException("Target " + targetPath + " is not a local directory.");
       }
     } else {
-      if (!FileUtils.mkdir(dstFs, targetPath, conf)) {
+      if (!FileUtils.mkdir(dstFs, targetPath, false, conf)) {
         throw new HiveException("Failed to create local target directory " + targetPath);
       }
     }
@@ -182,6 +182,9 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
         actualPath = actualPath.getParent();
       }
       fs.mkdirs(mkDirPath);
+      if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS)) {
+        HdfsUtils.setFullFileStatus(conf, new HdfsUtils.HadoopFileStatus(conf, fs, actualPath), fs, mkDirPath, true);
+      }
     }
     return deletePath;
   }
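
The hunk above re-adds the conditional permission inheritance after fs.mkdirs when HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS is set; Hive routes this through HdfsUtils.setFullFileStatus. A rough, conceptual sketch of the same idea using only the public Hadoop FileSystem API (local filesystem, made-up paths; the real helper also copies owner, group and ACLs):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class InheritPermsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);

        Path parent = new Path("/tmp/inherit-perms-demo");
        Path child = new Path(parent, "new-subdir");
        fs.mkdirs(parent);
        fs.mkdirs(child);

        // Copy the parent's permission bits onto the freshly created directory.
        FsPermission parentPerm = fs.getFileStatus(parent).getPermission();
        fs.setPermission(child, parentPerm);
        System.out.println(child + " -> " + fs.getFileStatus(child).getPermission());
      }
    }
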
@@ -415,7 +418,7 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
     Utilities.LOG14535.info("loadPartition called from " + tbd.getSourcePath()
         + " into " + tbd.getTable().getTableName());
     boolean isCommitMmWrite = tbd.isCommitMmWrite();
-    db.loadPartition(tbd.getSourcePath(), tbd.getTable().getTableName(),
+    db.loadSinglePartition(tbd.getSourcePath(), tbd.getTable().getTableName(),
         tbd.getPartitionSpec(), tbd.getReplace(),
         tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(),
         (work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID &&


[20/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index d6a90cc..3f2fa56 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -4968,25 +4968,22 @@ class GetOpenTxnsResponse {
 
   GetOpenTxnsResponse(const GetOpenTxnsResponse&);
   GetOpenTxnsResponse& operator=(const GetOpenTxnsResponse&);
-  GetOpenTxnsResponse() : txn_high_water_mark(0), min_open_txn(0), abortedBits() {
+  GetOpenTxnsResponse() : txn_high_water_mark(0), min_open_txn(0) {
   }
 
   virtual ~GetOpenTxnsResponse() throw();
   int64_t txn_high_water_mark;
-  std::vector<int64_t>  open_txns;
+  std::set<int64_t>  open_txns;
   int64_t min_open_txn;
-  std::string abortedBits;
 
   _GetOpenTxnsResponse__isset __isset;
 
   void __set_txn_high_water_mark(const int64_t val);
 
-  void __set_open_txns(const std::vector<int64_t> & val);
+  void __set_open_txns(const std::set<int64_t> & val);
 
   void __set_min_open_txn(const int64_t val);
 
-  void __set_abortedBits(const std::string& val);
-
   bool operator == (const GetOpenTxnsResponse & rhs) const
   {
     if (!(txn_high_water_mark == rhs.txn_high_water_mark))
@@ -4997,8 +4994,6 @@ class GetOpenTxnsResponse {
       return false;
     else if (__isset.min_open_txn && !(min_open_txn == rhs.min_open_txn))
       return false;
-    if (!(abortedBits == rhs.abortedBits))
-      return false;
     return true;
   }
   bool operator != (const GetOpenTxnsResponse &rhs) const {
@@ -6593,8 +6588,7 @@ inline std::ostream& operator<<(std::ostream& out, const CurrentNotificationEven
 }
 
 typedef struct _InsertEventRequestData__isset {
-  _InsertEventRequestData__isset() : replace(false), filesAddedChecksum(false) {}
-  bool replace :1;
+  _InsertEventRequestData__isset() : filesAddedChecksum(false) {}
   bool filesAddedChecksum :1;
 } _InsertEventRequestData__isset;
 
@@ -6603,28 +6597,21 @@ class InsertEventRequestData {
 
   InsertEventRequestData(const InsertEventRequestData&);
   InsertEventRequestData& operator=(const InsertEventRequestData&);
-  InsertEventRequestData() : replace(0) {
+  InsertEventRequestData() {
   }
 
   virtual ~InsertEventRequestData() throw();
-  bool replace;
   std::vector<std::string>  filesAdded;
   std::vector<std::string>  filesAddedChecksum;
 
   _InsertEventRequestData__isset __isset;
 
-  void __set_replace(const bool val);
-
   void __set_filesAdded(const std::vector<std::string> & val);
 
   void __set_filesAddedChecksum(const std::vector<std::string> & val);
 
   bool operator == (const InsertEventRequestData & rhs) const
   {
-    if (__isset.replace != rhs.__isset.replace)
-      return false;
-    else if (__isset.replace && !(replace == rhs.replace))
-      return false;
     if (!(filesAdded == rhs.filesAdded))
       return false;
     if (__isset.filesAddedChecksum != rhs.__isset.filesAddedChecksum)

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java
index 2852310..8230d38 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java
@@ -39,9 +39,8 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetOpenTxnsResponse");
 
   private static final org.apache.thrift.protocol.TField TXN_HIGH_WATER_MARK_FIELD_DESC = new org.apache.thrift.protocol.TField("txn_high_water_mark", org.apache.thrift.protocol.TType.I64, (short)1);
-  private static final org.apache.thrift.protocol.TField OPEN_TXNS_FIELD_DESC = new org.apache.thrift.protocol.TField("open_txns", org.apache.thrift.protocol.TType.LIST, (short)2);
+  private static final org.apache.thrift.protocol.TField OPEN_TXNS_FIELD_DESC = new org.apache.thrift.protocol.TField("open_txns", org.apache.thrift.protocol.TType.SET, (short)2);
   private static final org.apache.thrift.protocol.TField MIN_OPEN_TXN_FIELD_DESC = new org.apache.thrift.protocol.TField("min_open_txn", org.apache.thrift.protocol.TType.I64, (short)3);
-  private static final org.apache.thrift.protocol.TField ABORTED_BITS_FIELD_DESC = new org.apache.thrift.protocol.TField("abortedBits", org.apache.thrift.protocol.TType.STRING, (short)4);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -50,16 +49,14 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
   }
 
   private long txn_high_water_mark; // required
-  private List<Long> open_txns; // required
+  private Set<Long> open_txns; // required
   private long min_open_txn; // optional
-  private ByteBuffer abortedBits; // required
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
     TXN_HIGH_WATER_MARK((short)1, "txn_high_water_mark"),
     OPEN_TXNS((short)2, "open_txns"),
-    MIN_OPEN_TXN((short)3, "min_open_txn"),
-    ABORTED_BITS((short)4, "abortedBits");
+    MIN_OPEN_TXN((short)3, "min_open_txn");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -80,8 +77,6 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
           return OPEN_TXNS;
         case 3: // MIN_OPEN_TXN
           return MIN_OPEN_TXN;
-        case 4: // ABORTED_BITS
-          return ABORTED_BITS;
         default:
           return null;
       }
@@ -132,12 +127,10 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
     tmpMap.put(_Fields.TXN_HIGH_WATER_MARK, new org.apache.thrift.meta_data.FieldMetaData("txn_high_water_mark", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     tmpMap.put(_Fields.OPEN_TXNS, new org.apache.thrift.meta_data.FieldMetaData("open_txns", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+        new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
     tmpMap.put(_Fields.MIN_OPEN_TXN, new org.apache.thrift.meta_data.FieldMetaData("min_open_txn", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    tmpMap.put(_Fields.ABORTED_BITS, new org.apache.thrift.meta_data.FieldMetaData("abortedBits", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetOpenTxnsResponse.class, metaDataMap);
   }
@@ -147,14 +140,12 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
 
   public GetOpenTxnsResponse(
     long txn_high_water_mark,
-    List<Long> open_txns,
-    ByteBuffer abortedBits)
+    Set<Long> open_txns)
   {
     this();
     this.txn_high_water_mark = txn_high_water_mark;
     setTxn_high_water_markIsSet(true);
     this.open_txns = open_txns;
-    this.abortedBits = org.apache.thrift.TBaseHelper.copyBinary(abortedBits);
   }
 
   /**
@@ -164,13 +155,10 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
     __isset_bitfield = other.__isset_bitfield;
     this.txn_high_water_mark = other.txn_high_water_mark;
     if (other.isSetOpen_txns()) {
-      List<Long> __this__open_txns = new ArrayList<Long>(other.open_txns);
+      Set<Long> __this__open_txns = new HashSet<Long>(other.open_txns);
       this.open_txns = __this__open_txns;
     }
     this.min_open_txn = other.min_open_txn;
-    if (other.isSetAbortedBits()) {
-      this.abortedBits = org.apache.thrift.TBaseHelper.copyBinary(other.abortedBits);
-    }
   }
 
   public GetOpenTxnsResponse deepCopy() {
@@ -184,7 +172,6 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
     this.open_txns = null;
     setMin_open_txnIsSet(false);
     this.min_open_txn = 0;
-    this.abortedBits = null;
   }
 
   public long getTxn_high_water_mark() {
@@ -219,16 +206,16 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
 
   public void addToOpen_txns(long elem) {
     if (this.open_txns == null) {
-      this.open_txns = new ArrayList<Long>();
+      this.open_txns = new HashSet<Long>();
     }
     this.open_txns.add(elem);
   }
 
-  public List<Long> getOpen_txns() {
+  public Set<Long> getOpen_txns() {
     return this.open_txns;
   }
 
-  public void setOpen_txns(List<Long> open_txns) {
+  public void setOpen_txns(Set<Long> open_txns) {
     this.open_txns = open_txns;
   }
 
@@ -269,38 +256,6 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MIN_OPEN_TXN_ISSET_ID, value);
   }
 
-  public byte[] getAbortedBits() {
-    setAbortedBits(org.apache.thrift.TBaseHelper.rightSize(abortedBits));
-    return abortedBits == null ? null : abortedBits.array();
-  }
-
-  public ByteBuffer bufferForAbortedBits() {
-    return org.apache.thrift.TBaseHelper.copyBinary(abortedBits);
-  }
-
-  public void setAbortedBits(byte[] abortedBits) {
-    this.abortedBits = abortedBits == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(abortedBits, abortedBits.length));
-  }
-
-  public void setAbortedBits(ByteBuffer abortedBits) {
-    this.abortedBits = org.apache.thrift.TBaseHelper.copyBinary(abortedBits);
-  }
-
-  public void unsetAbortedBits() {
-    this.abortedBits = null;
-  }
-
-  /** Returns true if field abortedBits is set (has been assigned a value) and false otherwise */
-  public boolean isSetAbortedBits() {
-    return this.abortedBits != null;
-  }
-
-  public void setAbortedBitsIsSet(boolean value) {
-    if (!value) {
-      this.abortedBits = null;
-    }
-  }
-
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case TXN_HIGH_WATER_MARK:
@@ -315,7 +270,7 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
       if (value == null) {
         unsetOpen_txns();
       } else {
-        setOpen_txns((List<Long>)value);
+        setOpen_txns((Set<Long>)value);
       }
       break;
 
@@ -327,14 +282,6 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
       }
       break;
 
-    case ABORTED_BITS:
-      if (value == null) {
-        unsetAbortedBits();
-      } else {
-        setAbortedBits((ByteBuffer)value);
-      }
-      break;
-
     }
   }
 
@@ -349,9 +296,6 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
     case MIN_OPEN_TXN:
       return getMin_open_txn();
 
-    case ABORTED_BITS:
-      return getAbortedBits();
-
     }
     throw new IllegalStateException();
   }
@@ -369,8 +313,6 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
       return isSetOpen_txns();
     case MIN_OPEN_TXN:
       return isSetMin_open_txn();
-    case ABORTED_BITS:
-      return isSetAbortedBits();
     }
     throw new IllegalStateException();
   }
@@ -415,15 +357,6 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
         return false;
     }
 
-    boolean this_present_abortedBits = true && this.isSetAbortedBits();
-    boolean that_present_abortedBits = true && that.isSetAbortedBits();
-    if (this_present_abortedBits || that_present_abortedBits) {
-      if (!(this_present_abortedBits && that_present_abortedBits))
-        return false;
-      if (!this.abortedBits.equals(that.abortedBits))
-        return false;
-    }
-
     return true;
   }
 
@@ -446,11 +379,6 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
     if (present_min_open_txn)
       list.add(min_open_txn);
 
-    boolean present_abortedBits = true && (isSetAbortedBits());
-    list.add(present_abortedBits);
-    if (present_abortedBits)
-      list.add(abortedBits);
-
     return list.hashCode();
   }
 
@@ -492,16 +420,6 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(isSetAbortedBits()).compareTo(other.isSetAbortedBits());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetAbortedBits()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.abortedBits, other.abortedBits);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
     return 0;
   }
 
@@ -539,14 +457,6 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
       sb.append(this.min_open_txn);
       first = false;
     }
-    if (!first) sb.append(", ");
-    sb.append("abortedBits:");
-    if (this.abortedBits == null) {
-      sb.append("null");
-    } else {
-      org.apache.thrift.TBaseHelper.toString(this.abortedBits, sb);
-    }
-    first = false;
     sb.append(")");
     return sb.toString();
   }
@@ -561,10 +471,6 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
       throw new org.apache.thrift.protocol.TProtocolException("Required field 'open_txns' is unset! Struct:" + toString());
     }
 
-    if (!isSetAbortedBits()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'abortedBits' is unset! Struct:" + toString());
-    }
-
     // check for sub-struct validity
   }
 
@@ -613,17 +519,17 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
             }
             break;
           case 2: // OPEN_TXNS
-            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+            if (schemeField.type == org.apache.thrift.protocol.TType.SET) {
               {
-                org.apache.thrift.protocol.TList _list468 = iprot.readListBegin();
-                struct.open_txns = new ArrayList<Long>(_list468.size);
+                org.apache.thrift.protocol.TSet _set468 = iprot.readSetBegin();
+                struct.open_txns = new HashSet<Long>(2*_set468.size);
                 long _elem469;
-                for (int _i470 = 0; _i470 < _list468.size; ++_i470)
+                for (int _i470 = 0; _i470 < _set468.size; ++_i470)
                 {
                   _elem469 = iprot.readI64();
                   struct.open_txns.add(_elem469);
                 }
-                iprot.readListEnd();
+                iprot.readSetEnd();
               }
               struct.setOpen_txnsIsSet(true);
             } else { 
@@ -638,14 +544,6 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 4: // ABORTED_BITS
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.abortedBits = iprot.readBinary();
-              struct.setAbortedBitsIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -665,12 +563,12 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
       if (struct.open_txns != null) {
         oprot.writeFieldBegin(OPEN_TXNS_FIELD_DESC);
         {
-          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.open_txns.size()));
+          oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.open_txns.size()));
           for (long _iter471 : struct.open_txns)
           {
             oprot.writeI64(_iter471);
           }
-          oprot.writeListEnd();
+          oprot.writeSetEnd();
         }
         oprot.writeFieldEnd();
       }
@@ -679,11 +577,6 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
         oprot.writeI64(struct.min_open_txn);
         oprot.writeFieldEnd();
       }
-      if (struct.abortedBits != null) {
-        oprot.writeFieldBegin(ABORTED_BITS_FIELD_DESC);
-        oprot.writeBinary(struct.abortedBits);
-        oprot.writeFieldEnd();
-      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -709,7 +602,6 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
           oprot.writeI64(_iter472);
         }
       }
-      oprot.writeBinary(struct.abortedBits);
       BitSet optionals = new BitSet();
       if (struct.isSetMin_open_txn()) {
         optionals.set(0);
@@ -726,18 +618,16 @@ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsR
       struct.txn_high_water_mark = iprot.readI64();
       struct.setTxn_high_water_markIsSet(true);
       {
-        org.apache.thrift.protocol.TList _list473 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
-        struct.open_txns = new ArrayList<Long>(_list473.size);
+        org.apache.thrift.protocol.TSet _set473 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+        struct.open_txns = new HashSet<Long>(2*_set473.size);
         long _elem474;
-        for (int _i475 = 0; _i475 < _list473.size; ++_i475)
+        for (int _i475 = 0; _i475 < _set473.size; ++_i475)
         {
           _elem474 = iprot.readI64();
           struct.open_txns.add(_elem474);
         }
       }
       struct.setOpen_txnsIsSet(true);
-      struct.abortedBits = iprot.readBinary();
-      struct.setAbortedBitsIsSet(true);
       BitSet incoming = iprot.readBitSet(1);
       if (incoming.get(0)) {
         struct.min_open_txn = iprot.readI64();
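
To make the shape of the reverted struct concrete: open_txns is a Set<Long> again and the abortedBits field is gone, so the required-field constructor takes only the high-water mark and the open set. A tiny usage sketch, assuming the reverted generated classes are on the classpath:

    import java.util.HashSet;
    import java.util.Set;

    import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;

    public class OpenTxnsSketch {
      public static void main(String[] args) {
        Set<Long> openTxns = new HashSet<>();
        openTxns.add(7L);
        // Two required fields after the revert: txn_high_water_mark and open_txns.
        GetOpenTxnsResponse resp = new GetOpenTxnsResponse(42L, openTxns);
        resp.addToOpen_txns(9L);
        System.out.println(resp);
      }
    }
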

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java
index 354e634..fd1dc06 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java
@@ -38,9 +38,8 @@ import org.slf4j.LoggerFactory;
 public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEventRequestData, InsertEventRequestData._Fields>, java.io.Serializable, Cloneable, Comparable<InsertEventRequestData> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InsertEventRequestData");
 
-  private static final org.apache.thrift.protocol.TField REPLACE_FIELD_DESC = new org.apache.thrift.protocol.TField("replace", org.apache.thrift.protocol.TType.BOOL, (short)1);
-  private static final org.apache.thrift.protocol.TField FILES_ADDED_FIELD_DESC = new org.apache.thrift.protocol.TField("filesAdded", org.apache.thrift.protocol.TType.LIST, (short)2);
-  private static final org.apache.thrift.protocol.TField FILES_ADDED_CHECKSUM_FIELD_DESC = new org.apache.thrift.protocol.TField("filesAddedChecksum", org.apache.thrift.protocol.TType.LIST, (short)3);
+  private static final org.apache.thrift.protocol.TField FILES_ADDED_FIELD_DESC = new org.apache.thrift.protocol.TField("filesAdded", org.apache.thrift.protocol.TType.LIST, (short)1);
+  private static final org.apache.thrift.protocol.TField FILES_ADDED_CHECKSUM_FIELD_DESC = new org.apache.thrift.protocol.TField("filesAddedChecksum", org.apache.thrift.protocol.TType.LIST, (short)2);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -48,15 +47,13 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
     schemes.put(TupleScheme.class, new InsertEventRequestDataTupleSchemeFactory());
   }
 
-  private boolean replace; // optional
   private List<String> filesAdded; // required
   private List<String> filesAddedChecksum; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    REPLACE((short)1, "replace"),
-    FILES_ADDED((short)2, "filesAdded"),
-    FILES_ADDED_CHECKSUM((short)3, "filesAddedChecksum");
+    FILES_ADDED((short)1, "filesAdded"),
+    FILES_ADDED_CHECKSUM((short)2, "filesAddedChecksum");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -71,11 +68,9 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
      */
     public static _Fields findByThriftId(int fieldId) {
       switch(fieldId) {
-        case 1: // REPLACE
-          return REPLACE;
-        case 2: // FILES_ADDED
+        case 1: // FILES_ADDED
           return FILES_ADDED;
-        case 3: // FILES_ADDED_CHECKSUM
+        case 2: // FILES_ADDED_CHECKSUM
           return FILES_ADDED_CHECKSUM;
         default:
           return null;
@@ -117,14 +112,10 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
   }
 
   // isset id assignments
-  private static final int __REPLACE_ISSET_ID = 0;
-  private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.REPLACE,_Fields.FILES_ADDED_CHECKSUM};
+  private static final _Fields optionals[] = {_Fields.FILES_ADDED_CHECKSUM};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.REPLACE, new org.apache.thrift.meta_data.FieldMetaData("replace", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
     tmpMap.put(_Fields.FILES_ADDED, new org.apache.thrift.meta_data.FieldMetaData("filesAdded", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
@@ -149,8 +140,6 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
    * Performs a deep copy on <i>other</i>.
    */
   public InsertEventRequestData(InsertEventRequestData other) {
-    __isset_bitfield = other.__isset_bitfield;
-    this.replace = other.replace;
     if (other.isSetFilesAdded()) {
       List<String> __this__filesAdded = new ArrayList<String>(other.filesAdded);
       this.filesAdded = __this__filesAdded;
@@ -167,34 +156,10 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
 
   @Override
   public void clear() {
-    setReplaceIsSet(false);
-    this.replace = false;
     this.filesAdded = null;
     this.filesAddedChecksum = null;
   }
 
-  public boolean isReplace() {
-    return this.replace;
-  }
-
-  public void setReplace(boolean replace) {
-    this.replace = replace;
-    setReplaceIsSet(true);
-  }
-
-  public void unsetReplace() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REPLACE_ISSET_ID);
-  }
-
-  /** Returns true if field replace is set (has been assigned a value) and false otherwise */
-  public boolean isSetReplace() {
-    return EncodingUtils.testBit(__isset_bitfield, __REPLACE_ISSET_ID);
-  }
-
-  public void setReplaceIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REPLACE_ISSET_ID, value);
-  }
-
   public int getFilesAddedSize() {
     return (this.filesAdded == null) ? 0 : this.filesAdded.size();
   }
@@ -273,14 +238,6 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
 
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
-    case REPLACE:
-      if (value == null) {
-        unsetReplace();
-      } else {
-        setReplace((Boolean)value);
-      }
-      break;
-
     case FILES_ADDED:
       if (value == null) {
         unsetFilesAdded();
@@ -302,9 +259,6 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
 
   public Object getFieldValue(_Fields field) {
     switch (field) {
-    case REPLACE:
-      return isReplace();
-
     case FILES_ADDED:
       return getFilesAdded();
 
@@ -322,8 +276,6 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
     }
 
     switch (field) {
-    case REPLACE:
-      return isSetReplace();
     case FILES_ADDED:
       return isSetFilesAdded();
     case FILES_ADDED_CHECKSUM:
@@ -345,15 +297,6 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
     if (that == null)
       return false;
 
-    boolean this_present_replace = true && this.isSetReplace();
-    boolean that_present_replace = true && that.isSetReplace();
-    if (this_present_replace || that_present_replace) {
-      if (!(this_present_replace && that_present_replace))
-        return false;
-      if (this.replace != that.replace)
-        return false;
-    }
-
     boolean this_present_filesAdded = true && this.isSetFilesAdded();
     boolean that_present_filesAdded = true && that.isSetFilesAdded();
     if (this_present_filesAdded || that_present_filesAdded) {
@@ -379,11 +322,6 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
   public int hashCode() {
     List<Object> list = new ArrayList<Object>();
 
-    boolean present_replace = true && (isSetReplace());
-    list.add(present_replace);
-    if (present_replace)
-      list.add(replace);
-
     boolean present_filesAdded = true && (isSetFilesAdded());
     list.add(present_filesAdded);
     if (present_filesAdded)
@@ -405,16 +343,6 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
 
     int lastComparison = 0;
 
-    lastComparison = Boolean.valueOf(isSetReplace()).compareTo(other.isSetReplace());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetReplace()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.replace, other.replace);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
     lastComparison = Boolean.valueOf(isSetFilesAdded()).compareTo(other.isSetFilesAdded());
     if (lastComparison != 0) {
       return lastComparison;
@@ -455,12 +383,6 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
     StringBuilder sb = new StringBuilder("InsertEventRequestData(");
     boolean first = true;
 
-    if (isSetReplace()) {
-      sb.append("replace:");
-      sb.append(this.replace);
-      first = false;
-    }
-    if (!first) sb.append(", ");
     sb.append("filesAdded:");
     if (this.filesAdded == null) {
       sb.append("null");
@@ -501,8 +423,6 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
 
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
     } catch (org.apache.thrift.TException te) {
       throw new java.io.IOException(te);
@@ -527,15 +447,7 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
           break;
         }
         switch (schemeField.id) {
-          case 1: // REPLACE
-            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
-              struct.replace = iprot.readBool();
-              struct.setReplaceIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // FILES_ADDED
+          case 1: // FILES_ADDED
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
                 org.apache.thrift.protocol.TList _list558 = iprot.readListBegin();
@@ -553,7 +465,7 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 3: // FILES_ADDED_CHECKSUM
+          case 2: // FILES_ADDED_CHECKSUM
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
                 org.apache.thrift.protocol.TList _list561 = iprot.readListBegin();
@@ -584,11 +496,6 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.isSetReplace()) {
-        oprot.writeFieldBegin(REPLACE_FIELD_DESC);
-        oprot.writeBool(struct.replace);
-        oprot.writeFieldEnd();
-      }
       if (struct.filesAdded != null) {
         oprot.writeFieldBegin(FILES_ADDED_FIELD_DESC);
         {
@@ -640,16 +547,10 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
         }
       }
       BitSet optionals = new BitSet();
-      if (struct.isSetReplace()) {
-        optionals.set(0);
-      }
       if (struct.isSetFilesAddedChecksum()) {
-        optionals.set(1);
-      }
-      oprot.writeBitSet(optionals, 2);
-      if (struct.isSetReplace()) {
-        oprot.writeBool(struct.replace);
+        optionals.set(0);
       }
+      oprot.writeBitSet(optionals, 1);
       if (struct.isSetFilesAddedChecksum()) {
         {
           oprot.writeI32(struct.filesAddedChecksum.size());
@@ -675,12 +576,8 @@ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEve
         }
       }
       struct.setFilesAddedIsSet(true);
-      BitSet incoming = iprot.readBitSet(2);
+      BitSet incoming = iprot.readBitSet(1);
       if (incoming.get(0)) {
-        struct.replace = iprot.readBool();
-        struct.setReplaceIsSet(true);
-      }
-      if (incoming.get(1)) {
         {
           org.apache.thrift.protocol.TList _list571 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
           struct.filesAddedChecksum = new ArrayList<String>(_list571.size);


[17/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php
index a3201cc..4dcfc76 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -12130,10 +12130,6 @@ class GetOpenTxnsResponse {
    * @var int
    */
   public $min_open_txn = null;
-  /**
-   * @var string
-   */
-  public $abortedBits = null;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
@@ -12144,7 +12140,7 @@ class GetOpenTxnsResponse {
           ),
         2 => array(
           'var' => 'open_txns',
-          'type' => TType::LST,
+          'type' => TType::SET,
           'etype' => TType::I64,
           'elem' => array(
             'type' => TType::I64,
@@ -12154,10 +12150,6 @@ class GetOpenTxnsResponse {
           'var' => 'min_open_txn',
           'type' => TType::I64,
           ),
-        4 => array(
-          'var' => 'abortedBits',
-          'type' => TType::STRING,
-          ),
         );
     }
     if (is_array($vals)) {
@@ -12170,9 +12162,6 @@ class GetOpenTxnsResponse {
       if (isset($vals['min_open_txn'])) {
         $this->min_open_txn = $vals['min_open_txn'];
       }
-      if (isset($vals['abortedBits'])) {
-        $this->abortedBits = $vals['abortedBits'];
-      }
     }
   }
 
@@ -12203,18 +12192,22 @@ class GetOpenTxnsResponse {
           }
           break;
         case 2:
-          if ($ftype == TType::LST) {
+          if ($ftype == TType::SET) {
             $this->open_txns = array();
             $_size413 = 0;
             $_etype416 = 0;
-            $xfer += $input->readListBegin($_etype416, $_size413);
+            $xfer += $input->readSetBegin($_etype416, $_size413);
             for ($_i417 = 0; $_i417 < $_size413; ++$_i417)
             {
               $elem418 = null;
               $xfer += $input->readI64($elem418);
-              $this->open_txns []= $elem418;
+              if (is_scalar($elem418)) {
+                $this->open_txns[$elem418] = true;
+              } else {
+                $this->open_txns []= $elem418;
+              }
             }
-            $xfer += $input->readListEnd();
+            $xfer += $input->readSetEnd();
           } else {
             $xfer += $input->skip($ftype);
           }
@@ -12226,13 +12219,6 @@ class GetOpenTxnsResponse {
             $xfer += $input->skip($ftype);
           }
           break;
-        case 4:
-          if ($ftype == TType::STRING) {
-            $xfer += $input->readString($this->abortedBits);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
         default:
           $xfer += $input->skip($ftype);
           break;
@@ -12255,16 +12241,20 @@ class GetOpenTxnsResponse {
       if (!is_array($this->open_txns)) {
         throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
       }
-      $xfer += $output->writeFieldBegin('open_txns', TType::LST, 2);
+      $xfer += $output->writeFieldBegin('open_txns', TType::SET, 2);
       {
-        $output->writeListBegin(TType::I64, count($this->open_txns));
+        $output->writeSetBegin(TType::I64, count($this->open_txns));
         {
-          foreach ($this->open_txns as $iter419)
+          foreach ($this->open_txns as $iter419 => $iter420)
           {
+            if (is_scalar($iter420)) {
             $xfer += $output->writeI64($iter419);
+            } else {
+            $xfer += $output->writeI64($iter420);
+            }
           }
         }
-        $output->writeListEnd();
+        $output->writeSetEnd();
       }
       $xfer += $output->writeFieldEnd();
     }
@@ -12273,11 +12263,6 @@ class GetOpenTxnsResponse {
       $xfer += $output->writeI64($this->min_open_txn);
       $xfer += $output->writeFieldEnd();
     }
-    if ($this->abortedBits !== null) {
-      $xfer += $output->writeFieldBegin('abortedBits', TType::STRING, 4);
-      $xfer += $output->writeString($this->abortedBits);
-      $xfer += $output->writeFieldEnd();
-    }
     $xfer += $output->writeFieldStop();
     $xfer += $output->writeStructEnd();
     return $xfer;
@@ -12479,14 +12464,14 @@ class OpenTxnsResponse {
         case 1:
           if ($ftype == TType::LST) {
             $this->txn_ids = array();
-            $_size420 = 0;
-            $_etype423 = 0;
-            $xfer += $input->readListBegin($_etype423, $_size420);
-            for ($_i424 = 0; $_i424 < $_size420; ++$_i424)
+            $_size421 = 0;
+            $_etype424 = 0;
+            $xfer += $input->readListBegin($_etype424, $_size421);
+            for ($_i425 = 0; $_i425 < $_size421; ++$_i425)
             {
-              $elem425 = null;
-              $xfer += $input->readI64($elem425);
-              $this->txn_ids []= $elem425;
+              $elem426 = null;
+              $xfer += $input->readI64($elem426);
+              $this->txn_ids []= $elem426;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -12514,9 +12499,9 @@ class OpenTxnsResponse {
       {
         $output->writeListBegin(TType::I64, count($this->txn_ids));
         {
-          foreach ($this->txn_ids as $iter426)
+          foreach ($this->txn_ids as $iter427)
           {
-            $xfer += $output->writeI64($iter426);
+            $xfer += $output->writeI64($iter427);
           }
         }
         $output->writeListEnd();
@@ -12655,14 +12640,14 @@ class AbortTxnsRequest {
         case 1:
           if ($ftype == TType::LST) {
             $this->txn_ids = array();
-            $_size427 = 0;
-            $_etype430 = 0;
-            $xfer += $input->readListBegin($_etype430, $_size427);
-            for ($_i431 = 0; $_i431 < $_size427; ++$_i431)
+            $_size428 = 0;
+            $_etype431 = 0;
+            $xfer += $input->readListBegin($_etype431, $_size428);
+            for ($_i432 = 0; $_i432 < $_size428; ++$_i432)
             {
-              $elem432 = null;
-              $xfer += $input->readI64($elem432);
-              $this->txn_ids []= $elem432;
+              $elem433 = null;
+              $xfer += $input->readI64($elem433);
+              $this->txn_ids []= $elem433;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -12690,9 +12675,9 @@ class AbortTxnsRequest {
       {
         $output->writeListBegin(TType::I64, count($this->txn_ids));
         {
-          foreach ($this->txn_ids as $iter433)
+          foreach ($this->txn_ids as $iter434)
           {
-            $xfer += $output->writeI64($iter433);
+            $xfer += $output->writeI64($iter434);
           }
         }
         $output->writeListEnd();
@@ -13112,15 +13097,15 @@ class LockRequest {
         case 1:
           if ($ftype == TType::LST) {
             $this->component = array();
-            $_size434 = 0;
-            $_etype437 = 0;
-            $xfer += $input->readListBegin($_etype437, $_size434);
-            for ($_i438 = 0; $_i438 < $_size434; ++$_i438)
+            $_size435 = 0;
+            $_etype438 = 0;
+            $xfer += $input->readListBegin($_etype438, $_size435);
+            for ($_i439 = 0; $_i439 < $_size435; ++$_i439)
             {
-              $elem439 = null;
-              $elem439 = new \metastore\LockComponent();
-              $xfer += $elem439->read($input);
-              $this->component []= $elem439;
+              $elem440 = null;
+              $elem440 = new \metastore\LockComponent();
+              $xfer += $elem440->read($input);
+              $this->component []= $elem440;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -13176,9 +13161,9 @@ class LockRequest {
       {
         $output->writeListBegin(TType::STRUCT, count($this->component));
         {
-          foreach ($this->component as $iter440)
+          foreach ($this->component as $iter441)
           {
-            $xfer += $iter440->write($output);
+            $xfer += $iter441->write($output);
           }
         }
         $output->writeListEnd();
@@ -14121,15 +14106,15 @@ class ShowLocksResponse {
         case 1:
           if ($ftype == TType::LST) {
             $this->locks = array();
-            $_size441 = 0;
-            $_etype444 = 0;
-            $xfer += $input->readListBegin($_etype444, $_size441);
-            for ($_i445 = 0; $_i445 < $_size441; ++$_i445)
+            $_size442 = 0;
+            $_etype445 = 0;
+            $xfer += $input->readListBegin($_etype445, $_size442);
+            for ($_i446 = 0; $_i446 < $_size442; ++$_i446)
             {
-              $elem446 = null;
-              $elem446 = new \metastore\ShowLocksResponseElement();
-              $xfer += $elem446->read($input);
-              $this->locks []= $elem446;
+              $elem447 = null;
+              $elem447 = new \metastore\ShowLocksResponseElement();
+              $xfer += $elem447->read($input);
+              $this->locks []= $elem447;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -14157,9 +14142,9 @@ class ShowLocksResponse {
       {
         $output->writeListBegin(TType::STRUCT, count($this->locks));
         {
-          foreach ($this->locks as $iter447)
+          foreach ($this->locks as $iter448)
           {
-            $xfer += $iter447->write($output);
+            $xfer += $iter448->write($output);
           }
         }
         $output->writeListEnd();
@@ -14434,17 +14419,17 @@ class HeartbeatTxnRangeResponse {
         case 1:
           if ($ftype == TType::SET) {
             $this->aborted = array();
-            $_size448 = 0;
-            $_etype451 = 0;
-            $xfer += $input->readSetBegin($_etype451, $_size448);
-            for ($_i452 = 0; $_i452 < $_size448; ++$_i452)
+            $_size449 = 0;
+            $_etype452 = 0;
+            $xfer += $input->readSetBegin($_etype452, $_size449);
+            for ($_i453 = 0; $_i453 < $_size449; ++$_i453)
             {
-              $elem453 = null;
-              $xfer += $input->readI64($elem453);
-              if (is_scalar($elem453)) {
-                $this->aborted[$elem453] = true;
+              $elem454 = null;
+              $xfer += $input->readI64($elem454);
+              if (is_scalar($elem454)) {
+                $this->aborted[$elem454] = true;
               } else {
-                $this->aborted []= $elem453;
+                $this->aborted []= $elem454;
               }
             }
             $xfer += $input->readSetEnd();
@@ -14455,17 +14440,17 @@ class HeartbeatTxnRangeResponse {
         case 2:
           if ($ftype == TType::SET) {
             $this->nosuch = array();
-            $_size454 = 0;
-            $_etype457 = 0;
-            $xfer += $input->readSetBegin($_etype457, $_size454);
-            for ($_i458 = 0; $_i458 < $_size454; ++$_i458)
+            $_size455 = 0;
+            $_etype458 = 0;
+            $xfer += $input->readSetBegin($_etype458, $_size455);
+            for ($_i459 = 0; $_i459 < $_size455; ++$_i459)
             {
-              $elem459 = null;
-              $xfer += $input->readI64($elem459);
-              if (is_scalar($elem459)) {
-                $this->nosuch[$elem459] = true;
+              $elem460 = null;
+              $xfer += $input->readI64($elem460);
+              if (is_scalar($elem460)) {
+                $this->nosuch[$elem460] = true;
               } else {
-                $this->nosuch []= $elem459;
+                $this->nosuch []= $elem460;
               }
             }
             $xfer += $input->readSetEnd();
@@ -14494,12 +14479,12 @@ class HeartbeatTxnRangeResponse {
       {
         $output->writeSetBegin(TType::I64, count($this->aborted));
         {
-          foreach ($this->aborted as $iter460 => $iter461)
+          foreach ($this->aborted as $iter461 => $iter462)
           {
-            if (is_scalar($iter461)) {
-            $xfer += $output->writeI64($iter460);
-            } else {
+            if (is_scalar($iter462)) {
             $xfer += $output->writeI64($iter461);
+            } else {
+            $xfer += $output->writeI64($iter462);
             }
           }
         }
@@ -14515,12 +14500,12 @@ class HeartbeatTxnRangeResponse {
       {
         $output->writeSetBegin(TType::I64, count($this->nosuch));
         {
-          foreach ($this->nosuch as $iter462 => $iter463)
+          foreach ($this->nosuch as $iter463 => $iter464)
           {
-            if (is_scalar($iter463)) {
-            $xfer += $output->writeI64($iter462);
-            } else {
+            if (is_scalar($iter464)) {
             $xfer += $output->writeI64($iter463);
+            } else {
+            $xfer += $output->writeI64($iter464);
             }
           }
         }
@@ -14679,17 +14664,17 @@ class CompactionRequest {
         case 6:
           if ($ftype == TType::MAP) {
             $this->properties = array();
-            $_size464 = 0;
-            $_ktype465 = 0;
-            $_vtype466 = 0;
-            $xfer += $input->readMapBegin($_ktype465, $_vtype466, $_size464);
-            for ($_i468 = 0; $_i468 < $_size464; ++$_i468)
+            $_size465 = 0;
+            $_ktype466 = 0;
+            $_vtype467 = 0;
+            $xfer += $input->readMapBegin($_ktype466, $_vtype467, $_size465);
+            for ($_i469 = 0; $_i469 < $_size465; ++$_i469)
             {
-              $key469 = '';
-              $val470 = '';
-              $xfer += $input->readString($key469);
-              $xfer += $input->readString($val470);
-              $this->properties[$key469] = $val470;
+              $key470 = '';
+              $val471 = '';
+              $xfer += $input->readString($key470);
+              $xfer += $input->readString($val471);
+              $this->properties[$key470] = $val471;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -14742,10 +14727,10 @@ class CompactionRequest {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties));
         {
-          foreach ($this->properties as $kiter471 => $viter472)
+          foreach ($this->properties as $kiter472 => $viter473)
           {
-            $xfer += $output->writeString($kiter471);
-            $xfer += $output->writeString($viter472);
+            $xfer += $output->writeString($kiter472);
+            $xfer += $output->writeString($viter473);
           }
         }
         $output->writeMapEnd();
@@ -15332,15 +15317,15 @@ class ShowCompactResponse {
         case 1:
           if ($ftype == TType::LST) {
             $this->compacts = array();
-            $_size473 = 0;
-            $_etype476 = 0;
-            $xfer += $input->readListBegin($_etype476, $_size473);
-            for ($_i477 = 0; $_i477 < $_size473; ++$_i477)
+            $_size474 = 0;
+            $_etype477 = 0;
+            $xfer += $input->readListBegin($_etype477, $_size474);
+            for ($_i478 = 0; $_i478 < $_size474; ++$_i478)
             {
-              $elem478 = null;
-              $elem478 = new \metastore\ShowCompactResponseElement();
-              $xfer += $elem478->read($input);
-              $this->compacts []= $elem478;
+              $elem479 = null;
+              $elem479 = new \metastore\ShowCompactResponseElement();
+              $xfer += $elem479->read($input);
+              $this->compacts []= $elem479;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -15368,9 +15353,9 @@ class ShowCompactResponse {
       {
         $output->writeListBegin(TType::STRUCT, count($this->compacts));
         {
-          foreach ($this->compacts as $iter479)
+          foreach ($this->compacts as $iter480)
           {
-            $xfer += $iter479->write($output);
+            $xfer += $iter480->write($output);
           }
         }
         $output->writeListEnd();
@@ -15499,14 +15484,14 @@ class AddDynamicPartitions {
         case 4:
           if ($ftype == TType::LST) {
             $this->partitionnames = array();
-            $_size480 = 0;
-            $_etype483 = 0;
-            $xfer += $input->readListBegin($_etype483, $_size480);
-            for ($_i484 = 0; $_i484 < $_size480; ++$_i484)
+            $_size481 = 0;
+            $_etype484 = 0;
+            $xfer += $input->readListBegin($_etype484, $_size481);
+            for ($_i485 = 0; $_i485 < $_size481; ++$_i485)
             {
-              $elem485 = null;
-              $xfer += $input->readString($elem485);
-              $this->partitionnames []= $elem485;
+              $elem486 = null;
+              $xfer += $input->readString($elem486);
+              $this->partitionnames []= $elem486;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -15556,9 +15541,9 @@ class AddDynamicPartitions {
       {
         $output->writeListBegin(TType::STRING, count($this->partitionnames));
         {
-          foreach ($this->partitionnames as $iter486)
+          foreach ($this->partitionnames as $iter487)
           {
-            $xfer += $output->writeString($iter486);
+            $xfer += $output->writeString($iter487);
           }
         }
         $output->writeListEnd();
@@ -15939,15 +15924,15 @@ class NotificationEventResponse {
         case 1:
           if ($ftype == TType::LST) {
             $this->events = array();
-            $_size487 = 0;
-            $_etype490 = 0;
-            $xfer += $input->readListBegin($_etype490, $_size487);
-            for ($_i491 = 0; $_i491 < $_size487; ++$_i491)
+            $_size488 = 0;
+            $_etype491 = 0;
+            $xfer += $input->readListBegin($_etype491, $_size488);
+            for ($_i492 = 0; $_i492 < $_size488; ++$_i492)
             {
-              $elem492 = null;
-              $elem492 = new \metastore\NotificationEvent();
-              $xfer += $elem492->read($input);
-              $this->events []= $elem492;
+              $elem493 = null;
+              $elem493 = new \metastore\NotificationEvent();
+              $xfer += $elem493->read($input);
+              $this->events []= $elem493;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -15975,9 +15960,9 @@ class NotificationEventResponse {
       {
         $output->writeListBegin(TType::STRUCT, count($this->events));
         {
-          foreach ($this->events as $iter493)
+          foreach ($this->events as $iter494)
           {
-            $xfer += $iter493->write($output);
+            $xfer += $iter494->write($output);
           }
         }
         $output->writeListEnd();
@@ -16070,10 +16055,6 @@ class InsertEventRequestData {
   static $_TSPEC;
 
   /**
-   * @var bool
-   */
-  public $replace = null;
-  /**
    * @var string[]
    */
   public $filesAdded = null;
@@ -16086,10 +16067,6 @@ class InsertEventRequestData {
     if (!isset(self::$_TSPEC)) {
       self::$_TSPEC = array(
         1 => array(
-          'var' => 'replace',
-          'type' => TType::BOOL,
-          ),
-        2 => array(
           'var' => 'filesAdded',
           'type' => TType::LST,
           'etype' => TType::STRING,
@@ -16097,7 +16074,7 @@ class InsertEventRequestData {
             'type' => TType::STRING,
             ),
           ),
-        3 => array(
+        2 => array(
           'var' => 'filesAddedChecksum',
           'type' => TType::LST,
           'etype' => TType::STRING,
@@ -16108,9 +16085,6 @@ class InsertEventRequestData {
         );
     }
     if (is_array($vals)) {
-      if (isset($vals['replace'])) {
-        $this->replace = $vals['replace'];
-      }
       if (isset($vals['filesAdded'])) {
         $this->filesAdded = $vals['filesAdded'];
       }
@@ -16140,40 +16114,33 @@ class InsertEventRequestData {
       switch ($fid)
       {
         case 1:
-          if ($ftype == TType::BOOL) {
-            $xfer += $input->readBool($this->replace);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 2:
           if ($ftype == TType::LST) {
             $this->filesAdded = array();
-            $_size494 = 0;
-            $_etype497 = 0;
-            $xfer += $input->readListBegin($_etype497, $_size494);
-            for ($_i498 = 0; $_i498 < $_size494; ++$_i498)
+            $_size495 = 0;
+            $_etype498 = 0;
+            $xfer += $input->readListBegin($_etype498, $_size495);
+            for ($_i499 = 0; $_i499 < $_size495; ++$_i499)
             {
-              $elem499 = null;
-              $xfer += $input->readString($elem499);
-              $this->filesAdded []= $elem499;
+              $elem500 = null;
+              $xfer += $input->readString($elem500);
+              $this->filesAdded []= $elem500;
             }
             $xfer += $input->readListEnd();
           } else {
             $xfer += $input->skip($ftype);
           }
           break;
-        case 3:
+        case 2:
           if ($ftype == TType::LST) {
             $this->filesAddedChecksum = array();
-            $_size500 = 0;
-            $_etype503 = 0;
-            $xfer += $input->readListBegin($_etype503, $_size500);
-            for ($_i504 = 0; $_i504 < $_size500; ++$_i504)
+            $_size501 = 0;
+            $_etype504 = 0;
+            $xfer += $input->readListBegin($_etype504, $_size501);
+            for ($_i505 = 0; $_i505 < $_size501; ++$_i505)
             {
-              $elem505 = null;
-              $xfer += $input->readString($elem505);
-              $this->filesAddedChecksum []= $elem505;
+              $elem506 = null;
+              $xfer += $input->readString($elem506);
+              $this->filesAddedChecksum []= $elem506;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16193,22 +16160,17 @@ class InsertEventRequestData {
   public function write($output) {
     $xfer = 0;
     $xfer += $output->writeStructBegin('InsertEventRequestData');
-    if ($this->replace !== null) {
-      $xfer += $output->writeFieldBegin('replace', TType::BOOL, 1);
-      $xfer += $output->writeBool($this->replace);
-      $xfer += $output->writeFieldEnd();
-    }
     if ($this->filesAdded !== null) {
       if (!is_array($this->filesAdded)) {
         throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
       }
-      $xfer += $output->writeFieldBegin('filesAdded', TType::LST, 2);
+      $xfer += $output->writeFieldBegin('filesAdded', TType::LST, 1);
       {
         $output->writeListBegin(TType::STRING, count($this->filesAdded));
         {
-          foreach ($this->filesAdded as $iter506)
+          foreach ($this->filesAdded as $iter507)
           {
-            $xfer += $output->writeString($iter506);
+            $xfer += $output->writeString($iter507);
           }
         }
         $output->writeListEnd();
@@ -16219,13 +16181,13 @@ class InsertEventRequestData {
       if (!is_array($this->filesAddedChecksum)) {
         throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
       }
-      $xfer += $output->writeFieldBegin('filesAddedChecksum', TType::LST, 3);
+      $xfer += $output->writeFieldBegin('filesAddedChecksum', TType::LST, 2);
       {
         $output->writeListBegin(TType::STRING, count($this->filesAddedChecksum));
         {
-          foreach ($this->filesAddedChecksum as $iter507)
+          foreach ($this->filesAddedChecksum as $iter508)
           {
-            $xfer += $output->writeString($iter507);
+            $xfer += $output->writeString($iter508);
           }
         }
         $output->writeListEnd();
@@ -16443,14 +16405,14 @@ class FireEventRequest {
         case 5:
           if ($ftype == TType::LST) {
             $this->partitionVals = array();
-            $_size508 = 0;
-            $_etype511 = 0;
-            $xfer += $input->readListBegin($_etype511, $_size508);
-            for ($_i512 = 0; $_i512 < $_size508; ++$_i512)
+            $_size509 = 0;
+            $_etype512 = 0;
+            $xfer += $input->readListBegin($_etype512, $_size509);
+            for ($_i513 = 0; $_i513 < $_size509; ++$_i513)
             {
-              $elem513 = null;
-              $xfer += $input->readString($elem513);
-              $this->partitionVals []= $elem513;
+              $elem514 = null;
+              $xfer += $input->readString($elem514);
+              $this->partitionVals []= $elem514;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16501,9 +16463,9 @@ class FireEventRequest {
       {
         $output->writeListBegin(TType::STRING, count($this->partitionVals));
         {
-          foreach ($this->partitionVals as $iter514)
+          foreach ($this->partitionVals as $iter515)
           {
-            $xfer += $output->writeString($iter514);
+            $xfer += $output->writeString($iter515);
           }
         }
         $output->writeListEnd();
@@ -16731,18 +16693,18 @@ class GetFileMetadataByExprResult {
         case 1:
           if ($ftype == TType::MAP) {
             $this->metadata = array();
-            $_size515 = 0;
-            $_ktype516 = 0;
-            $_vtype517 = 0;
-            $xfer += $input->readMapBegin($_ktype516, $_vtype517, $_size515);
-            for ($_i519 = 0; $_i519 < $_size515; ++$_i519)
+            $_size516 = 0;
+            $_ktype517 = 0;
+            $_vtype518 = 0;
+            $xfer += $input->readMapBegin($_ktype517, $_vtype518, $_size516);
+            for ($_i520 = 0; $_i520 < $_size516; ++$_i520)
             {
-              $key520 = 0;
-              $val521 = new \metastore\MetadataPpdResult();
-              $xfer += $input->readI64($key520);
-              $val521 = new \metastore\MetadataPpdResult();
-              $xfer += $val521->read($input);
-              $this->metadata[$key520] = $val521;
+              $key521 = 0;
+              $val522 = new \metastore\MetadataPpdResult();
+              $xfer += $input->readI64($key521);
+              $val522 = new \metastore\MetadataPpdResult();
+              $xfer += $val522->read($input);
+              $this->metadata[$key521] = $val522;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -16777,10 +16739,10 @@ class GetFileMetadataByExprResult {
       {
         $output->writeMapBegin(TType::I64, TType::STRUCT, count($this->metadata));
         {
-          foreach ($this->metadata as $kiter522 => $viter523)
+          foreach ($this->metadata as $kiter523 => $viter524)
           {
-            $xfer += $output->writeI64($kiter522);
-            $xfer += $viter523->write($output);
+            $xfer += $output->writeI64($kiter523);
+            $xfer += $viter524->write($output);
           }
         }
         $output->writeMapEnd();
@@ -16882,14 +16844,14 @@ class GetFileMetadataByExprRequest {
         case 1:
           if ($ftype == TType::LST) {
             $this->fileIds = array();
-            $_size524 = 0;
-            $_etype527 = 0;
-            $xfer += $input->readListBegin($_etype527, $_size524);
-            for ($_i528 = 0; $_i528 < $_size524; ++$_i528)
+            $_size525 = 0;
+            $_etype528 = 0;
+            $xfer += $input->readListBegin($_etype528, $_size525);
+            for ($_i529 = 0; $_i529 < $_size525; ++$_i529)
             {
-              $elem529 = null;
-              $xfer += $input->readI64($elem529);
-              $this->fileIds []= $elem529;
+              $elem530 = null;
+              $xfer += $input->readI64($elem530);
+              $this->fileIds []= $elem530;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16938,9 +16900,9 @@ class GetFileMetadataByExprRequest {
       {
         $output->writeListBegin(TType::I64, count($this->fileIds));
         {
-          foreach ($this->fileIds as $iter530)
+          foreach ($this->fileIds as $iter531)
           {
-            $xfer += $output->writeI64($iter530);
+            $xfer += $output->writeI64($iter531);
           }
         }
         $output->writeListEnd();
@@ -17034,17 +16996,17 @@ class GetFileMetadataResult {
         case 1:
           if ($ftype == TType::MAP) {
             $this->metadata = array();
-            $_size531 = 0;
-            $_ktype532 = 0;
-            $_vtype533 = 0;
-            $xfer += $input->readMapBegin($_ktype532, $_vtype533, $_size531);
-            for ($_i535 = 0; $_i535 < $_size531; ++$_i535)
+            $_size532 = 0;
+            $_ktype533 = 0;
+            $_vtype534 = 0;
+            $xfer += $input->readMapBegin($_ktype533, $_vtype534, $_size532);
+            for ($_i536 = 0; $_i536 < $_size532; ++$_i536)
             {
-              $key536 = 0;
-              $val537 = '';
-              $xfer += $input->readI64($key536);
-              $xfer += $input->readString($val537);
-              $this->metadata[$key536] = $val537;
+              $key537 = 0;
+              $val538 = '';
+              $xfer += $input->readI64($key537);
+              $xfer += $input->readString($val538);
+              $this->metadata[$key537] = $val538;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -17079,10 +17041,10 @@ class GetFileMetadataResult {
       {
         $output->writeMapBegin(TType::I64, TType::STRING, count($this->metadata));
         {
-          foreach ($this->metadata as $kiter538 => $viter539)
+          foreach ($this->metadata as $kiter539 => $viter540)
           {
-            $xfer += $output->writeI64($kiter538);
-            $xfer += $output->writeString($viter539);
+            $xfer += $output->writeI64($kiter539);
+            $xfer += $output->writeString($viter540);
           }
         }
         $output->writeMapEnd();
@@ -17151,14 +17113,14 @@ class GetFileMetadataRequest {
         case 1:
           if ($ftype == TType::LST) {
             $this->fileIds = array();
-            $_size540 = 0;
-            $_etype543 = 0;
-            $xfer += $input->readListBegin($_etype543, $_size540);
-            for ($_i544 = 0; $_i544 < $_size540; ++$_i544)
+            $_size541 = 0;
+            $_etype544 = 0;
+            $xfer += $input->readListBegin($_etype544, $_size541);
+            for ($_i545 = 0; $_i545 < $_size541; ++$_i545)
             {
-              $elem545 = null;
-              $xfer += $input->readI64($elem545);
-              $this->fileIds []= $elem545;
+              $elem546 = null;
+              $xfer += $input->readI64($elem546);
+              $this->fileIds []= $elem546;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17186,9 +17148,9 @@ class GetFileMetadataRequest {
       {
         $output->writeListBegin(TType::I64, count($this->fileIds));
         {
-          foreach ($this->fileIds as $iter546)
+          foreach ($this->fileIds as $iter547)
           {
-            $xfer += $output->writeI64($iter546);
+            $xfer += $output->writeI64($iter547);
           }
         }
         $output->writeListEnd();
@@ -17328,14 +17290,14 @@ class PutFileMetadataRequest {
         case 1:
           if ($ftype == TType::LST) {
             $this->fileIds = array();
-            $_size547 = 0;
-            $_etype550 = 0;
-            $xfer += $input->readListBegin($_etype550, $_size547);
-            for ($_i551 = 0; $_i551 < $_size547; ++$_i551)
+            $_size548 = 0;
+            $_etype551 = 0;
+            $xfer += $input->readListBegin($_etype551, $_size548);
+            for ($_i552 = 0; $_i552 < $_size548; ++$_i552)
             {
-              $elem552 = null;
-              $xfer += $input->readI64($elem552);
-              $this->fileIds []= $elem552;
+              $elem553 = null;
+              $xfer += $input->readI64($elem553);
+              $this->fileIds []= $elem553;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17345,14 +17307,14 @@ class PutFileMetadataRequest {
         case 2:
           if ($ftype == TType::LST) {
             $this->metadata = array();
-            $_size553 = 0;
-            $_etype556 = 0;
-            $xfer += $input->readListBegin($_etype556, $_size553);
-            for ($_i557 = 0; $_i557 < $_size553; ++$_i557)
+            $_size554 = 0;
+            $_etype557 = 0;
+            $xfer += $input->readListBegin($_etype557, $_size554);
+            for ($_i558 = 0; $_i558 < $_size554; ++$_i558)
             {
-              $elem558 = null;
-              $xfer += $input->readString($elem558);
-              $this->metadata []= $elem558;
+              $elem559 = null;
+              $xfer += $input->readString($elem559);
+              $this->metadata []= $elem559;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17387,9 +17349,9 @@ class PutFileMetadataRequest {
       {
         $output->writeListBegin(TType::I64, count($this->fileIds));
         {
-          foreach ($this->fileIds as $iter559)
+          foreach ($this->fileIds as $iter560)
           {
-            $xfer += $output->writeI64($iter559);
+            $xfer += $output->writeI64($iter560);
           }
         }
         $output->writeListEnd();
@@ -17404,9 +17366,9 @@ class PutFileMetadataRequest {
       {
         $output->writeListBegin(TType::STRING, count($this->metadata));
         {
-          foreach ($this->metadata as $iter560)
+          foreach ($this->metadata as $iter561)
           {
-            $xfer += $output->writeString($iter560);
+            $xfer += $output->writeString($iter561);
           }
         }
         $output->writeListEnd();
@@ -17525,14 +17487,14 @@ class ClearFileMetadataRequest {
         case 1:
           if ($ftype == TType::LST) {
             $this->fileIds = array();
-            $_size561 = 0;
-            $_etype564 = 0;
-            $xfer += $input->readListBegin($_etype564, $_size561);
-            for ($_i565 = 0; $_i565 < $_size561; ++$_i565)
+            $_size562 = 0;
+            $_etype565 = 0;
+            $xfer += $input->readListBegin($_etype565, $_size562);
+            for ($_i566 = 0; $_i566 < $_size562; ++$_i566)
             {
-              $elem566 = null;
-              $xfer += $input->readI64($elem566);
-              $this->fileIds []= $elem566;
+              $elem567 = null;
+              $xfer += $input->readI64($elem567);
+              $this->fileIds []= $elem567;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17560,9 +17522,9 @@ class ClearFileMetadataRequest {
       {
         $output->writeListBegin(TType::I64, count($this->fileIds));
         {
-          foreach ($this->fileIds as $iter567)
+          foreach ($this->fileIds as $iter568)
           {
-            $xfer += $output->writeI64($iter567);
+            $xfer += $output->writeI64($iter568);
           }
         }
         $output->writeListEnd();
@@ -18535,14 +18497,14 @@ class GetValidWriteIdsResult {
         case 4:
           if ($ftype == TType::LST) {
             $this->ids = array();
-            $_size568 = 0;
-            $_etype571 = 0;
-            $xfer += $input->readListBegin($_etype571, $_size568);
-            for ($_i572 = 0; $_i572 < $_size568; ++$_i572)
+            $_size569 = 0;
+            $_etype572 = 0;
+            $xfer += $input->readListBegin($_etype572, $_size569);
+            for ($_i573 = 0; $_i573 < $_size569; ++$_i573)
             {
-              $elem573 = null;
-              $xfer += $input->readI64($elem573);
-              $this->ids []= $elem573;
+              $elem574 = null;
+              $xfer += $input->readI64($elem574);
+              $this->ids []= $elem574;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -18585,9 +18547,9 @@ class GetValidWriteIdsResult {
       {
         $output->writeListBegin(TType::I64, count($this->ids));
         {
-          foreach ($this->ids as $iter574)
+          foreach ($this->ids as $iter575)
           {
-            $xfer += $output->writeI64($iter574);
+            $xfer += $output->writeI64($iter575);
           }
         }
         $output->writeListEnd();
@@ -18652,15 +18614,15 @@ class GetAllFunctionsResponse {
         case 1:
           if ($ftype == TType::LST) {
             $this->functions = array();
-            $_size575 = 0;
-            $_etype578 = 0;
-            $xfer += $input->readListBegin($_etype578, $_size575);
-            for ($_i579 = 0; $_i579 < $_size575; ++$_i579)
+            $_size576 = 0;
+            $_etype579 = 0;
+            $xfer += $input->readListBegin($_etype579, $_size576);
+            for ($_i580 = 0; $_i580 < $_size576; ++$_i580)
             {
-              $elem580 = null;
-              $elem580 = new \metastore\Function();
-              $xfer += $elem580->read($input);
-              $this->functions []= $elem580;
+              $elem581 = null;
+              $elem581 = new \metastore\Function();
+              $xfer += $elem581->read($input);
+              $this->functions []= $elem581;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -18688,9 +18650,9 @@ class GetAllFunctionsResponse {
       {
         $output->writeListBegin(TType::STRUCT, count($this->functions));
         {
-          foreach ($this->functions as $iter581)
+          foreach ($this->functions as $iter582)
           {
-            $xfer += $iter581->write($output);
+            $xfer += $iter582->write($output);
           }
         }
         $output->writeListEnd();
@@ -18754,14 +18716,14 @@ class ClientCapabilities {
         case 1:
           if ($ftype == TType::LST) {
             $this->values = array();
-            $_size582 = 0;
-            $_etype585 = 0;
-            $xfer += $input->readListBegin($_etype585, $_size582);
-            for ($_i586 = 0; $_i586 < $_size582; ++$_i586)
+            $_size583 = 0;
+            $_etype586 = 0;
+            $xfer += $input->readListBegin($_etype586, $_size583);
+            for ($_i587 = 0; $_i587 < $_size583; ++$_i587)
             {
-              $elem587 = null;
-              $xfer += $input->readI32($elem587);
-              $this->values []= $elem587;
+              $elem588 = null;
+              $xfer += $input->readI32($elem588);
+              $this->values []= $elem588;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -18789,9 +18751,9 @@ class ClientCapabilities {
       {
         $output->writeListBegin(TType::I32, count($this->values));
         {
-          foreach ($this->values as $iter588)
+          foreach ($this->values as $iter589)
           {
-            $xfer += $output->writeI32($iter588);
+            $xfer += $output->writeI32($iter589);
           }
         }
         $output->writeListEnd();
@@ -19091,14 +19053,14 @@ class GetTablesRequest {
         case 2:
           if ($ftype == TType::LST) {
             $this->tblNames = array();
-            $_size589 = 0;
-            $_etype592 = 0;
-            $xfer += $input->readListBegin($_etype592, $_size589);
-            for ($_i593 = 0; $_i593 < $_size589; ++$_i593)
+            $_size590 = 0;
+            $_etype593 = 0;
+            $xfer += $input->readListBegin($_etype593, $_size590);
+            for ($_i594 = 0; $_i594 < $_size590; ++$_i594)
             {
-              $elem594 = null;
-              $xfer += $input->readString($elem594);
-              $this->tblNames []= $elem594;
+              $elem595 = null;
+              $xfer += $input->readString($elem595);
+              $this->tblNames []= $elem595;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -19139,9 +19101,9 @@ class GetTablesRequest {
       {
         $output->writeListBegin(TType::STRING, count($this->tblNames));
         {
-          foreach ($this->tblNames as $iter595)
+          foreach ($this->tblNames as $iter596)
           {
-            $xfer += $output->writeString($iter595);
+            $xfer += $output->writeString($iter596);
           }
         }
         $output->writeListEnd();
@@ -19214,15 +19176,15 @@ class GetTablesResult {
         case 1:
           if ($ftype == TType::LST) {
             $this->tables = array();
-            $_size596 = 0;
-            $_etype599 = 0;
-            $xfer += $input->readListBegin($_etype599, $_size596);
-            for ($_i600 = 0; $_i600 < $_size596; ++$_i600)
+            $_size597 = 0;
+            $_etype600 = 0;
+            $xfer += $input->readListBegin($_etype600, $_size597);
+            for ($_i601 = 0; $_i601 < $_size597; ++$_i601)
             {
-              $elem601 = null;
-              $elem601 = new \metastore\Table();
-              $xfer += $elem601->read($input);
-              $this->tables []= $elem601;
+              $elem602 = null;
+              $elem602 = new \metastore\Table();
+              $xfer += $elem602->read($input);
+              $this->tables []= $elem602;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -19250,9 +19212,9 @@ class GetTablesResult {
       {
         $output->writeListBegin(TType::STRUCT, count($this->tables));
         {
-          foreach ($this->tables as $iter602)
+          foreach ($this->tables as $iter603)
           {
-            $xfer += $iter602->write($output);
+            $xfer += $iter603->write($output);
           }
         }
         $output->writeListEnd();
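
To summarize the Types.php hunks above: the only functional changes are that GetOpenTxnsResponse.open_txns reverts to a Thrift set (read and written via readSetBegin/writeSetBegin instead of the list calls) and loses the abortedBits string, and that InsertEventRequestData drops its replace flag and renumbers its two remaining list fields; every other hunk is just the generator renumbering its temporary iterator variables. For callers, the list-versus-set switch mainly changes deduplication and membership semantics. A minimal, purely illustrative Java sketch using plain collections (not the generated Thrift classes):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class OpenTxnsSketch {
      public static void main(String[] args) {
        // As a list, duplicates survive and membership checks are linear scans.
        List<Long> openTxnsAsList = Arrays.asList(7L, 9L, 9L, 12L);

        // As a set, duplicates collapse and contains() is a hash lookup.
        Set<Long> openTxnsAsSet = new HashSet<>(openTxnsAsList);

        long txnId = 9L;
        System.out.println("list size=" + openTxnsAsList.size()
            + ", set size=" + openTxnsAsSet.size()
            + ", txn " + txnId + " open? " + openTxnsAsSet.contains(txnId));
      }
    }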

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index c1c3393..3db3bad 100755
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@ -48,7 +48,6 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
   print('  void add_foreign_key(AddForeignKeyRequest req)')
   print('  void drop_table(string dbname, string name, bool deleteData)')
   print('  void drop_table_with_environment_context(string dbname, string name, bool deleteData, EnvironmentContext environment_context)')
-  print('  void truncate_table(string dbName, string tableName,  partNames)')
   print('   get_tables(string db_name, string pattern)')
   print('   get_tables_by_type(string db_name, string pattern, string tableType)')
   print('   get_table_meta(string db_patterns, string tbl_patterns,  tbl_types)')
@@ -395,12 +394,6 @@ elif cmd == 'drop_table_with_environment_context':
     sys.exit(1)
   pp.pprint(client.drop_table_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),))
 
-elif cmd == 'truncate_table':
-  if len(args) != 3:
-    print('truncate_table requires 3 args')
-    sys.exit(1)
-  pp.pprint(client.truncate_table(args[0],args[1],eval(args[2]),))
-
 elif cmd == 'get_tables':
   if len(args) != 2:
     print('get_tables requires 2 args')


http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTableLoader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTableLoader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTableLoader.java
index b015e43..49ecdd1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTableLoader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTableLoader.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.util.Collections;
 import java.util.Map;
 
-import org.apache.hadoop.hive.ql.exec.mapjoin.MapJoinMemoryExhaustionError;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -29,6 +28,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.MapredContext;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
+import org.apache.hadoop.hive.ql.exec.persistence.HashMapWrapper;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe;
 import org.apache.hadoop.hive.ql.exec.tez.TezContext;
@@ -68,21 +68,6 @@ public class VectorMapJoinFastHashTableLoader implements org.apache.hadoop.hive.
     Map<Integer, String> parentToInput = desc.getParentToInput();
     Map<Integer, Long> parentKeyCounts = desc.getParentKeyCounts();
 
-    final float inflationFactor = HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASH_TABLE_INFLATION_FACTOR);
-    final long memoryCheckInterval = HiveConf.getLongVar(hconf,
-      HiveConf.ConfVars.LLAP_MAPJOIN_MEMORY_MONITOR_CHECK_INTERVAL);
-    final boolean isLlap = "llap".equals(HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_EXECUTION_MODE));
-    long numEntries = 0;
-    long noCondTaskSize = desc.getNoConditionalTaskSize();
-    boolean doMemCheck = isLlap && inflationFactor > 0.0f && noCondTaskSize > 0 && memoryCheckInterval > 0;
-    if (!doMemCheck) {
-      LOG.info("Not doing hash table memory monitoring. isLlap: {} inflationFactor: {} noConditionalTaskSize: {} " +
-          "memoryCheckInterval: {}", isLlap, inflationFactor, noCondTaskSize, memoryCheckInterval);
-    } else {
-      LOG.info("Memory monitoring for hash table loader enabled. noconditionalTaskSize: {} inflationFactor: {} ",
-        noCondTaskSize, inflationFactor);
-    }
-
     for (int pos = 0; pos < mapJoinTables.length; pos++) {
       if (pos == desc.getPosBigTable()) {
         continue;
@@ -108,41 +93,15 @@ public class VectorMapJoinFastHashTableLoader implements org.apache.hadoop.hive.
         VectorMapJoinFastTableContainer vectorMapJoinFastTableContainer =
                 new VectorMapJoinFastTableContainer(desc, hconf, keyCount);
 
-        LOG.info("Using vectorMapJoinFastTableContainer: " + vectorMapJoinFastTableContainer.getClass().getSimpleName());
-
         vectorMapJoinFastTableContainer.setSerde(null, null); // No SerDes here.
         while (kvReader.next()) {
           vectorMapJoinFastTableContainer.putRow((BytesWritable)kvReader.getCurrentKey(),
               (BytesWritable)kvReader.getCurrentValue());
-          numEntries++;
-          if (doMemCheck && numEntries >= memoryCheckInterval) {
-            if (doMemCheck && ((numEntries % memoryCheckInterval) == 0)) {
-              final long estMemUsage = vectorMapJoinFastTableContainer.getEstimatedMemorySize();
-              final long threshold = (long) (inflationFactor * noCondTaskSize);
-              // guard against poor configuration of noconditional task size. We let hash table grow till 2/3'rd memory
-              // available for container/executor
-              final long effectiveThreshold = (long) Math.max(threshold, (2.0/3.0) * desc.getMaxMemoryAvailable());
-              if (estMemUsage > effectiveThreshold) {
-                String msg = "VectorMapJoin Hash table loading exceeded memory limits." +
-                  " estimatedMemoryUsage: " + estMemUsage + " noconditionalTaskSize: " + noCondTaskSize +
-                  " inflationFactor: " + inflationFactor + " threshold: " + threshold +
-                  " effectiveThreshold: " + effectiveThreshold;
-                LOG.error(msg);
-                throw new MapJoinMemoryExhaustionError(msg);
-              } else {
-                if (LOG.isInfoEnabled()) {
-                  LOG.info("Checking vector mapjoin hash table loader memory usage.. numEntries: {} " +
-                    "estimatedMemoryUsage: {} effectiveThreshold: {}", numEntries, estMemUsage, effectiveThreshold);
-                }
-              }
-            }
-          }
         }
 
         vectorMapJoinFastTableContainer.seal();
-        mapJoinTables[pos] = vectorMapJoinFastTableContainer;
-        LOG.info("Finished loading hashtable using " + vectorMapJoinFastTableContainer.getClass() +
-          ". Small table position: " + pos);
+        mapJoinTables[pos] = (MapJoinTableContainer) vectorMapJoinFastTableContainer;
+
       } catch (IOException e) {
         throw new HiveException(e);
       } catch (SerDeException e) {
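
What the revert removes from VectorMapJoinFastHashTableLoader above is the hash-table memory monitor: every memoryCheckInterval inserted rows it compares the container's estimated size against inflationFactor * noConditionalTaskSize, floored at two thirds of the memory available to the executor, and throws MapJoinMemoryExhaustionError when the estimate exceeds that bound. Reduced to a self-contained sketch with stand-in names (not the actual Hive/LLAP classes):

    // Sketch of the periodic memory check deleted above; every name here is a
    // stand-in for illustration, not the Hive/LLAP API.
    public class MemoryCheckedLoaderSketch {

      interface SizedContainer {
        void putRow(byte[] key, byte[] value);
        long estimatedMemorySize();
      }

      static void load(Iterable<byte[][]> rows, SizedContainer container,
          long checkInterval, double inflationFactor,
          long noCondTaskSize, long maxMemoryAvailable) {
        boolean doMemCheck = checkInterval > 0 && inflationFactor > 0 && noCondTaskSize > 0;
        long numEntries = 0;
        for (byte[][] kv : rows) {
          container.putRow(kv[0], kv[1]);
          numEntries++;
          if (doMemCheck && numEntries % checkInterval == 0) {
            long estMemUsage = container.estimatedMemorySize();
            long threshold = (long) (inflationFactor * noCondTaskSize);
            // Guard against a poorly tuned no-conditional task size: allow the
            // table to grow up to 2/3 of the memory available to the executor.
            long effectiveThreshold = Math.max(threshold, (long) (2.0 / 3.0 * maxMemoryAvailable));
            if (estMemUsage > effectiveThreshold) {
              throw new IllegalStateException("Hash table loading exceeded memory limit: "
                  + estMemUsage + " > " + effectiveThreshold);
            }
          }
        }
      }
    }

The net effect of reverting it is that LLAP map-join hash table loading no longer aborts early when the small table's estimated footprint outgrows the configured budget.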

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastKeyStore.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastKeyStore.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastKeyStore.java
index 3e9ff84..be51693 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastKeyStore.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastKeyStore.java
@@ -18,14 +18,13 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast;
 
-import org.apache.hadoop.hive.common.MemoryEstimate;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.serde2.WriteBuffers;
 
 // Optimized for sequential key lookup.
 
-public class VectorMapJoinFastKeyStore implements MemoryEstimate {
+public class VectorMapJoinFastKeyStore {
 
   private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinFastKeyStore.class.getName());
 
@@ -166,12 +165,4 @@ public class VectorMapJoinFastKeyStore implements MemoryEstimate {
     this.writeBuffers = writeBuffers;
     unsafeReadPos = new WriteBuffers.Position();
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    long size = 0;
-    size += writeBuffers == null ? 0 : writeBuffers.getEstimatedMemorySize();
-    size += unsafeReadPos == null ? 0 : unsafeReadPos.getEstimatedMemorySize();
-    return size;
-  }
 }
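
The override deleted here, like the near-identical ones deleted from the hash map, multiset and set classes that follow, implements the MemoryEstimate contract this revert removes: each structure reports its footprint as the null-guarded sum of its components' estimates (with a flat 16KB allowance where a component is too deep to instrument). A minimal sketch of that aggregation pattern with stand-in types (not Hive's org.apache.hadoop.hive.common.MemoryEstimate itself):

    // Illustrative only: a structure's estimate is the sum of its parts,
    // guarding against components that have not been allocated yet.
    public class KeyStoreEstimateSketch {

      interface MemoryEstimate {
        long getEstimatedMemorySize();
      }

      static class KeyStore implements MemoryEstimate {
        MemoryEstimate writeBuffers;  // bulk key bytes
        MemoryEstimate readPosition;  // small reusable cursor

        @Override
        public long getEstimatedMemorySize() {
          long size = 0;
          size += writeBuffers == null ? 0 : writeBuffers.getEstimatedMemorySize();
          size += readPosition == null ? 0 : readPosition.getEstimatedMemorySize();
          return size;
        }
      }
    }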

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashMap.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashMap.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashMap.java
index d4847b5..6fe98f9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashMap.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashMap.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast;
 
 import java.io.IOException;
 
-import org.apache.hadoop.hive.common.MemoryEstimate;
-import org.apache.hadoop.hive.ql.util.JavaDataModel;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.JoinUtil;
@@ -39,7 +37,7 @@ import com.google.common.annotations.VisibleForTesting;
  */
 public class VectorMapJoinFastLongHashMap
              extends VectorMapJoinFastLongHashTable
-             implements VectorMapJoinLongHashMap, MemoryEstimate {
+             implements VectorMapJoinLongHashMap {
 
   public static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinFastLongHashMap.class);
 
@@ -114,9 +112,4 @@ public class VectorMapJoinFastLongHashMap
         initialCapacity, loadFactor, writeBuffersSize, estimatedKeyCount);
     valueStore = new VectorMapJoinFastValueStore(writeBuffersSize);
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    return super.getEstimatedMemorySize() + valueStore.getEstimatedMemorySize();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashMultiSet.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashMultiSet.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashMultiSet.java
index 566cfa2..9140aee 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashMultiSet.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashMultiSet.java
@@ -100,9 +100,4 @@ public class VectorMapJoinFastLongHashMultiSet
     super(minMaxEnabled, isOuterJoin, hashTableKeyType,
         initialCapacity, loadFactor, writeBuffersSize, estimatedKeyCount);
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    return super.getEstimatedMemorySize();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashSet.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashSet.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashSet.java
index fb7ae62..d3efb11 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashSet.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashSet.java
@@ -96,9 +96,4 @@ public class VectorMapJoinFastLongHashSet
     super(minMaxEnabled, isOuterJoin, hashTableKeyType,
         initialCapacity, loadFactor, writeBuffersSize, estimatedKeyCount);
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    return super.getEstimatedMemorySize();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java
index 54e667c..8bfa07c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast;
 
 import java.io.IOException;
 
-import org.apache.hadoop.hive.ql.util.JavaDataModel;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.JoinUtil;
@@ -281,18 +280,4 @@ public abstract class VectorMapJoinFastLongHashTable
     min = Long.MAX_VALUE;
     max = Long.MIN_VALUE;
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    JavaDataModel jdm = JavaDataModel.get();
-    long size = super.getEstimatedMemorySize();
-    size += slotPairs == null ? 0 : jdm.lengthForLongArrayOfSize(slotPairs.length);
-    size += (2 * jdm.primitive2());
-    size += (2 * jdm.primitive1());
-    size += jdm.object();
-    // adding 16KB constant memory for keyBinarySortableDeserializeRead as the rabit hole is deep to implement
-    // MemoryEstimate interface, also it is constant overhead
-    size += (16 * 1024L);
-    return size;
-  }
 }
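
The variant removed from VectorMapJoinFastLongHashTable just above adds concrete arithmetic to that pattern: the parent's estimate, plus the slotPairs long array (sized via JavaDataModel), two 8-byte and two 4-byte primitives, one object header, and a flat 16KB allowance for the key deserializer. An approximate, self-contained re-creation of that arithmetic, with hard-coded JVM overheads standing in for Hive's JavaDataModel:

    // Rough re-creation of the deleted estimate; the overhead constants are
    // approximations, so treat the output as illustrative only.
    public class LongHashTableEstimateSketch {
      static final long OBJECT_HEADER = 16;                  // per-object overhead
      static final long ARRAY_OVERHEAD = 16;                 // array header + length
      static final long DESERIALIZER_OVERHEAD = 16 * 1024L;  // flat 16KB, as in the removed code

      static long estimate(long parentEstimate, long[] slotPairs) {
        long size = parentEstimate;
        size += slotPairs == null ? 0 : ARRAY_OVERHEAD + 8L * slotPairs.length;
        size += 2 * Long.BYTES;     // min / max bounds
        size += 2 * Integer.BYTES;  // two int-sized fields
        size += OBJECT_HEADER;
        size += DESERIALIZER_OVERHEAD;
        return size;
      }

      public static void main(String[] args) {
        System.out.println(estimate(1_000_000L, new long[2048]) + " bytes (approx)");
      }
    }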

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashMap.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashMap.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashMap.java
index eb08aa9..add4788 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashMap.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashMap.java
@@ -53,9 +53,4 @@ public class VectorMapJoinFastMultiKeyHashMap
         int initialCapacity, float loadFactor, int writeBuffersSize, long estimatedKeyCount) {
     super(initialCapacity, loadFactor, writeBuffersSize, estimatedKeyCount);
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    return super.getEstimatedMemorySize();
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashMultiSet.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashMultiSet.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashMultiSet.java
index 56964bc..faefdbb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashMultiSet.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashMultiSet.java
@@ -52,8 +52,4 @@ public class VectorMapJoinFastMultiKeyHashMultiSet
     super(initialCapacity, loadFactor, writeBuffersSize, estimatedKeyCount);
   }
 
-  @Override
-  public long getEstimatedMemorySize() {
-    return super.getEstimatedMemorySize();
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashSet.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashSet.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashSet.java
index 46bafe0..5328910 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashSet.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashSet.java
@@ -52,8 +52,5 @@ public class VectorMapJoinFastMultiKeyHashSet
     super(initialCapacity, loadFactor, writeBuffersSize, estimatedKeyCount);
   }
 
-  @Override
-  public long getEstimatedMemorySize() {
-    return super.getEstimatedMemorySize();
-  }
+
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringHashMap.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringHashMap.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringHashMap.java
index d04590a..f13034f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringHashMap.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringHashMap.java
@@ -43,13 +43,4 @@ public class VectorMapJoinFastStringHashMap extends VectorMapJoinFastBytesHashMa
     super(initialCapacity, loadFactor, writeBuffersSize, estimatedKeyCount);
     stringCommon = new VectorMapJoinFastStringCommon(isOuterJoin);
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    long size = 0;
-    // adding 16KB constant memory for stringCommon as the rabit hole is deep to implement
-    // MemoryEstimate interface, also it is constant overhead
-    size += (16 * 1024L);
-    return super.getEstimatedMemorySize() + size;
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringHashMultiSet.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringHashMultiSet.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringHashMultiSet.java
index b24bfdf..53ad7b4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringHashMultiSet.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringHashMultiSet.java
@@ -43,12 +43,4 @@ public class VectorMapJoinFastStringHashMultiSet extends VectorMapJoinFastBytesH
     super(initialCapacity, loadFactor, writeBuffersSize, estimatedKeyCount);
     stringCommon = new VectorMapJoinFastStringCommon(isOuterJoin);
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    // adding 16KB constant memory for stringCommon as the rabit hole is deep to implement
-    // MemoryEstimate interface, also it is constant overhead
-    long size = (16 * 1024L);
-    return super.getEstimatedMemorySize() + size;
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringHashSet.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringHashSet.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringHashSet.java
index 75fae25..723c729 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringHashSet.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringHashSet.java
@@ -43,12 +43,4 @@ public class VectorMapJoinFastStringHashSet extends VectorMapJoinFastBytesHashSe
     super(initialCapacity, loadFactor, writeBuffersSize, estimatedKeyCount);
     stringCommon = new VectorMapJoinFastStringCommon(isOuterJoin);
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    // adding 16KB constant memory for stringCommon as the rabit hole is deep to implement
-    // MemoryEstimate interface, also it is constant overhead
-    long size = (16 * 1024L);
-    return super.getEstimatedMemorySize() + size;
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java
index 2fe4b93..05f1cf1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast;
 
 import java.io.IOException;
 
-import org.apache.hadoop.hive.ql.util.JavaDataModel;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -27,6 +26,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.persistence.HashMapWrapper;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinObjectSerDeContext;
+import org.apache.hadoop.hive.ql.exec.tez.HashTableLoader;
 import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTable;
 import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinTableContainer;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -38,6 +38,7 @@ import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKind;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Writable;
+import org.apache.tez.runtime.library.api.KeyValueReader;
 
 /**
  * HashTableLoader for Tez constructs the hashtable from records read from
@@ -45,7 +46,7 @@ import org.apache.hadoop.io.Writable;
  */
 public class VectorMapJoinFastTableContainer implements VectorMapJoinTableContainer {
 
-  private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinFastTableContainer.class.getName());
+  private static final Logger LOG = LoggerFactory.getLogger(HashTableLoader.class.getName());
 
   private final MapJoinDesc desc;
   private final Configuration hconf;
@@ -218,17 +219,6 @@ public class VectorMapJoinFastTableContainer implements VectorMapJoinTableContai
   }
 
   @Override
-  public long getEstimatedMemorySize() {
-    JavaDataModel jdm = JavaDataModel.get();
-    long size = 0;
-    size += vectorMapJoinFastHashTable.getEstimatedMemorySize();
-    size += (4 * jdm.primitive1());
-    size += (2 * jdm.object());
-    size += (jdm.primitive2());
-    return size;
-  }
-
-  @Override
   public void setSerde(MapJoinObjectSerDeContext keyCtx, MapJoinObjectSerDeContext valCtx)
       throws SerDeException {
     // Do nothing in this case.

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastValueStore.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastValueStore.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastValueStore.java
index 3cd06e8..f9c5b34 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastValueStore.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastValueStore.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast;
 
-import org.apache.hadoop.hive.common.MemoryEstimate;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMapResult;
@@ -31,7 +30,7 @@ import com.google.common.base.Preconditions;
 
 // Supports random access.
 
-public class VectorMapJoinFastValueStore implements MemoryEstimate {
+public class VectorMapJoinFastValueStore {
 
   private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinFastValueStore.class.getName());
 
@@ -114,11 +113,6 @@ public class VectorMapJoinFastValueStore implements MemoryEstimate {
     return writeBuffers;
   }
 
-  @Override
-  public long getEstimatedMemorySize() {
-    return writeBuffers == null ? 0 : writeBuffers.getEstimatedMemorySize();
-  }
-
   public static class HashMapResult extends VectorMapJoinHashMapResult {
 
     private VectorMapJoinFastValueStore valueStore;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashTable.java
index 9cc9ad4..c7e585c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashTable.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable;
 
 import java.io.IOException;
 
-import org.apache.hadoop.hive.common.MemoryEstimate;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.io.BytesWritable;
@@ -29,7 +28,7 @@ import org.apache.hadoop.io.BytesWritable;
  * Root interface for a vector map join hash table (which could be a hash map, hash multi-set, or
  * hash set).
  */
-public interface VectorMapJoinHashTable extends MemoryEstimate {
+public interface VectorMapJoinHashTable {
 
 
   /*

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashSet.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashSet.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashSet.java
index 1560807..93a89d7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashSet.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashSet.java
@@ -75,9 +75,4 @@ public class VectorMapJoinOptimizedHashSet
       MapJoinTableContainer originalTableContainer, ReusableGetAdaptor hashMapRowGetter) {
     super(originalTableContainer, hashMapRowGetter);
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    return super.getEstimatedMemorySize();
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashTable.java
index 5275e1a..5fe7861 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashTable.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized;
 
 import java.io.IOException;
 
-import org.apache.hadoop.hive.ql.util.JavaDataModel;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.JoinUtil;
@@ -97,12 +96,4 @@ public abstract class VectorMapJoinOptimizedHashTable implements VectorMapJoinHa
   public int size() {
     return originalTableContainer.size();
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    long size = 0;
-    size += originalTableContainer == null ? 0 : originalTableContainer.getEstimatedMemorySize();
-    size += (2 * JavaDataModel.get().object());
-    return size;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedStringHashSet.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedStringHashSet.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedStringHashSet.java
index 4b46ce0..f921b9c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedStringHashSet.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedStringHashSet.java
@@ -60,12 +60,4 @@ public class VectorMapJoinOptimizedStringHashSet
     super(originalTableContainer, hashMapRowGetter);
     stringCommon =  new VectorMapJoinOptimizedStringCommon(isOuterJoin);
   }
-
-  @Override
-  public long getEstimatedMemorySize() {
-    // adding 16KB constant memory for stringCommon as the rabit hole is deep to implement
-    // MemoryEstimate interface, also it is constant overhead
-    long size = (16 * 1024L);
-    return super.getEstimatedMemorySize() + size;
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
index fc5aea5..42ca4b7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
@@ -102,29 +102,36 @@ public abstract class VectorReduceSinkCommonOperator extends TerminalOperator<Re
   //---------------------------------------------------------------------------
 
   // Whether there is to be a tag added to the end of each key and the tag value.
-  protected transient boolean reduceSkipTag;
-  protected transient byte reduceTagByte;
+  private transient boolean reduceSkipTag;
+  private transient byte reduceTagByte;
 
   // Binary sortable key serializer.
   protected transient BinarySortableSerializeWrite keyBinarySortableSerializeWrite;
 
+  // The serialized all null key and its hash code.
+  private transient byte[] nullBytes;
+  private transient int nullKeyHashCode;
+
   // Lazy binary value serializer.
-  protected transient LazyBinarySerializeWrite valueLazyBinarySerializeWrite;
+  private transient LazyBinarySerializeWrite valueLazyBinarySerializeWrite;
 
   // This helper object serializes LazyBinary format reducer values from columns of a row
   // in a vectorized row batch.
-  protected transient VectorSerializeRow<LazyBinarySerializeWrite> valueVectorSerializeRow;
+  private transient VectorSerializeRow<LazyBinarySerializeWrite> valueVectorSerializeRow;
 
   // The output buffer used to serialize a value into.
-  protected transient Output valueOutput;
+  private transient Output valueOutput;
 
   // The hive key and bytes writable value needed to pass the key and value to the collector.
-  protected transient HiveKey keyWritable;
-  protected transient BytesWritable valueBytesWritable;
+  private transient HiveKey keyWritable;
+  private transient BytesWritable valueBytesWritable;
 
   // Where to write our key and value pairs.
   private transient OutputCollector out;
 
+  // The object that determines equal key series.
+  protected transient VectorKeySeriesSerialized serializedKeySeries;
+
   private transient long numRows = 0;
   private transient long cntr = 1;
   private transient long logEveryNRows = 0;
@@ -151,8 +158,6 @@ public abstract class VectorReduceSinkCommonOperator extends TerminalOperator<Re
       VectorizationContext vContext, OperatorDesc conf) throws HiveException {
     this(ctx);
 
-    LOG.info("VectorReduceSinkCommonOperator constructor");
-
     ReduceSinkDesc desc = (ReduceSinkDesc) conf;
     this.conf = desc;
     vectorDesc = (VectorReduceSinkDesc) desc.getVectorDesc();
@@ -242,46 +247,6 @@ public abstract class VectorReduceSinkCommonOperator extends TerminalOperator<Re
   protected void initializeOp(Configuration hconf) throws HiveException {
     super.initializeOp(hconf);
 
-    if (isLogDebugEnabled) {
-      LOG.debug("useUniformHash " + vectorReduceSinkInfo.getUseUniformHash());
-  
-      LOG.debug("reduceSinkKeyColumnMap " +
-          (vectorReduceSinkInfo.getReduceSinkKeyColumnMap() == null ? "NULL" : Arrays.toString(vectorReduceSinkInfo.getReduceSinkKeyColumnMap())));
-      LOG.debug("reduceSinkKeyTypeInfos " +
-          (vectorReduceSinkInfo.getReduceSinkKeyTypeInfos() == null ? "NULL" : Arrays.toString(vectorReduceSinkInfo.getReduceSinkKeyTypeInfos())));
-      LOG.debug("reduceSinkKeyColumnVectorTypes " +
-          (vectorReduceSinkInfo.getReduceSinkKeyColumnVectorTypes() == null ? "NULL" : Arrays.toString(vectorReduceSinkInfo.getReduceSinkKeyColumnVectorTypes())));
-      LOG.debug("reduceSinkKeyExpressions " +
-          (vectorReduceSinkInfo.getReduceSinkKeyExpressions() == null ? "NULL" : Arrays.toString(vectorReduceSinkInfo.getReduceSinkKeyExpressions())));
-  
-      LOG.debug("reduceSinkValueColumnMap " +
-          (vectorReduceSinkInfo.getReduceSinkValueColumnMap() == null ? "NULL" : Arrays.toString(vectorReduceSinkInfo.getReduceSinkValueColumnMap())));
-      LOG.debug("reduceSinkValueTypeInfos " +
-          (vectorReduceSinkInfo.getReduceSinkValueTypeInfos() == null ? "NULL" : Arrays.toString(vectorReduceSinkInfo.getReduceSinkValueTypeInfos())));
-      LOG.debug("reduceSinkValueColumnVectorTypes " +
-          (vectorReduceSinkInfo.getReduceSinkValueColumnVectorTypes() == null ? "NULL" : Arrays.toString(vectorReduceSinkInfo.getReduceSinkValueColumnVectorTypes())));
-      LOG.debug("reduceSinkValueExpressions " +
-          (vectorReduceSinkInfo.getReduceSinkValueExpressions() == null ? "NULL" : Arrays.toString(vectorReduceSinkInfo.getReduceSinkValueExpressions())));
-  
-      LOG.debug("reduceSinkBucketColumnMap " +
-          (vectorReduceSinkInfo.getReduceSinkBucketColumnMap() == null ? "NULL" : Arrays.toString(vectorReduceSinkInfo.getReduceSinkBucketColumnMap())));
-      LOG.debug("reduceSinkBucketTypeInfos " +
-          (vectorReduceSinkInfo.getReduceSinkBucketTypeInfos() == null ? "NULL" : Arrays.toString(vectorReduceSinkInfo.getReduceSinkBucketTypeInfos())));
-      LOG.debug("reduceSinkBucketColumnVectorTypes " +
-          (vectorReduceSinkInfo.getReduceSinkBucketColumnVectorTypes() == null ? "NULL" : Arrays.toString(vectorReduceSinkInfo.getReduceSinkBucketColumnVectorTypes())));
-      LOG.debug("reduceSinkBucketExpressions " +
-          (vectorReduceSinkInfo.getReduceSinkBucketExpressions() == null ? "NULL" : Arrays.toString(vectorReduceSinkInfo.getReduceSinkBucketExpressions())));
-  
-      LOG.debug("reduceSinkPartitionColumnMap " +
-          (vectorReduceSinkInfo.getReduceSinkPartitionColumnMap() == null ? "NULL" : Arrays.toString(vectorReduceSinkInfo.getReduceSinkPartitionColumnMap())));
-      LOG.debug("reduceSinkPartitionTypeInfos " +
-          (vectorReduceSinkInfo.getReduceSinkPartitionTypeInfos() == null ? "NULL" : Arrays.toString(vectorReduceSinkInfo.getReduceSinkPartitionTypeInfos())));
-      LOG.debug("reduceSinkPartitionColumnVectorTypes " +
-          (vectorReduceSinkInfo.getReduceSinkPartitionColumnVectorTypes() == null ? "NULL" : Arrays.toString(vectorReduceSinkInfo.getReduceSinkPartitionColumnVectorTypes())));
-      LOG.debug("reduceSinkPartitionExpressions " +
-          (vectorReduceSinkInfo.getReduceSinkPartitionExpressions() == null ? "NULL" : Arrays.toString(vectorReduceSinkInfo.getReduceSinkPartitionExpressions())));
-    }
-
     if (LOG.isDebugEnabled()) {
       // Determine the name of our map or reduce task for debug tracing.
       BaseWork work = Utilities.getMapWork(hconf);
@@ -315,6 +280,21 @@ public abstract class VectorReduceSinkCommonOperator extends TerminalOperator<Re
     keyBinarySortableSerializeWrite = new BinarySortableSerializeWrite(columnSortOrder,
             columnNullMarker, columnNotNullMarker);
 
+    // Create all nulls key.
+    try {
+      Output nullKeyOutput = new Output();
+      keyBinarySortableSerializeWrite.set(nullKeyOutput);
+      for (int i = 0; i < reduceSinkKeyColumnMap.length; i++) {
+        keyBinarySortableSerializeWrite.writeNull();
+      }
+      int nullBytesLength = nullKeyOutput.getLength();
+      nullBytes = new byte[nullBytesLength];
+      System.arraycopy(nullKeyOutput.getData(), 0, nullBytes, 0, nullBytesLength);
+      nullKeyHashCode = HashCodeUtil.calculateBytesHashCode(nullBytes, 0, nullBytesLength);
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+
     valueLazyBinarySerializeWrite = new LazyBinarySerializeWrite(reduceSinkValueColumnMap.length);
 
     valueVectorSerializeRow =
@@ -332,6 +312,101 @@ public abstract class VectorReduceSinkCommonOperator extends TerminalOperator<Re
     batchCounter = 0;
   }
 
+  @Override
+  public void process(Object row, int tag) throws HiveException {
+
+    try {
+      VectorizedRowBatch batch = (VectorizedRowBatch) row;
+
+      batchCounter++;
+
+      if (batch.size == 0) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
+        }
+        return;
+      }
+
+      // Perform any key expressions.  Results will go into scratch columns.
+      if (reduceSinkKeyExpressions != null) {
+        for (VectorExpression ve : reduceSinkKeyExpressions) {
+          ve.evaluate(batch);
+        }
+      }
+
+      // Perform any value expressions.  Results will go into scratch columns.
+      if (reduceSinkValueExpressions != null) {
+        for (VectorExpression ve : reduceSinkValueExpressions) {
+          ve.evaluate(batch);
+        }
+      }
+
+      serializedKeySeries.processBatch(batch);
+
+      boolean selectedInUse = batch.selectedInUse;
+      int[] selected = batch.selected;
+
+      int keyLength;
+      int logical;
+      int end;
+      int batchIndex;
+      do {
+        if (serializedKeySeries.getCurrentIsAllNull()) {
+
+          // Use the same logic as ReduceSinkOperator.toHiveKey.
+          //
+          if (tag == -1 || reduceSkipTag) {
+            keyWritable.set(nullBytes, 0, nullBytes.length);
+          } else {
+            keyWritable.setSize(nullBytes.length + 1);
+            System.arraycopy(nullBytes, 0, keyWritable.get(), 0, nullBytes.length);
+            keyWritable.get()[nullBytes.length] = reduceTagByte;
+          }
+          keyWritable.setDistKeyLength(nullBytes.length);
+          keyWritable.setHashCode(nullKeyHashCode);
+
+        } else {
+
+          // One serialized key for 1 or more rows for the duplicate keys.
+          // LOG.info("reduceSkipTag " + reduceSkipTag + " tag " + tag + " reduceTagByte " + (int) reduceTagByte + " keyLength " + serializedKeySeries.getSerializedLength());
+          // LOG.info("process offset " + serializedKeySeries.getSerializedStart() + " length " + serializedKeySeries.getSerializedLength());
+          keyLength = serializedKeySeries.getSerializedLength();
+          if (tag == -1 || reduceSkipTag) {
+            keyWritable.set(serializedKeySeries.getSerializedBytes(),
+                serializedKeySeries.getSerializedStart(), keyLength);
+          } else {
+            keyWritable.setSize(keyLength + 1);
+            System.arraycopy(serializedKeySeries.getSerializedBytes(),
+                serializedKeySeries.getSerializedStart(), keyWritable.get(), 0, keyLength);
+            keyWritable.get()[keyLength] = reduceTagByte;
+          }
+          keyWritable.setDistKeyLength(keyLength);
+          keyWritable.setHashCode(serializedKeySeries.getCurrentHashCode());
+        }
+
+        logical = serializedKeySeries.getCurrentLogical();
+        end = logical + serializedKeySeries.getCurrentDuplicateCount();
+        do {
+          batchIndex = (selectedInUse ? selected[logical] : logical);
+
+          valueLazyBinarySerializeWrite.reset();
+          valueVectorSerializeRow.serializeWrite(batch, batchIndex);
+
+          valueBytesWritable.set(valueOutput.getData(), 0, valueOutput.getLength());
+
+          collect(keyWritable, valueBytesWritable);
+        } while (++logical < end);
+  
+        if (!serializedKeySeries.next()) {
+          break;
+        }
+      } while (true);
+
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+  }
+
   protected void collect(BytesWritable keyWritable, Writable valueWritable) throws IOException {
     // Since this is a terminal operator, update counters explicitly -
     // forward is not called

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkLongOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkLongOperator.java
index 0bc1cd1..325f773 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkLongOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkLongOperator.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 /*
  * Specialized class for native vectorized reduce sink that is reducing on a single long key column.
  */
-public class VectorReduceSinkLongOperator extends VectorReduceSinkUniformHashOperator {
+public class VectorReduceSinkLongOperator extends VectorReduceSinkCommonOperator {
 
   private static final long serialVersionUID = 1L;
   private static final String CLASS_NAME = VectorReduceSinkLongOperator.class.getName();

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkMultiKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkMultiKeyOperator.java
index 1cca94d..2027187 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkMultiKeyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkMultiKeyOperator.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableSerialize
  * Specialized class for native vectorized reduce sink that is reducing on multiple key columns
  * (or a single non-long / non-string column).
  */
-public class VectorReduceSinkMultiKeyOperator extends VectorReduceSinkUniformHashOperator {
+public class VectorReduceSinkMultiKeyOperator extends VectorReduceSinkCommonOperator {
 
   private static final long serialVersionUID = 1L;
   private static final String CLASS_NAME = VectorReduceSinkMultiKeyOperator.class.getName();

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkObjectHashOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkObjectHashOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkObjectHashOperator.java
deleted file mode 100644
index 6312c44..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkObjectHashOperator.java
+++ /dev/null
@@ -1,289 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector.reducesink;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Properties;
-import java.util.Random;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.ql.CompilationOpContext;
-import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator.Counter;
-import org.apache.hadoop.hive.ql.exec.TerminalOperator;
-import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.exec.vector.VectorExtractRow;
-import org.apache.hadoop.hive.ql.exec.vector.VectorSerializeRow;
-import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
-import org.apache.hadoop.hive.ql.exec.vector.VectorizationContextRegion;
-import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
-import org.apache.hadoop.hive.ql.exec.vector.keyseries.VectorKeySeriesSerialized;
-import org.apache.hadoop.hive.ql.io.HiveKey;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.BaseWork;
-import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
-import org.apache.hadoop.hive.ql.plan.TableDesc;
-import org.apache.hadoop.hive.ql.plan.VectorReduceSinkDesc;
-import org.apache.hadoop.hive.ql.plan.VectorReduceSinkInfo;
-import org.apache.hadoop.hive.ql.plan.api.OperatorType;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.ByteStream.Output;
-import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;
-import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableSerializeWrite;
-import org.apache.hadoop.hive.serde2.lazybinary.fast.LazyBinarySerializeWrite;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
-import org.apache.hadoop.hive.serde2.objectinspector.StructField;
-import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapred.OutputCollector;
-import org.apache.hive.common.util.HashCodeUtil;
-
-import com.google.common.base.Preconditions;
-
-/**
- * This class is uniform hash (common) operator class for native vectorized reduce sink.
- */
-public class VectorReduceSinkObjectHashOperator extends VectorReduceSinkCommonOperator {
-
-  private static final long serialVersionUID = 1L;
-  private static final String CLASS_NAME = VectorReduceSinkObjectHashOperator.class.getName();
-  private static final Log LOG = LogFactory.getLog(CLASS_NAME);
-
-  protected int[] reduceSinkBucketColumnMap;
-  protected TypeInfo[] reduceSinkBucketTypeInfos;
-
-  protected VectorExpression[] reduceSinkBucketExpressions;
-
-  protected int[] reduceSinkPartitionColumnMap;
-  protected TypeInfo[] reduceSinkPartitionTypeInfos;
-
-  protected VectorExpression[] reduceSinkPartitionExpressions;
-
-  // The above members are initialized by the constructor and must not be
-  // transient.
-  //---------------------------------------------------------------------------
-
-  protected transient Output keyOutput;
-  protected transient VectorSerializeRow<BinarySortableSerializeWrite> keyVectorSerializeRow;
-
-  private transient boolean hasBuckets;
-  private transient int numBuckets;
-  private transient ObjectInspector[] bucketObjectInspectors;
-  private transient VectorExtractRow bucketVectorExtractRow;
-  private transient Object[] bucketFieldValues;
-
-  private transient boolean isPartitioned;
-  private transient ObjectInspector[] partitionObjectInspectors;
-  private transient VectorExtractRow partitionVectorExtractRow;
-  private transient Object[] partitionFieldValues;
-  private transient Random nonPartitionRandom;
-
-  /** Kryo ctor. */
-  protected VectorReduceSinkObjectHashOperator() {
-    super();
-  }
-
-  public VectorReduceSinkObjectHashOperator(CompilationOpContext ctx) {
-    super(ctx);
-  }
-
-  public VectorReduceSinkObjectHashOperator(CompilationOpContext ctx,
-      VectorizationContext vContext, OperatorDesc conf) throws HiveException {
-    super(ctx, vContext, conf);
-
-    LOG.info("VectorReduceSinkObjectHashOperator constructor vectorReduceSinkInfo " + vectorReduceSinkInfo);
-
-    // This the is Object Hash class variation.
-    Preconditions.checkState(!vectorReduceSinkInfo.getUseUniformHash());
-
-    reduceSinkBucketColumnMap = vectorReduceSinkInfo.getReduceSinkBucketColumnMap();
-    reduceSinkBucketTypeInfos = vectorReduceSinkInfo.getReduceSinkBucketTypeInfos();
-    reduceSinkBucketExpressions = vectorReduceSinkInfo.getReduceSinkBucketExpressions();
-
-    reduceSinkPartitionColumnMap = vectorReduceSinkInfo.getReduceSinkPartitionColumnMap();
-    reduceSinkPartitionTypeInfos = vectorReduceSinkInfo.getReduceSinkPartitionTypeInfos();
-    reduceSinkPartitionExpressions = vectorReduceSinkInfo.getReduceSinkPartitionExpressions();
-  }
-
-  private ObjectInspector[] getObjectInspectorArray(TypeInfo[] typeInfos) {
-    final int size = typeInfos.length;
-    ObjectInspector[] objectInspectors = new ObjectInspector[size];
-    for(int i = 0; i < size; i++) {
-      TypeInfo typeInfo = typeInfos[i];
-      ObjectInspector standardWritableObjectInspector =
-              TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(typeInfo);
-      objectInspectors[i] = standardWritableObjectInspector;
-    }
-    return objectInspectors;
-  }
-
-  @Override
-  protected void initializeOp(Configuration hconf) throws HiveException {
-    super.initializeOp(hconf);
-
-    keyOutput = new Output();
-    keyBinarySortableSerializeWrite.set(keyOutput);
-    keyVectorSerializeRow =
-        new VectorSerializeRow<BinarySortableSerializeWrite>(
-            keyBinarySortableSerializeWrite);
-    keyVectorSerializeRow.init(reduceSinkKeyTypeInfos, reduceSinkKeyColumnMap);
- 
-    hasBuckets = false;
-    isPartitioned = false;
-    numBuckets = 0;
- 
-    // Object Hash.
-
-    numBuckets = conf.getNumBuckets();
-    hasBuckets = (numBuckets > 0);
-
-    if (hasBuckets) {
-      bucketObjectInspectors = getObjectInspectorArray(reduceSinkBucketTypeInfos);
-      bucketVectorExtractRow = new VectorExtractRow();
-      bucketVectorExtractRow.init(reduceSinkBucketTypeInfos, reduceSinkBucketColumnMap);
-      bucketFieldValues = new Object[reduceSinkBucketTypeInfos.length];
-    }
-  
-    isPartitioned = (conf.getPartitionCols() != null);
-    if (!isPartitioned) {
-      nonPartitionRandom = new Random(12345);
-    } else {
-      partitionObjectInspectors = getObjectInspectorArray(reduceSinkPartitionTypeInfos);
-      LOG.debug("*NEW* partitionObjectInspectors " + Arrays.toString(partitionObjectInspectors));
-      partitionVectorExtractRow = new VectorExtractRow();
-      partitionVectorExtractRow.init(reduceSinkPartitionTypeInfos, reduceSinkPartitionColumnMap);
-      partitionFieldValues = new Object[reduceSinkPartitionTypeInfos.length];
-    }
-  }
-
-  @Override
-  public void process(Object row, int tag) throws HiveException {
-
-    try {
-
-      VectorizedRowBatch batch = (VectorizedRowBatch) row;
-
-      batchCounter++;
-
-      if (batch.size == 0) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
-        }
-        return;
-      }
-
-      // Perform any key expressions.  Results will go into scratch columns.
-      if (reduceSinkKeyExpressions != null) {
-        for (VectorExpression ve : reduceSinkKeyExpressions) {
-          ve.evaluate(batch);
-        }
-      }
-  
-      // Perform any value expressions.  Results will go into scratch columns.
-      if (reduceSinkValueExpressions != null) {
-        for (VectorExpression ve : reduceSinkValueExpressions) {
-          ve.evaluate(batch);
-        }
-      }
-  
-      // Perform any bucket expressions.  Results will go into scratch columns.
-      if (reduceSinkBucketExpressions != null) {
-        for (VectorExpression ve : reduceSinkBucketExpressions) {
-          ve.evaluate(batch);
-        }
-      }
-  
-      // Perform any partition expressions.  Results will go into scratch columns.
-      if (reduceSinkPartitionExpressions != null) {
-        for (VectorExpression ve : reduceSinkPartitionExpressions) {
-          ve.evaluate(batch);
-        }
-      }
-  
-      final boolean selectedInUse = batch.selectedInUse;
-      int[] selected = batch.selected;
-
-      final int size = batch.size;
-      for (int logical = 0; logical < size; logical++) {
-        final int batchIndex = (selectedInUse ? selected[logical] : logical);
-  
-        final int hashCode;
-        if (!hasBuckets) {
-          if (!isPartitioned) {
-            hashCode = nonPartitionRandom.nextInt();
-          } else {
-            partitionVectorExtractRow.extractRow(batch, batchIndex, partitionFieldValues);
-            hashCode = 
-                ObjectInspectorUtils.getBucketHashCode(
-                    partitionFieldValues, partitionObjectInspectors);
-          }
-        } else {
-          bucketVectorExtractRow.extractRow(batch, batchIndex, bucketFieldValues);
-          final int bucketNum =
-              ObjectInspectorUtils.getBucketNumber(
-                  bucketFieldValues, bucketObjectInspectors, numBuckets);
-          if (!isPartitioned) {
-            hashCode = nonPartitionRandom.nextInt() * 31 + bucketNum;
-          } else {
-            partitionVectorExtractRow.extractRow(batch, batchIndex, partitionFieldValues);
-            hashCode = 
-                ObjectInspectorUtils.getBucketHashCode(
-                    partitionFieldValues, partitionObjectInspectors) * 31 + bucketNum;
-          }
-        }
-  
-        keyBinarySortableSerializeWrite.reset();
-        keyVectorSerializeRow.serializeWrite(batch, batchIndex);
-  
-        // One serialized key for 1 or more rows for the duplicate keys.
-        final int keyLength = keyOutput.getLength();
-        if (tag == -1 || reduceSkipTag) {
-          keyWritable.set(keyOutput.getData(), 0, keyLength);
-        } else {
-          keyWritable.setSize(keyLength + 1);
-          System.arraycopy(keyOutput.getData(), 0, keyWritable.get(), 0, keyLength);
-          keyWritable.get()[keyLength] = reduceTagByte;
-        }
-        keyWritable.setDistKeyLength(keyLength);
-        keyWritable.setHashCode(hashCode);
-  
-        valueLazyBinarySerializeWrite.reset();
-        valueVectorSerializeRow.serializeWrite(batch, batchIndex);
-  
-        valueBytesWritable.set(valueOutput.getData(), 0, valueOutput.getLength());
-  
-        collect(keyWritable, valueBytesWritable);
-      }
-    } catch (Exception e) {
-      throw new HiveException(e);
-    }
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkStringOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkStringOperator.java
index a838f4c..b655e6e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkStringOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkStringOperator.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 /*
  * Specialized class for native vectorized reduce sink that is reducing on a single long key column.
  */
-public class VectorReduceSinkStringOperator extends VectorReduceSinkUniformHashOperator {
+public class VectorReduceSinkStringOperator extends VectorReduceSinkCommonOperator {
 
   private static final long serialVersionUID = 1L;
   private static final String CLASS_NAME = VectorReduceSinkStringOperator.class.getName();

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkUniformHashOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkUniformHashOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkUniformHashOperator.java
deleted file mode 100644
index 2dfa721..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkUniformHashOperator.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector.reducesink;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Properties;
-import java.util.Random;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.ql.CompilationOpContext;
-import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator.Counter;
-import org.apache.hadoop.hive.ql.exec.TerminalOperator;
-import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.exec.vector.VectorExtractRow;
-import org.apache.hadoop.hive.ql.exec.vector.VectorSerializeRow;
-import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
-import org.apache.hadoop.hive.ql.exec.vector.VectorizationContextRegion;
-import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
-import org.apache.hadoop.hive.ql.exec.vector.keyseries.VectorKeySeriesSerialized;
-import org.apache.hadoop.hive.ql.io.HiveKey;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.BaseWork;
-import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
-import org.apache.hadoop.hive.ql.plan.TableDesc;
-import org.apache.hadoop.hive.ql.plan.VectorReduceSinkDesc;
-import org.apache.hadoop.hive.ql.plan.VectorReduceSinkInfo;
-import org.apache.hadoop.hive.ql.plan.api.OperatorType;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.ByteStream.Output;
-import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;
-import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableSerializeWrite;
-import org.apache.hadoop.hive.serde2.lazybinary.fast.LazyBinarySerializeWrite;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
-import org.apache.hadoop.hive.serde2.objectinspector.StructField;
-import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapred.OutputCollector;
-import org.apache.hive.common.util.HashCodeUtil;
-
-/**
- * This class is uniform hash (common) operator class for native vectorized reduce sink.
- */
-public abstract class VectorReduceSinkUniformHashOperator extends VectorReduceSinkCommonOperator {
-
-  private static final long serialVersionUID = 1L;
-  private static final String CLASS_NAME = VectorReduceSinkUniformHashOperator.class.getName();
-  private static final Log LOG = LogFactory.getLog(CLASS_NAME);
-
-  // The above members are initialized by the constructor and must not be
-  // transient.
-  //---------------------------------------------------------------------------
-
-  // The serialized all null key and its hash code.
-  private transient byte[] nullBytes;
-  private transient int nullKeyHashCode;
-
-  // The object that determines equal key series.
-  protected transient VectorKeySeriesSerialized serializedKeySeries;
-
-
-  /** Kryo ctor. */
-  protected VectorReduceSinkUniformHashOperator() {
-    super();
-  }
-
-  public VectorReduceSinkUniformHashOperator(CompilationOpContext ctx) {
-    super(ctx);
-  }
-
-  public VectorReduceSinkUniformHashOperator(CompilationOpContext ctx,
-      VectorizationContext vContext, OperatorDesc conf) throws HiveException {
-    super(ctx, vContext, conf);
-  }
-
-  @Override
-  protected void initializeOp(Configuration hconf) throws HiveException {
-    super.initializeOp(hconf);
-
-    // Create all nulls key.
-    try {
-      Output nullKeyOutput = new Output();
-      keyBinarySortableSerializeWrite.set(nullKeyOutput);
-      for (int i = 0; i < reduceSinkKeyColumnMap.length; i++) {
-        keyBinarySortableSerializeWrite.writeNull();
-      }
-      int nullBytesLength = nullKeyOutput.getLength();
-      nullBytes = new byte[nullBytesLength];
-      System.arraycopy(nullKeyOutput.getData(), 0, nullBytes, 0, nullBytesLength);
-      nullKeyHashCode = HashCodeUtil.calculateBytesHashCode(nullBytes, 0, nullBytesLength);
-    } catch (Exception e) {
-      throw new HiveException(e);
-    }
-  }
-
-  @Override
-  public void process(Object row, int tag) throws HiveException {
-
-    try {
-      VectorizedRowBatch batch = (VectorizedRowBatch) row;
-
-      batchCounter++;
-
-      if (batch.size == 0) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
-        }
-        return;
-      }
-
-      // Perform any key expressions.  Results will go into scratch columns.
-      if (reduceSinkKeyExpressions != null) {
-        for (VectorExpression ve : reduceSinkKeyExpressions) {
-          ve.evaluate(batch);
-        }
-      }
-
-      // Perform any value expressions.  Results will go into scratch columns.
-      if (reduceSinkValueExpressions != null) {
-        for (VectorExpression ve : reduceSinkValueExpressions) {
-          ve.evaluate(batch);
-        }
-      }
-
-      serializedKeySeries.processBatch(batch);
-
-      boolean selectedInUse = batch.selectedInUse;
-      int[] selected = batch.selected;
-
-      int keyLength;
-      int logical;
-      int end;
-      int batchIndex;
-      do {
-        if (serializedKeySeries.getCurrentIsAllNull()) {
-
-          // Use the same logic as ReduceSinkOperator.toHiveKey.
-          //
-          if (tag == -1 || reduceSkipTag) {
-            keyWritable.set(nullBytes, 0, nullBytes.length);
-          } else {
-            keyWritable.setSize(nullBytes.length + 1);
-            System.arraycopy(nullBytes, 0, keyWritable.get(), 0, nullBytes.length);
-            keyWritable.get()[nullBytes.length] = reduceTagByte;
-          }
-          keyWritable.setDistKeyLength(nullBytes.length);
-          keyWritable.setHashCode(nullKeyHashCode);
-
-        } else {
-
-          // One serialized key for 1 or more rows for the duplicate keys.
-          // LOG.info("reduceSkipTag " + reduceSkipTag + " tag " + tag + " reduceTagByte " + (int) reduceTagByte + " keyLength " + serializedKeySeries.getSerializedLength());
-          // LOG.info("process offset " + serializedKeySeries.getSerializedStart() + " length " + serializedKeySeries.getSerializedLength());
-          keyLength = serializedKeySeries.getSerializedLength();
-          if (tag == -1 || reduceSkipTag) {
-            keyWritable.set(serializedKeySeries.getSerializedBytes(),
-                serializedKeySeries.getSerializedStart(), keyLength);
-          } else {
-            keyWritable.setSize(keyLength + 1);
-            System.arraycopy(serializedKeySeries.getSerializedBytes(),
-                serializedKeySeries.getSerializedStart(), keyWritable.get(), 0, keyLength);
-            keyWritable.get()[keyLength] = reduceTagByte;
-          }
-          keyWritable.setDistKeyLength(keyLength);
-          keyWritable.setHashCode(serializedKeySeries.getCurrentHashCode());
-        }
-
-        logical = serializedKeySeries.getCurrentLogical();
-        end = logical + serializedKeySeries.getCurrentDuplicateCount();
-        do {
-          batchIndex = (selectedInUse ? selected[logical] : logical);
-
-          valueLazyBinarySerializeWrite.reset();
-          valueVectorSerializeRow.serializeWrite(batch, batchIndex);
-
-          valueBytesWritable.set(valueOutput.getData(), 0, valueOutput.getLength());
-
-          collect(keyWritable, valueBytesWritable);
-        } while (++logical < end);
-  
-        if (!serializedKeySeries.next()) {
-          break;
-        }
-      } while (true);
-
-    } catch (Exception e) {
-      throw new HiveException(e);
-    }
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java
index c23d202..6582cdd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java
@@ -46,12 +46,13 @@ import org.apache.hadoop.mapred.Counters.Group;
  * Each session uses a new object, which creates a new file.
  */
 public class HiveHistoryImpl implements HiveHistory{
-  private static final Logger LOG = LoggerFactory.getLogger("hive.ql.exec.HiveHistoryImpl");
 
   PrintWriter histStream; // History File stream
 
   String histFileName; // History file name
 
+  private static final Logger LOG = LoggerFactory.getLogger("hive.ql.exec.HiveHistoryImpl");
+
   private static final Random randGen = new Random();
 
   private LogHelper console;
@@ -304,7 +305,7 @@ public class HiveHistoryImpl implements HiveHistory{
   /**
    * write out counters.
    */
-  static final ThreadLocal<Map<String,String>> ctrMapFactory =
+  static ThreadLocal<Map<String,String>> ctrMapFactory =
       new ThreadLocal<Map<String, String>>() {
     @Override
     protected Map<String,String> initialValue() {

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookUtils.java
index 4380fe3..2f0bd88 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookUtils.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -18,26 +18,66 @@
 
 package org.apache.hadoop.hive.ql.hooks;
 
+import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-
+import org.apache.hadoop.hive.ql.exec.Utilities;
 
 public class HookUtils {
+  /**
+   * Returns the hooks specified in a configuration variable.  The hooks are returned
+   * in a list in the order they were specified in the configuration variable.
+   *
+   * @param conf        Configuration object
+   * @param hookConfVar The configuration variable specifying a comma separated list
+   *                    of the hook class names.
+   * @param clazz       The super type of the hooks.
+   * @return            A list of the hooks cast as the type specified in clazz,
+   *                    in the order they are listed in the value of hookConfVar
+   * @throws ClassNotFoundException
+   * @throws IllegalAccessException
+   * @throws InstantiationException
+   */
+  public static <T extends Hook> List<T> getHooks(HiveConf conf,
+      ConfVars hookConfVar, Class<T> clazz)
+      throws InstantiationException, IllegalAccessException, ClassNotFoundException  {
+    String csHooks = conf.getVar(hookConfVar);
+    List<T> hooks = new ArrayList<T>();
+    if (csHooks == null) {
+      return hooks;
+    }
+
+    csHooks = csHooks.trim();
+    if (csHooks.equals("")) {
+      return hooks;
+    }
+
+    String[] hookClasses = csHooks.split(",");
+    for (String hookClass : hookClasses) {
+        T hook = (T) Class.forName(hookClass.trim(), true,
+                Utilities.getSessionSpecifiedClassLoader()).newInstance();
+        hooks.add(hook);
+    }
+
+    return hooks;
+  }
 
   public static String redactLogString(HiveConf conf, String logString)
-          throws InstantiationException, IllegalAccessException, ClassNotFoundException {
+      throws InstantiationException, IllegalAccessException, ClassNotFoundException {
 
     String redactedString = logString;
 
     if (conf != null && logString != null) {
-      List<Redactor> queryRedactors = new HooksLoader(conf).getHooks(ConfVars.QUERYREDACTORHOOKS);
+      List<Redactor> queryRedactors = getHooks(conf, ConfVars.QUERYREDACTORHOOKS, Redactor.class);
       for (Redactor redactor : queryRedactors) {
         redactor.setConf(conf);
         redactedString = redactor.redactQuery(redactedString);
       }
     }
+
     return redactedString;
   }
 }
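
For reference, a minimal usage sketch of the static HookUtils.getHooks signature restored in the hunk above; it mirrors what redactLogString does, and the wrapper class name, the HiveConf instance, and the sample query are assumptions made purely for illustration, not part of this commit:

    import java.util.List;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
    import org.apache.hadoop.hive.ql.hooks.HookUtils;
    import org.apache.hadoop.hive.ql.hooks.Redactor;

    // Hypothetical caller, shown only to illustrate the restored static API.
    public class RedactorSketch {

      // Loads the hooks listed under ConfVars.QUERYREDACTORHOOKS, in configuration
      // order, and applies them to the query string the same way redactLogString does.
      public static String redact(HiveConf conf, String query)
          throws InstantiationException, IllegalAccessException, ClassNotFoundException {
        List<Redactor> redactors =
            HookUtils.getHooks(conf, ConfVars.QUERYREDACTORHOOKS, Redactor.class);
        String redacted = query;
        for (Redactor redactor : redactors) {
          redactor.setConf(conf);                 // each hook sees the session configuration
          redacted = redactor.redactQuery(redacted);
        }
        return redacted;
      }
    }

With HooksLoader deleted below, callers return to this static utility instead of instantiating a loader object, as the change to redactLogString above shows.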

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/hooks/HooksLoader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HooksLoader.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HooksLoader.java
deleted file mode 100644
index 0008726..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HooksLoader.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.hadoop.hive.ql.hooks;
-
-import java.util.List;
-
-import com.google.common.collect.ImmutableList;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-
-/**
- * A loader class for {@link Hook}s. The class provides a way to create and instantiate {@link Hook} objects. The
- * methodology for how hooks are loaded is left up to the individual methods.
- */
-public class HooksLoader {
-
-  private final HiveConf conf;
-
-  /**
-   * Creates a new {@link HooksLoader} that uses the specified {@link HiveConf} to load the {@link Hook}s.
-   *
-   * @param conf the {@link HiveConf} to use when loading the {@link Hook}s
-   */
-  public HooksLoader(HiveConf conf) {
-    this.conf = conf;
-  }
-
-  /**
-   * Delegates to {@link #getHooks(HiveConf.ConfVars)} and prints an error to the specified {@link SessionState.LogHelper} if
-   * a {@link ClassNotFoundException} is thrown.
-   *
-   * @param hookConfVar the configuration variable specifying a comma separated list of the hook class names
-   * @param console the {@link SessionState.LogHelper} to print to if a {@link ClassNotFoundException} is thrown by the
-   *                {@link #getHooks(HiveConf.ConfVars)} method
-   *
-   * @return a list of the hooks objects, in the order they are listed in the value of hookConfVar
-   *
-   * @throws ClassNotFoundException if the specified class names could not be found
-   * @throws IllegalAccessException if the specified class names could not be accessed
-   * @throws InstantiationException if the specified class names could not be instantiated
-   */
-  public <T extends Hook> List<T> getHooks(HiveConf.ConfVars hookConfVar, SessionState.LogHelper console)
-          throws IllegalAccessException, InstantiationException, ClassNotFoundException {
-    try {
-      return getHooks(hookConfVar);
-    } catch (ClassNotFoundException e) {
-      console.printError(hookConfVar.varname + " Class not found: " + e.getMessage());
-      throw e;
-    }
-  }
-
-  /**
-   * Returns the hooks specified in a configuration variable. The hooks are returned in a list in the order they were
-   * specified in the configuration variable. The value of the specified conf variable should be a comma separated list
-   * of class names where each class implements the {@link Hook} interface. The method uses reflection to create an instance
-   * of each class and then returns them in a {@link List}.
-   *
-   * @param hookConfVar The configuration variable specifying a comma separated list of the hook class names
-   *
-   * @return a list of the hooks objects, in the order they are listed in the value of hookConfVar
-   *
-   * @throws ClassNotFoundException if the specified class names could not be found
-   * @throws IllegalAccessException if the specified class names could not be accessed
-   * @throws InstantiationException if the specified class names could not be instantiated
-   */
-  public <T extends Hook> List<T> getHooks(HiveConf.ConfVars hookConfVar)
-          throws InstantiationException, IllegalAccessException, ClassNotFoundException {
-    String csHooks = conf.getVar(hookConfVar);
-    ImmutableList.Builder<T> hooks = ImmutableList.builder();
-    if (csHooks == null) {
-      return ImmutableList.of();
-    }
-
-    csHooks = csHooks.trim();
-    if (csHooks.isEmpty()) {
-      return ImmutableList.of();
-    }
-
-    String[] hookClasses = csHooks.split(",");
-    for (String hookClass : hookClasses) {
-      T hook = (T) Class.forName(hookClass.trim(), true,
-              Utilities.getSessionSpecifiedClassLoader()).newInstance();
-      hooks.add(hook);
-    }
-
-    return hooks.build();
-  }
-}

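With HooksLoader gone, call sites that previously built a loader instance go through the restored static helper instead, as the redactLogString() hunk earlier in this commit already shows. A hedged sketch of that call shape for query redactors (the wrapper class and method name here are illustrative, not part of the commit):

    import java.util.List;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
    import org.apache.hadoop.hive.ql.hooks.HookUtils;
    import org.apache.hadoop.hive.ql.hooks.Redactor;

    public class RedactorLoadingSketch {
      // Replaces the removed call shape
      // "new HooksLoader(conf).getHooks(ConfVars.QUERYREDACTORHOOKS)"
      // with the restored static helper.
      static List<Redactor> loadRedactors(HiveConf conf)
          throws InstantiationException, IllegalAccessException, ClassNotFoundException {
        return HookUtils.getHooks(conf, ConfVars.QUERYREDACTORHOOKS, Redactor.class);
      }
    }
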
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java
index 7305436..2806c54 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java
@@ -27,7 +27,6 @@ import java.util.Set;
 
 import org.apache.commons.collections.SetUtils;
 import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.hive.common.StringInternUtils;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -404,7 +403,7 @@ public class LineageInfo implements Serializable {
      * @param expr the expr to set
      */
     public void setExpr(String expr) {
-      this.expr = StringInternUtils.internIfNotNull(expr);
+      this.expr = expr;
     }
 
     /**

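For context on the setExpr() change above: the removed StringInternUtils.internIfNotNull(expr) call deduplicated equal expression strings before storing them, trading a lookup for lower heap usage when many lineage entries carry identical expressions; after the revert the string is stored as-is. A JDK-only sketch of the interning idea (an illustration, not the Hive utility itself):

    public class InternSketch {
      public static void main(String[] args) {
        String a = new String("key_col + 1");
        String b = new String("key_col + 1");
        System.out.println(a == b);                   // false: two distinct objects with equal content
        System.out.println(a.intern() == b.intern()); // true: both resolve to one pooled instance
      }
    }
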
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecOrcRowGroupCountPrinter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecOrcRowGroupCountPrinter.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecOrcRowGroupCountPrinter.java
index ac79ceb..18ef325 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecOrcRowGroupCountPrinter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecOrcRowGroupCountPrinter.java
@@ -60,10 +60,10 @@ public class PostExecOrcRowGroupCountPrinter implements ExecuteWithHookContext {
       if (counters != null) {
         for (CounterGroup group : counters) {
           if (group.getName().equals(LlapIOCounters.class.getName())) {
-            console.printInfo(tezTask.getId() + " LLAP IO COUNTERS:", false);
+            console.printError(tezTask.getId() + " LLAP IO COUNTERS:");
             for (TezCounter counter : group) {
               if (counter.getDisplayName().equals(LlapIOCounters.SELECTED_ROWGROUPS.name())) {
-                console.printInfo("   " + counter.getDisplayName() + ": " + counter.getValue(), false);
+                console.printError("   " + counter.getDisplayName() + ": " + counter.getValue());
               }
             }
           }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java
index 45bd6e0..412f45c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java
@@ -62,25 +62,25 @@ public class PostExecTezSummaryPrinter implements ExecuteWithHookContext {
         String hiveCountersGroup = HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP);
         for (CounterGroup group : counters) {
           if (hiveCountersGroup.equals(group.getDisplayName())) {
-            console.printInfo(tezTask.getId() + " HIVE COUNTERS:", false);
+            console.printError(tezTask.getId() + " HIVE COUNTERS:");
             for (TezCounter counter : group) {
-              console.printInfo("   " + counter.getDisplayName() + ": " + counter.getValue(), false);
+              console.printError("   " + counter.getDisplayName() + ": " + counter.getValue());
             }
           } else if (group.getName().equals(FileSystemCounter.class.getName())) {
-            console.printInfo(tezTask.getId() + " FILE SYSTEM COUNTERS:", false);
+            console.printError(tezTask.getId() + " FILE SYSTEM COUNTERS:");
             for (TezCounter counter : group) {
               // HDFS counters should be relatively consistent across test runs when compared to
               // local file system counters
               if (counter.getName().contains("HDFS")) {
-                console.printInfo("   " + counter.getDisplayName() + ": " + counter.getValue(), false);
+                console.printError("   " + counter.getDisplayName() + ": " + counter.getValue());
               }
             }
           } else if (group.getName().equals(LlapIOCounters.class.getName())) {
-            console.printInfo(tezTask.getId() + " LLAP IO COUNTERS:", false);
+            console.printError(tezTask.getId() + " LLAP IO COUNTERS:");
             List<String> testSafeCounters = LlapIOCounters.testSafeCounterNames();
             for (TezCounter counter : group) {
               if (testSafeCounters.contains(counter.getDisplayName())) {
-                console.printInfo("   " + counter.getDisplayName() + ": " + counter.getValue(), false);
+                console.printError("   " + counter.getDisplayName() + ": " + counter.getValue());
               }
             }
           }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecutePrinter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecutePrinter.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecutePrinter.java
index 3e74396..b4fc125 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecutePrinter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecutePrinter.java
@@ -116,8 +116,8 @@ public class PostExecutePrinter implements ExecuteWithHookContext {
     }
 
     if (queryState != null) {
-      console.printInfo("POSTHOOK: query: " + queryState.getQueryString().trim(), false);
-      console.printInfo("POSTHOOK: type: " + queryState.getCommandType(), false);
+      console.printError("POSTHOOK: query: " + queryState.getQueryString().trim());
+      console.printError("POSTHOOK: type: " + queryState.getCommandType());
     }
 
     PreExecutePrinter.printEntities(console, inputs, "POSTHOOK: Input: ");
@@ -167,7 +167,7 @@ public class PostExecutePrinter implements ExecuteWithHookContext {
         }
         sb.append("]");
 
-        console.printInfo(sb.toString(), false);
+        console.printError(sb.toString());
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecutePrinter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecutePrinter.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecutePrinter.java
index 20acfb1..232c62d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecutePrinter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecutePrinter.java
@@ -65,8 +65,8 @@ public class PreExecutePrinter implements ExecuteWithHookContext {
     }
 
     if (queryState != null) {
-      console.printInfo("PREHOOK: query: " + queryState.getQueryString().trim(), false);
-      console.printInfo("PREHOOK: type: " + queryState.getCommandType(), false);
+      console.printError("PREHOOK: query: " + queryState.getQueryString().trim());
+      console.printError("PREHOOK: type: " + queryState.getCommandType());
     }
 
     printEntities(console, inputs, "PREHOOK: Input: ");
@@ -80,7 +80,7 @@ public class PreExecutePrinter implements ExecuteWithHookContext {
     }
     Collections.sort(strings);
     for (String s : strings) {
-      console.printInfo(prefix + s, false);
+      console.printError(prefix + s);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/hooks/QueryLifeTimeHookContextImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/QueryLifeTimeHookContextImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/QueryLifeTimeHookContextImpl.java
index 1845121..5340848 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/QueryLifeTimeHookContextImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/QueryLifeTimeHookContextImpl.java
@@ -20,12 +20,10 @@ package org.apache.hadoop.hive.ql.hooks;
 
 import org.apache.hadoop.hive.conf.HiveConf;
 
-
 public class QueryLifeTimeHookContextImpl implements QueryLifeTimeHookContext {
-
   private HiveConf conf;
   private String command;
-  private HookContext hc;
+  private HookContext hc = null;
 
   @Override
   public HiveConf getHiveConf() {
@@ -56,34 +54,4 @@ public class QueryLifeTimeHookContextImpl implements QueryLifeTimeHookContext {
   public void setHookContext(HookContext hc) {
     this.hc = hc;
   }
-
-  public static class Builder {
-
-    private HiveConf conf;
-    private String command;
-    private HookContext hc;
-
-    public Builder withHiveConf(HiveConf conf) {
-      this.conf = conf;
-      return this;
-    }
-
-    public Builder withCommand(String command) {
-      this.command = command;
-      return this;
-    }
-
-    public Builder withHookContext(HookContext hc) {
-      this.hc = hc;
-      return this;
-    }
-
-    public QueryLifeTimeHookContextImpl build() {
-      QueryLifeTimeHookContextImpl queryLifeTimeHookContext = new QueryLifeTimeHookContextImpl();
-      queryLifeTimeHookContext.setHiveConf(this.conf);
-      queryLifeTimeHookContext.setCommand(this.command);
-      queryLifeTimeHookContext.setHookContext(this.hc);
-      return queryLifeTimeHookContext;
-    }
-  }
 }

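With the Builder removed above, a QueryLifeTimeHookContextImpl is populated through its setters instead; a minimal sketch, with the setter names taken from the deleted Builder body:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContextImpl;

    public class QueryLifeTimeHookContextSketch {
      // Field-by-field setup replacing the removed Builder.
      static QueryLifeTimeHookContextImpl newContext(HiveConf conf, String command) {
        QueryLifeTimeHookContextImpl ctx = new QueryLifeTimeHookContextImpl();
        ctx.setHiveConf(conf);
        ctx.setCommand(command);
        ctx.setHookContext(null); // the HookContext is typically attached later, at execution time
        return ctx;
      }
    }
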
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/hooks/QueryLifeTimeHookWithParseHooks.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/QueryLifeTimeHookWithParseHooks.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/QueryLifeTimeHookWithParseHooks.java
deleted file mode 100644
index 787590d..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/QueryLifeTimeHookWithParseHooks.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.hooks;
-
-
-/**
- * Extension of {@link QueryLifeTimeHook} that has hooks for pre and post parsing of a query.
- */
-public interface QueryLifeTimeHookWithParseHooks extends QueryLifeTimeHook {
-
-  /**
-   * Invoked before a query enters the parse phase.
-   *
-   * @param ctx the context for the hook
-   */
-  void beforeParse(QueryLifeTimeHookContext ctx);
-
-  /**
-   * Invoked after query parsing. Note: if 'hasError' is true,
-   * the query won't enter the following compilation phase.
-   *
-   * @param ctx the context for the hook
-   * @param hasError whether any error occurred during compilation.
-   */
-  void afterParse(QueryLifeTimeHookContext ctx, boolean hasError);
-}

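The interface deleted above added parse-phase callbacks on top of the query lifetime hooks. A minimal sketch of what an implementation looked like before this revert; the base QueryLifeTimeHook methods and ctx.getCommand() are assumptions drawn from the wider codebase rather than from this hunk:

    import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContext;
    import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookWithParseHooks;

    public class LoggingParseHookSketch implements QueryLifeTimeHookWithParseHooks {

      @Override
      public void beforeParse(QueryLifeTimeHookContext ctx) {
        System.out.println("about to parse: " + ctx.getCommand());
      }

      @Override
      public void afterParse(QueryLifeTimeHookContext ctx, boolean hasError) {
        System.out.println("parse finished, hasError=" + hasError);
      }

      // The remaining methods come from the base QueryLifeTimeHook interface,
      // which is not shown in this hunk; their signatures are assumed and may differ.
      @Override
      public void beforeCompile(QueryLifeTimeHookContext ctx) { }

      @Override
      public void afterCompile(QueryLifeTimeHookContext ctx, boolean hasError) { }

      @Override
      public void beforeExecution(QueryLifeTimeHookContext ctx) { }

      @Override
      public void afterExecution(QueryLifeTimeHookContext ctx, boolean hasError) { }
    }
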
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java b/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java
index 2c3ba7f..a1408e9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java
@@ -26,8 +26,10 @@ import org.slf4j.LoggerFactory;
  * Holds index related constants
  */
 public class HiveIndex {
+
   public static final Logger l4j = LoggerFactory.getLogger("HiveIndex");
-  public static final String INDEX_TABLE_CREATETIME = "hive.index.basetbl.dfs.lastModifiedTime";
+
+  public static String INDEX_TABLE_CREATETIME = "hive.index.basetbl.dfs.lastModifiedTime";
 
   public static enum IndexType {
     AGGREGATE_TABLE("aggregate",  AggregateIndexHandler.class.getName()),


[50/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index 8578a64..47c59da 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,5 +29,3 @@ hcatalog/webhcat/svr/target
 conf/hive-default.xml.template
 itests/hive-blobstore/src/test/resources/blobstore-conf.xml
 .DS_Store
-*.rej
-*.orig

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/.travis.yml
----------------------------------------------------------------------
diff --git a/.travis.yml b/.travis.yml
index f392338..d0e1568 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -23,8 +23,10 @@ dist: trusty
 # that requires full git history, enable this
 # before_install: git fetch --unshallow
 
+# parallel builds on jdk7 and jdk8
 language: java
 jdk:
+  - oraclejdk7
   - oraclejdk8
 
 cache:
@@ -33,7 +35,7 @@ cache:
 
 env:
   MAVEN_SKIP_RC=true
-  MAVEN_OPTS="-Xmx2g"
+  MAVEN_OPTS="-Xmx2g -XX:MaxPermSize=512M"
 
 # workaround added: https://github.com/travis-ci/travis-ci/issues/4629
 before_install:

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/RELEASE_NOTES.txt
----------------------------------------------------------------------
diff --git a/RELEASE_NOTES.txt b/RELEASE_NOTES.txt
index d8e527b..ec6b579 100644
--- a/RELEASE_NOTES.txt
+++ b/RELEASE_NOTES.txt
@@ -1,17 +1,77 @@
 
-Release Notes - Hive - Version 2.3.0
+Release Notes - Hive - Version 2.1.0
 
 ** Sub-task
-    * [HIVE-14807] - analyze table compute statistics fails due to presence of Infinity value in double column
-    * [HIVE-15556] - Replicate views
-    * [HIVE-16186] - REPL DUMP shows last event ID of the database even if we use LIMIT option.
-    * [HIVE-16249] - With column stats, mergejoin.q throws NPE
-    * [HIVE-16293] - Column pruner should continue to work when SEL has more than 1 child
-    * [HIVE-16387] - Fix failing test org.apache.hive.jdbc.TestJdbcDriver2.testResultSetMetaData
-    * [HIVE-16440] - Fix failing test columnstats_partlvl_invalid_values when autogather column stats is on
-    * [HIVE-16504] - Addition of binary licenses broke rat check
-    * [HIVE-16535] - Hive fails to build from source code tarball
-    * [HIVE-16537] - Add missing AL files
+    * [HIVE-9774] - Print yarn application id to console [Spark Branch]
+    * [HIVE-10280] - LLAP: Handle errors while sending source state updates to the daemons
+    * [HIVE-11107] - Support for Performance regression test suite with TPCDS
+    * [HIVE-11417] - Create shims for the row by row read path that is backed by VectorizedRowBatch
+    * [HIVE-11526] - LLAP: implement LLAP UI as a separate service - part 1
+    * [HIVE-11766] - LLAP: Remove MiniLlapCluster from shim layer after hadoop-1 removal
+    * [HIVE-11927] - Implement/Enable constant related optimization rules in Calcite: enable HiveReduceExpressionsRule to fold constants
+    * [HIVE-12049] - HiveServer2: Provide an option to write serialized thrift objects in final tasks
+    * [HIVE-12159] - Create vectorized readers for the complex types
+    * [HIVE-12442] - HiveServer2: Refactor/repackage HiveServer2's Thrift code so that it can be used in the tasks
+    * [HIVE-12499] - Add HMS metrics for number of tables and partitions
+    * [HIVE-12543] - Disable Hive ConstantPropagate optimizer when CBO has optimized the plan
+    * [HIVE-12550] - Cache and display last N completed queries in HS2 WebUI 
+    * [HIVE-12709] - further improve user level explain
+    * [HIVE-12733] - UX improvements for HIVE-12499
+    * [HIVE-12781] - Temporarily disable authorization tests that always fail on Jenkins
+    * [HIVE-12782] - update the golden files for some tests that fail
+    * [HIVE-12793] - Address TestSparkCliDriver.testCliDriver_order2 failure due to HIVE-12782
+    * [HIVE-12802] - CBO: Calcite Operator To Hive Operator (Calcite Return Path): MiniTezCliDriver.vector_join_filters.q failure
+    * [HIVE-12805] - CBO: Calcite Operator To Hive Operator (Calcite Return Path): MiniTezCliDriver skewjoin.q failure
+    * [HIVE-12853] - LLAP: localize permanent UDF jars to daemon and add them to classloader
+    * [HIVE-12855] - LLAP: add checks when resolving UDFs to enforce whitelist
+    * [HIVE-12857] - LLAP: modify the decider to allow using LLAP with whitelisted UDFs
+    * [HIVE-12868] - Fix empty operation-pool metrics
+    * [HIVE-12889] - Support COUNT(DISTINCT) for partitioning query.
+    * [HIVE-12910] - HBaseStore: initial null value of aggregateData can not call compareTo
+    * [HIVE-12918] - LLAP should never create embedded metastore when localizing functions
+    * [HIVE-12944] - Support SUM(DISTINCT) for partitioning query.
+    * [HIVE-12952] - Show query sub-pages on webui
+    * [HIVE-12960] - Migrate Column Stats Extrapolation and UniformDistribution to HBaseStore
+    * [HIVE-12961] - Migrate Column Stats UniformDistribution to HBaseStore
+    * [HIVE-13005] - CBO: Calcite Operator To Hive Operator (Calcite Return Path): RexNode convert(ExprNodeConstantDesc literal)  decimal support bug
+    * [HIVE-13068] - Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II
+    * [HIVE-13129] - CliService leaks HMS connection
+    * [HIVE-13130] -  HS2 changes : API calls for retrieving primary keys and foreign keys information
+    * [HIVE-13198] - Authorization issues with cascading views
+    * [HIVE-13290] - Support primary keys/foreign keys constraint as part of create table command in Hive
+    * [HIVE-13318] - Cache the result of getTable from metastore
+    * [HIVE-13341] - Stats state is not captured correctly: differentiate load table and create table
+    * [HIVE-13349] - Metastore Changes : API calls for retrieving primary keys and foreign keys information
+    * [HIVE-13350] - Support Alter commands for Rely/NoRely  novalidate for PK/FK constraints
+    * [HIVE-13351] - Support drop Primary Key/Foreign Key constraints
+    * [HIVE-13358] - Stats state is not captured correctly: turn off stats optimizer for sampled table
+    * [HIVE-13360] - Refactoring Hive Authorization
+    * [HIVE-13362] - Commit binary file required for HIVE-13361
+    * [HIVE-13420] - Clarify HS2 WebUI Query 'Elapsed TIme'
+    * [HIVE-13424] - Refactoring the code to pass a QueryState object rather than HiveConf object
+    * [HIVE-13442] - LLAP: refactor submit API to be amenable to signing
+    * [HIVE-13444] - LLAP: add HMAC signatures to LLAP; verify them on LLAP side
+    * [HIVE-13477] - Set HivePrivilegeObjectType to TABLE_OR_VIEW
+    * [HIVE-13486] - Cast the column type for column masking
+    * [HIVE-13505] - Skip running TestDummy where possibe during precommit builds
+    * [HIVE-13507] - Improved logging for ptest
+    * [HIVE-13511] - Run clidriver tests from within the qtest dir for the precommit tests
+    * [HIVE-13520] - Don't allow any test to run for longer than 60minutes in the ptest setup
+    * [HIVE-13541] - Pass view's ColumnAccessInfo to HiveAuthorizer
+    * [HIVE-13565] - thrift change
+    * [HIVE-13566] - Auto-gather column stats - phase 1
+    * [HIVE-13620] - Merge llap branch work to master
+    * [HIVE-13637] - Fold CASE into NVL when CBO optimized the plan
+    * [HIVE-13638] - CBO rule to pull up constants through Sort/Limit
+    * [HIVE-13639] - CBO rule to pull up constants through Union
+    * [HIVE-13654] - Add JAVA8_URL to jenkins-submit-build.sh
+    * [HIVE-13722] - Add flag to detect constants to CBO pull up rules
+    * [HIVE-13758] - "Create table like" command should initialize the basic stats for the table
+    * [HIVE-13786] - Fix the unit test failure org.apache.hive.service.cli.session.TestHiveSessionImpl.testLeakOperationHandle
+    * [HIVE-13794] - HIVE_RPC_QUERY_PLAN should always be set when generating LLAP splits
+    * [HIVE-13807] - Extend metadata provider to pull up predicates through Union
+    * [HIVE-13808] - Use constant expressions to backtrack when we create ReduceSink
+    * [HIVE-13852] - NPE in TaskLocationHints during LLAP GetSplits request
 
 
 
@@ -20,61 +80,501 @@ Release Notes - Hive - Version 2.3.0
 
 
 ** Bug
-    * [HIVE-9815] - Metastore column"SERDE_PARAMS"."PARAM_VALUE"  limited to 4000 bytes
-    * [HIVE-14077] - add implicit decimal arithmetic q test, fix issues if found 
-    * [HIVE-14801] - improve TestPartitionNameWhitelistValidation stability
-    * [HIVE-15035] - Clean up Hive licenses for binary distribution
-    * [HIVE-15249] - HIve 2.1.0 is throwing InvalidObjectException(message:Invalid column type name is too long
-    * [HIVE-15829] - LLAP text cache: disable memory tracking on the writer
-    * [HIVE-15923] - Hive default partition causes errors in get partitions
-    * [HIVE-16007] - When the query does not complie the LogRunnable never stops
-    * [HIVE-16188] - beeline should block the connection if given invalid database name.
-    * [HIVE-16193] - Hive show compactions not reflecting the status of the application
-    * [HIVE-16219] - metastore notification_log contains serialized message with  non functional fields
-    * [HIVE-16231] - Parquet timestamp may be stored differently since HIVE-12767
-    * [HIVE-16274] - Support tuning of NDV of columns using lower/upper bounds
-    * [HIVE-16287] - Alter table partition rename with location - moves partition back to hive warehouse
-    * [HIVE-16301] - Preparing for 2.3 development.
-    * [HIVE-16305] - Additional Datanucleus ClassLoaderResolverImpl leaks causing HS2 OOM
-    * [HIVE-16308] - PreExecutePrinter and PostExecutePrinter should log to INFO level instead of ERROR
-    * [HIVE-16310] - Get the output operators of Reducesink when vectorization is on
-    * [HIVE-16315] - Describe table doesn't show num of partitions
-    * [HIVE-16318] - LLAP cache: address some issues in 2.2/2.3
-    * [HIVE-16321] - Possible deadlock in metastore with Acid enabled
-    * [HIVE-16336] - Rename hive.spark.use.file.size.for.mapjoin to hive.spark.use.ts.stats.for.mapjoin
-    * [HIVE-16341] - Tez Task Execution Summary has incorrect input record counts on some operators
-    * [HIVE-16366] - Hive 2.3 release planning
-    * [HIVE-16380] - removing global test dependency of jsonassert
-    * [HIVE-16385] - StatsNoJobTask could exit early before all partitions have been processed
-    * [HIVE-16390] - LLAP IO should take job config into account; also LLAP config should load defaults
-    * [HIVE-16403] - LLAP UI shows the wrong number of executors
-    * [HIVE-16459] - Forward channelInactive to RpcDispatcher
-    * [HIVE-16461] - DagUtils checks local resource size on the remote fs
-    * [HIVE-16465] - NullPointer Exception when enable vectorization for Parquet file format
-    * [HIVE-16473] - Hive-on-Tez may fail to write to an HBase table
-    * [HIVE-16519] - Fix exception thrown by checkOutputSpecs
-    * [HIVE-16545] - LLAP: bug in arena size determination logic
-    * [HIVE-16547] - LLAP: may not unlock buffers in some cases
+    * [HIVE-1608] - use sequencefile as the default for storing intermediate results
+    * [HIVE-4662] - first_value can't have more than one order by column
+    * [HIVE-8343] - Return value from BlockingQueue.offer() is not checked in DynamicPartitionPruner
+    * [HIVE-9144] - Beeline + Kerberos shouldn't prompt for unused username + password
+    * [HIVE-9457] - Fix obsolete parameter name in HiveConf description of hive.hashtable.initialCapacity
+    * [HIVE-9499] - hive.limit.query.max.table.partition makes queries fail on non-partitioned tables
+    * [HIVE-9534] - incorrect result set for query that projects a windowed aggregate
+    * [HIVE-9862] - Vectorized execution corrupts timestamp values
+    * [HIVE-10171] - Create a storage-api module
+    * [HIVE-10187] - Avro backed tables don't handle cyclical or recursive records
+    * [HIVE-10632] - Make sure TXN_COMPONENTS gets cleaned up if table is dropped before compaction.
+    * [HIVE-10729] - Query failed when select complex columns from joinned table (tez map join only)
+    * [HIVE-11097] - HiveInputFormat uses String.startsWith to compare splitPath and PathToAliases
+    * [HIVE-11388] - Allow ACID Compactor components to run in multiple metastores
+    * [HIVE-11427] - Location of temporary table for CREATE TABLE  SELECT broken by HIVE-7079
+    * [HIVE-11484] - Fix ObjectInspector for Char and VarChar
+    * [HIVE-11550] - ACID queries pollute HiveConf
+    * [HIVE-11675] - make use of file footer PPD API in ETL strategy or separate strategy
+    * [HIVE-11716] - Reading ACID table from non-acid session should raise an error
+    * [HIVE-11806] - Create test for HIVE-11174
+    * [HIVE-11828] - beeline -f fails on scripts with tabs between column type and comment
+    * [HIVE-11848] - tables in subqueries don't get locked
+    * [HIVE-11866] - Add framework to enable testing using LDAPServer using LDAP protocol
+    * [HIVE-11935] - Race condition in  HiveMetaStoreClient: isCompatibleWith and close
+    * [HIVE-11959] - add simple test case for TestTableIterable
+    * [HIVE-12039] - Temporarily disable TestSSL#testSSLVersion 
+    * [HIVE-12045] - ClassNotFoundException for GenericUDF [Spark Branch]
+    * [HIVE-12064] - prevent transactional=false
+    * [HIVE-12075] - add analyze command to explictly cache file metadata in HBase metastore
+    * [HIVE-12165] - wrong result when hive.optimize.sampling.orderby=true with some aggregate functions
+    * [HIVE-12367] - Lock/unlock database should add current database to inputs and outputs of authz hook
+    * [HIVE-12395] - Turn off CBO for hive.support.special.characters.tablename tests until feature is complete
+    * [HIVE-12441] - Driver.acquireLocksAndOpenTxn() should only call recordValidTxns() when needed
+    * [HIVE-12466] - SparkCounter not initialized error
+    * [HIVE-12478] - Improve Hive/Calcite Transitive Predicate inference
+    * [HIVE-12502] - to_date UDF cannot accept NULLs of VOID type
+    * [HIVE-12528] - don't start HS2 Tez sessions in a single thread
+    * [HIVE-12529] - HiveTxnManager.acquireLocks() should not block forever
+    * [HIVE-12538] - After set spark related config, SparkSession never get reused
+    * [HIVE-12552] - Wrong number of reducer estimation causing job to fail
+    * [HIVE-12554] - Fix Spark branch build after merge [Spark Branch]
+    * [HIVE-12558] - LLAP: output QueryFragmentCounters somewhere
+    * [HIVE-12567] - Enhance TxnHandler retry logic to handle ORA-08176
+    * [HIVE-12568] - Provide an option to specify network interface used by Spark remote client [Spark Branch]
+    * [HIVE-12570] - Incorrect error message Expression not in GROUP BY key thrown instead of Invalid function
+    * [HIVE-12608] - Parquet Schema Evolution doesn't work when a column is dropped from array<struct<>>
+    * [HIVE-12612] - beeline always exits with 0 status when reading query from standard input
+    * [HIVE-12616] - NullPointerException when spark session is reused to run a mapjoin
+    * [HIVE-12619] - Switching the field order within an array of structs causes the query to fail
+    * [HIVE-12620] - Misc improvement to Acid module
+    * [HIVE-12635] - Hive should return the latest hbase cell timestamp as the row timestamp value
+    * [HIVE-12643] - For self describing InputFormat don't replicate schema information in partitions
+    * [HIVE-12650] - Improve error messages for Hive on Spark in case the cluster has no resources available
+    * [HIVE-12673] - Orcfiledump throws NPE when no files are available
+    * [HIVE-12708] - Hive on Spark doesn't work with Kerboresed HBase [Spark Branch]
+    * [HIVE-12723] - stats_filemetadata test was added to the wrong driver
+    * [HIVE-12725] - CompactionTxnHandler.findNextToCompact() may produce "Operation not allowed after ResultSet closed"
+    * [HIVE-12749] - Constant propagate returns string values in incorrect format
+    * [HIVE-12752] - Change the schema version to 2.1.0 
+    * [HIVE-12757] - Fix TestCodahaleMetrics#testFileReporting
+    * [HIVE-12780] - Fix the output of the history command in Beeline
+    * [HIVE-12784] - Group by SemanticException: Invalid column reference
+    * [HIVE-12789] - Fix output twice in the history command of Beeline
+    * [HIVE-12792] - HIVE-12075 didn't update operation type for plugins
+    * [HIVE-12795] - Vectorized execution causes ClassCastException
+    * [HIVE-12799] - Always use Schema Evolution for ACID
+    * [HIVE-12808] - Logical PPD: Push filter clauses through PTF(Windowing) into TS
+    * [HIVE-12834] - Fix to accept the arrow keys in BeeLine CLI
+    * [HIVE-12837] - Better memory estimation/allocation for hybrid grace hash join during hash table loading
+    * [HIVE-12848] - Change derby scripts, for HMS upgrade tests, to accomodate 32-bit VM.
+    * [HIVE-12862] - Fix pom.xml to package hiveserver2.jsp
+    * [HIVE-12865] - Exchange partition does not show inputs field for post/pre execute hooks
+    * [HIVE-12867] - Semantic Exception Error Msg should be with in the range of "10000 to 19999"
+    * [HIVE-12880] - spark-assembly causes Hive class version problems
+    * [HIVE-12885] - LDAP Authenticator improvements
+    * [HIVE-12886] - invalid column reference error on grouping by constant
+    * [HIVE-12887] - Handle ORC schema on read with fewer columns than file schema (after Schema Evolution changes)
+    * [HIVE-12888] - TestSparkNegativeCliDriver does not run in Spark mode[Spark Branch]
+    * [HIVE-12894] - Detect whether ORC is reading from ACID table correctly for Schema Evolution
+    * [HIVE-12909] - Some encryption q-tests fail because trash is disabled in encryption_with_trash.q
+    * [HIVE-12927] - HBase metastore: sequences should be one per row, not all in one row
+    * [HIVE-12933] - Beeline will hang when authenticating with PAM when libjpam.so is missing
+    * [HIVE-12937] - DbNotificationListener unable to clean up old notification events
+    * [HIVE-12941] - Unexpected result when using MIN() on struct with NULL in first field
+    * [HIVE-12951] - Reduce Spark executor prewarm timeout to 5s
+    * [HIVE-12963] - LIMIT statement with SORT BY creates additional MR job with hardcoded only one reducer
+    * [HIVE-12965] - Insert overwrite local directory should perserve the overwritten directory permission
+    * [HIVE-12966] - Change some ZooKeeperHiveLockManager logs to debug
+    * [HIVE-12969] - Fix Javadoc for PredicatePushDown class
+    * [HIVE-12976] - MetaStoreDirectSql doesn't batch IN lists in all cases
+    * [HIVE-12981] - ThriftCLIService uses incompatible getShortName() implementation
+    * [HIVE-12990] - LLAP: ORC cache NPE without FileID support
+    * [HIVE-12992] - Hive on tez: Bucket map join plan is incorrect
+    * [HIVE-12993] - user and password supplied from URL is overwritten by the empty user and password of the JDBC connection string when it's calling from beeline
+    * [HIVE-12995] - LLAP: Synthetic file ids need collision checks
+    * [HIVE-12996] - Temp tables shouldn't be locked
+    * [HIVE-12998] - ORC FileDump.printJsonData() does not close RecordReader
+    * [HIVE-12999] - Tez: Vertex creation reduce NN IPCs
+    * [HIVE-13002] - Hive object is not thread safe, is shared via a threadlocal and thus should not be passed around too much - part 1
+    * [HIVE-13008] - WebHcat DDL commands in secure mode NPE when default FileSystem doesn't support delegation tokens
+    * [HIVE-13009] - Fix add_jar_file.q on Windows
+    * [HIVE-13013] - Further Improve concurrency in TxnHandler
+    * [HIVE-13015] - Bundle Log4j2 jars with hive-exec
+    * [HIVE-13016] - ORC FileDump recovery utility fails in Windows
+    * [HIVE-13017] - Child process of HiveServer2 fails to get delegation token from non default FileSystem
+    * [HIVE-13020] - Hive Metastore and HiveServer2 to Zookeeper fails with IBM JDK
+    * [HIVE-13021] - GenericUDAFEvaluator.isEstimable(agg) always returns false
+    * [HIVE-13036] - Split hive.root.logger separately to make it compatible with log4j1.x (for remaining services)
+    * [HIVE-13038] - LLAP needs service class registration for token identifier
+    * [HIVE-13039] - BETWEEN predicate is not functioning correctly with predicate pushdown on Parquet table
+    * [HIVE-13042] - OrcFiledump runs into an ArrayIndexOutOfBoundsException when running against old versions of ORC files
+    * [HIVE-13043] - Reload function has no impact to function registry
+    * [HIVE-13045] - move guava dependency back to 14 after HIVE-12952
+    * [HIVE-13047] - Disabling Web UI leads to NullPointerException
+    * [HIVE-13048] - Rogue SQL statement in an upgrade SQL file for oracle.
+    * [HIVE-13051] - Deadline class has numerous issues
+    * [HIVE-13052] - Allow tests to start MiniHS2 for manual testing
+    * [HIVE-13056] - delegation tokens do not work with HS2 when used with http transport and kerberos
+    * [HIVE-13057] - Remove duplicate copies of TableDesc property values in PartitionDesc
+    * [HIVE-13062] - Hive metastore test failing
+    * [HIVE-13064] - Serde properties are not working while expecting output in a directory
+    * [HIVE-13065] - Hive throws NPE when writing map type data to a HBase backed table
+    * [HIVE-13070] - Precommit HMS tests should run in addition to precommit normal tests, not instead of
+    * [HIVE-13077] - LLAP: Scrub daemon-site.xml from client configs
+    * [HIVE-13079] - LLAP: Allow reading log4j properties from default JAR resources
+    * [HIVE-13082] - Enable constant propagation optimization in query with left semi join
+    * [HIVE-13083] - Writing HiveDecimal to ORC can wrongly suppress present stream
+    * [HIVE-13084] - Vectorization add support for PROJECTION Multi-AND/OR
+    * [HIVE-13086] - LLAP: Programmatically initialize log4j2 to print out the properties location
+    * [HIVE-13087] - LLAP: Print STW pause time and useful application time
+    * [HIVE-13089] - Rounding in Stats for equality expressions
+    * [HIVE-13090] - Hive metastore crashes on NPE with ZooKeeperTokenStore
+    * [HIVE-13093] - hive metastore does not exit on start failure
+    * [HIVE-13094] - CBO: Assertion error  in Case expression
+    * [HIVE-13096] - Cost to choose side table in MapJoin conversion based on cumulative cardinality
+    * [HIVE-13099] - Non-SQLOperations lead to Web UI NPE
+    * [HIVE-13100] - Revert HIVE-13015 that bundles log4j2 jars in hive-exec.jar
+    * [HIVE-13101] - NullPointerException in HiveLexer.g
+    * [HIVE-13105] - LLAP token hashCode and equals methods are incorrect
+    * [HIVE-13108] - Operators: SORT BY randomness is not safe with network partitions
+    * [HIVE-13110] - LLAP: Package log4j2 jars into Slider pkg
+    * [HIVE-13111] - Fix timestamp / interval_day_time wrong results with HIVE-9862 
+    * [HIVE-13112] - Expose Lineage information in case of CTAS
+    * [HIVE-13115] - MetaStore Direct SQL getPartitions call fail when the columns schemas for a partition are null
+    * [HIVE-13126] - Clean up MapJoinOperator properly to avoid object cache reuse with unintentional states
+    * [HIVE-13128] - NullScan fails on a secure setup
+    * [HIVE-13131] - TezWork queryName can be null after HIVE-12523
+    * [HIVE-13134] - JDBC: JDBC Standalone should not be in the lib dir by default
+    * [HIVE-13135] - LLAP: HTTPS Webservices needs trusted keystore configs
+    * [HIVE-13141] - Hive on Spark over HBase should accept parameters starting with "zookeeper.znode"
+    * [HIVE-13144] - HS2 can leak ZK ACL objects when curator retries to create the persistent ephemeral node
+    * [HIVE-13146] - OrcFile table property values are case sensitive
+    * [HIVE-13151] - Clean up UGI objects in FileSystem cache for transactions
+    * [HIVE-13153] - SessionID is appended to thread name twice
+    * [HIVE-13160] - HS2 unable to load UDFs on startup when HMS is not ready
+    * [HIVE-13163] - ORC MemoryManager thread checks are fatal, should WARN 
+    * [HIVE-13169] - HiveServer2: Support delegation token based connection when using http transport
+    * [HIVE-13174] - Remove Vectorizer noise in logs
+    * [HIVE-13175] - Disallow making external tables transactional
+    * [HIVE-13178] - Enhance ORC Schema Evolution to handle more standard data type conversions
+    * [HIVE-13184] - LLAP: DAG credentials (e.g. HBase tokens) are not passed to the tasks in Tez plugin
+    * [HIVE-13185] - orc.ReaderImp.ensureOrcFooter() method fails on small text files with IndexOutOfBoundsException
+    * [HIVE-13186] - ALTER TABLE RENAME should lowercase table name and hdfs location
+    * [HIVE-13197] - Add adapted constprog2.q and constprog_partitioner.q tests back
+    * [HIVE-13199] - NDC stopped working in LLAP logging
+    * [HIVE-13200] - Aggregation functions returning empty rows on partitioned columns
+    * [HIVE-13201] - Compaction shouldn't be allowed on non-ACID table
+    * [HIVE-13209] - metastore get_delegation_token fails with null ip address
+    * [HIVE-13210] - Revert changes in HIVE-12994 related to metastore
+    * [HIVE-13211] - normalize Hive.get overloads to go thru one path
+    * [HIVE-13213] - make DbLockManger work for non-acid resources
+    * [HIVE-13216] - ORC Reader will leave file open until GC when opening a malformed ORC file
+    * [HIVE-13217] - Replication for HoS mapjoin small file needs to respect dfs.replication.max
+    * [HIVE-13218] - LLAP: better configs part 1
+    * [HIVE-13223] - HoS  may hang for queries that run on 0 splits 
+    * [HIVE-13227] - LLAP: Change daemon initialization logs from INFO to WARN
+    * [HIVE-13232] - Aggressively drop compression buffers in ORC OutStreams
+    * [HIVE-13233] - Use min and max values to estimate better stats for comparison operators
+    * [HIVE-13236] - LLAP: token renewal interval needs to be set
+    * [HIVE-13237] - Select parquet struct field with upper case throws NPE
+    * [HIVE-13240] - GroupByOperator: Drop the hash aggregates when closing operator
+    * [HIVE-13241] - LLAP: Incremental Caching marks some small chunks as "incomplete CB"
+    * [HIVE-13242] - DISTINCT keyword is dropped by the parser for windowing
+    * [HIVE-13243] - Hive drop table on encyption zone fails for external tables
+    * [HIVE-13246] - Add log line to ORC writer to print out the file path
+    * [HIVE-13251] - hive can't read the decimal in AVRO file generated from previous version
+    * [HIVE-13255] - FloatTreeReader.nextVector is expensive 
+    * [HIVE-13260] - ReduceSinkDeDuplication throws exception when pRS key is empty
+    * [HIVE-13261] - Can not compute column stats for partition when schema evolves
+    * [HIVE-13262] - LLAP: Remove log levels from DebugUtils
+    * [HIVE-13263] - Vectorization: Unable to vectorize regexp_extract/regexp_replace " Udf: GenericUDFBridge, is not supported"
+    * [HIVE-13267] - Vectorization: Add SelectLikeStringColScalar for non-filter operations
+    * [HIVE-13269] - Simplify comparison expressions using column stats
+    * [HIVE-13283] - LLAP: make sure IO elevator is enabled by default in the daemons
+    * [HIVE-13285] - Orc concatenation may drop old files from moving to final path
+    * [HIVE-13286] - Query ID is being reused across queries
+    * [HIVE-13287] - Add logic to estimate stats for IN operator
+    * [HIVE-13291] - ORC BI Split strategy should consider block size instead of file size
+    * [HIVE-13293] - Query occurs performance degradation after enabling parallel order by for Hive on Spark
+    * [HIVE-13294] - AvroSerde leaks the connection in a case when reading schema from a url
+    * [HIVE-13296] - Add vectorized Q test with complex types showing count(*) etc work correctly
+    * [HIVE-13298] - nested join support causes undecipherable errors in SemanticAnalyzer
+    * [HIVE-13299] - Column Names trimmed of leading and trailing spaces
+    * [HIVE-13300] - Hive on spark throws exception for multi-insert with join
+    * [HIVE-13302] - direct SQL: cast to date doesn't work on Oracle
+    * [HIVE-13303] - spill to YARN directories, not tmp, when available
+    * [HIVE-13310] - Vectorized Projection Comparison Number Column to Scalar broken for !noNulls and selectedInUse
+    * [HIVE-13311] - MetaDataFormatUtils throws NPE when HiveDecimal.create is null
+    * [HIVE-13313] - TABLESAMPLE ROWS feature broken for Vectorization
+    * [HIVE-13320] - Apply HIVE-11544 to explicit conversions as well as implicit ones
+    * [HIVE-13322] - LLAP: ZK registry throws at shutdown due to slf4j trying to initialize a log4j logger
+    * [HIVE-13324] - LLAP: history log for FRAGMENT_START doesn't log DagId correctly
+    * [HIVE-13325] - Excessive logging when ORC PPD fails type conversions
+    * [HIVE-13326] - HiveServer2: Make ZK config publishing configurable
+    * [HIVE-13327] - SessionID added to HS2 threadname does not trim spaces
+    * [HIVE-13330] - ORC vectorized string dictionary reader does not differentiate null vs empty string dictionary
+    * [HIVE-13332] - support dumping all row indexes in ORC FileDump
+    * [HIVE-13333] - StatsOptimizer throws ClassCastException
+    * [HIVE-13338] - Differences in vectorized_casts.q output for vectorized and non-vectorized runs
+    * [HIVE-13339] - Vectorization: GenericUDFBetween in Projection mode 
+    * [HIVE-13340] - Vectorization: from_unixtime UDF shim
+    * [HIVE-13342] - Improve logging in llap decider and throw exception in case llap mode is all but we cannot run in llap.
+    * [HIVE-13343] - Need to disable hybrid grace hash join in llap mode except for dynamically partitioned hash join
+    * [HIVE-13346] - LLAP doesn't update metadata priority when reusing from cache; some tweaks in LRFU policy
+    * [HIVE-13361] - Orc concatenation should enforce the compression buffer size
+    * [HIVE-13372] - Hive Macro overwritten when multiple macros are used in one column
+    * [HIVE-13373] - Use most specific type for numerical constants
+    * [HIVE-13378] - LLAP help formatter is too narrow
+    * [HIVE-13379] - HIVE-12851 args do not work (slider-keytab-dir, etc.)
+    * [HIVE-13380] - Decimal should have lower precedence than double in type hierachy
+    * [HIVE-13381] - Timestamp & date should have precedence in type hierarchy than string group
+    * [HIVE-13388] - Fix inconsistent content due to Thrift changes
+    * [HIVE-13390] - HiveServer2: Add more test to ZK service discovery using MiniHS2
+    * [HIVE-13394] - Analyze table fails in tez on empty partitions/files/tables
+    * [HIVE-13395] - Lost Update problem in ACID
+    * [HIVE-13396] - LLAP: Include hadoop-metrics2.properties file LlapServiceDriver
+    * [HIVE-13401] - Kerberized HS2 with LDAP auth enabled fails kerberos/delegation token authentication
+    * [HIVE-13402] - Temporarily disable failing spark tests
+    * [HIVE-13405] - Fix Connection Leak in OrcRawRecordMerger
+    * [HIVE-13407] - Add more subtlety to TezCompiler Perf Logging
+    * [HIVE-13410] - PerfLog metrics scopes not closed if there are exceptions on HS2
+    * [HIVE-13415] - Decouple Sessions from thrift binary transport
+    * [HIVE-13417] - Some vector operators return "OP" as name
+    * [HIVE-13428] - ZK SM in LLAP should have unique paths per cluster
+    * [HIVE-13434] - BaseSemanticAnalyzer.unescapeSQLString doesn't unescape \u0000 style character literals.
+    * [HIVE-13437] - httpserver getPort does not return the actual port when attempting to use a dynamic port
+    * [HIVE-13438] - Add a service check script for llap
+    * [HIVE-13439] - JDBC: provide a way to retrieve GUID to query Yarn ATS
+    * [HIVE-13440] - remove hiveserver1 scripts and thrift generated files
+    * [HIVE-13445] - LLAP: token should encode application and cluster ids
+    * [HIVE-13446] - LLAP: set default management protocol acls to deny all
+    * [HIVE-13447] - LLAP: check ZK acls for registry and fail if they are too permissive
+    * [HIVE-13448] - LLAP: check ZK acls for ZKSM and fail if they are too permissive
+    * [HIVE-13449] - LLAP: HS2 should get the token directly, rather than from LLAP
+    * [HIVE-13458] - Heartbeater doesn't fail query when heartbeat fails
+    * [HIVE-13462] - HiveResultSetMetaData.getPrecision() fails for NULL columns
+    * [HIVE-13463] - Fix ImportSemanticAnalyzer to allow for different src/dst filesystems
+    * [HIVE-13465] - Add ZK settings to MiniLlapCluster clusterSpecificConfiguration
+    * [HIVE-13467] - Show llap info on hs2 ui when available
+    * [HIVE-13476] - HS2 ShutdownHookManager holds extra of Driver instance in nested compile
+    * [HIVE-13480] - Add hadoop2 metrics reporter for Codahale metrics
+    * [HIVE-13485] - Session id appended to thread name multiple times.
+    * [HIVE-13487] - Finish time is wrong when perflog is missing SUBMIT_TO_RUNNING
+    * [HIVE-13488] - Restore dag summary when tez exec print summary enabled and in-place updates disabled
+    * [HIVE-13491] - Testing  : log thread stacks when metastore fails to start
+    * [HIVE-13492] - TestMiniSparkOnYarnCliDriver.testCliDriver_index_bitmap3 is failing on master
+    * [HIVE-13493] - Fix TransactionBatchImpl.getCurrentTxnId() and mis logging fixes
+    * [HIVE-13494] - LLAP: Some metrics from daemon are not exposed to hadoop-metrics2
+    * [HIVE-13498] - cleardanglingscratchdir does not work if scratchdir is not on defaultFs
+    * [HIVE-13500] - Launching big queries fails with Out of Memory Exception
+    * [HIVE-13502] - Beeline doesnt support session parameters in JDBC URL as documentation states.
+    * [HIVE-13510] - Dynamic partitioning doesn’t work when remote metastore is used
+    * [HIVE-13512] - Make initializing dag ids in TezWork thread safe for parallel compilation
+    * [HIVE-13513] - cleardanglingscratchdir does not work in some version of HDFS
+    * [HIVE-13514] - TestClearDanglingScratchDir fail on branch-1
+    * [HIVE-13518] - Hive on Tez: Shuffle joins do not choose the right 'big' table.
+    * [HIVE-13522] - regexp_extract.q hangs on master
+    * [HIVE-13523] - Fix connection leak in ORC RecordReader and refactor for unit testing
+    * [HIVE-13525] - HoS hangs when job is empty
+    * [HIVE-13527] - Using deprecated APIs in HBase client causes zookeeper connection leaks.
+    * [HIVE-13530] - Hive on Spark throws Kryo exception in some cases
+    * [HIVE-13533] - Remove AST dump
+    * [HIVE-13542] - Missing stats for tables in TPCDS performance regression suite
+    * [HIVE-13551] - Make cleardanglingscratchdir work on Windows
+    * [HIVE-13552] - Templeton job does not write out log files on InterruptedException
+    * [HIVE-13553] - CTE with upperCase alias throws exception
+    * [HIVE-13561] - HiveServer2 is leaking ClassLoaders when add jar / temporary functions are used
+    * [HIVE-13568] - Add UDFs to support column-masking
+    * [HIVE-13570] - Some queries with Union all fail when CBO is off
+    * [HIVE-13572] - Redundant setting full file status in Hive::copyFiles
+    * [HIVE-13585] - Add counter metric for direct sql failures
+    * [HIVE-13588] - NPE is thrown from MapredLocalTask.executeInChildVM
+    * [HIVE-13592] - metastore calls map is not thread safe
+    * [HIVE-13596] - HS2 should be able to get UDFs on demand from metastore
+    * [HIVE-13597] - revert HIVE-12892
+    * [HIVE-13598] - Describe extended table should show the primary keys/foreign keys associated with the table
+    * [HIVE-13602] - TPCH q16 return wrong result when CBO is on
+    * [HIVE-13608] - We should provide better error message while constraints with duplicate names are created
+    * [HIVE-13609] - Fix UDTFs to allow local fetch task to fetch rows forwarded by GenericUDTF.close()
+    * [HIVE-13618] - Trailing spaces in partition column will be treated differently
+    * [HIVE-13619] - Bucket map join plan is incorrect
+    * [HIVE-13621] - compute stats in certain cases fails with NPE
+    * [HIVE-13622] - WriteSet tracking optimizations
+    * [HIVE-13628] - Support for permanent functions - error handling if no restart
+    * [HIVE-13632] - Hive failing on insert empty array into parquet table
+    * [HIVE-13642] - Update GUESS_FIELDS option in .reviewboardrc to support current version of RBTools.
+    * [HIVE-13645] - Beeline needs null-guard around hiveVars and hiveConfVars read
+    * [HIVE-13646] - make hive.optimize.sort.dynamic.partition compatible with ACID tables
+    * [HIVE-13653] - improve config error messages for LLAP cache size/etc
+    * [HIVE-13656] - need to set direct memory limit higher in LlapServiceDriver for certain edge case configurations
+    * [HIVE-13657] - Spark driver stderr logs should appear in hive client logs
+    * [HIVE-13659] - An empty where condition leads to vectorization exceptions instead of throwing a compile time error
+    * [HIVE-13669] - LLAP: io.enabled config is ignored on the server side
+    * [HIVE-13671] - Add PerfLogger to log4j2.properties logger
+    * [HIVE-13676] - Tests failing because metastore doesn't come up
+    * [HIVE-13682] - EOFException with fast hashtable
+    * [HIVE-13683] - Remove erroneously included patch file
+    * [HIVE-13686] - TestRecordReaderImpl is deleting target/tmp causing all the tests after it to fail
+    * [HIVE-13691] - No record with CQ_ID=0 found in COMPACTION_QUEUE
+    * [HIVE-13693] - Multi-insert query drops Filter before file output when there is a.val <> b.val
+    * [HIVE-13699] - Make JavaDataModel#get thread safe for parallel compilation
+    * [HIVE-13700] - TestHiveOperationType is failing on master
+    * [HIVE-13701] - LLAP: Use different prefix for llap task scheduler metrics
+    * [HIVE-13705] - Insert into table removes existing data
+    * [HIVE-13710] - LLAP registry ACL check causes error due to namespacing
+    * [HIVE-13712] - LLAP: LlapServiceDriver should package hadoop-metrics2-llapdaemon.properties when available
+    * [HIVE-13719] - TestConverters fails on master
+    * [HIVE-13720] - TestLlapTaskCommunicator fails on master
+    * [HIVE-13728] - TestHBaseSchemaTool fails on master
+    * [HIVE-13729] - FileSystem$Cache leaks in FileUtils.checkFileAccessWithImpersonation
+    * [HIVE-13730] - Avoid double spilling the same partition when memory threshold is set very low
+    * [HIVE-13743] - Data move codepath is broken with hive (2.1.0-SNAPSHOT)
+    * [HIVE-13751] - LlapOutputFormatService should have a configurable send buffer size
+    * [HIVE-13753] - Make metastore client thread safe in DbTxnManager
+    * [HIVE-13767] - Wrong type inferred in Semijoin condition leads to AssertionError
+    * [HIVE-13784] - Hive Metastore start failed on Oracle DB
+    * [HIVE-13787] - LLAP: bug in recent security patches (wrong argument order; using full user name in id)
+    * [HIVE-13810] - insert overwrite select from some table fails throwing org.apache.hadoop.security.AccessControlException
+    * [HIVE-13818] - Fast Vector MapJoin Long hashtable has to handle all integral types
+    * [HIVE-13821] - OrcSplit groups all delta files together into a single split
+    * [HIVE-13823] - Remove unnecessary log line in common join operator
+    * [HIVE-13826] - Make VectorUDFAdaptor work for GenericUDFBetween when used as FILTER
+    * [HIVE-13831] - Error pushing predicates to HBase storage handler
+    * [HIVE-13832] - Add missing license header to files
+    * [HIVE-13837] - current_timestamp() output format is different in some cases
+    * [HIVE-13840] - Orc split generation is reading file footers twice
+    * [HIVE-13841] - Orc split generation returns different strategies with cache enabled vs disabled
+    * [HIVE-13844] - Invalid index handler in org.apache.hadoop.hive.ql.index.HiveIndex class
+    * [HIVE-13845] - Delete beeline/pom.xml.orig
+    * [HIVE-13849] - Wrong plan for hive.optimize.sort.dynamic.partition=true
+    * [HIVE-13856] - Fetching transaction batches during ACID streaming against Hive Metastore using Oracle DB fails
+    * [HIVE-13857] - insert overwrite select from some table fails throwing org.apache.hadoop.security.AccessControlException - II
+    * [HIVE-13858] - LLAP: A preempted task can end up waiting on completeInitialization if some part of the executing code suppressed the interrupt
+    * [HIVE-13859] - mask() UDF not retaining day and month field values
+    * [HIVE-13861] - Fix up nullability issue that might be created by pull up constants rules
+    * [HIVE-13863] - Improve AnnotateWithStatistics with support for cartesian product
+    * [HIVE-13867] - restore HiveAuthorizer interface changes
+    * [HIVE-13870] - Decimal vector is not resized correctly
+    * [HIVE-13876] - Vectorization: Port HIVE-11544 to LazySimpleDeserializeRead
+    * [HIVE-13885] - Hive session close is not resetting thread name
+    * [HIVE-13927] - Adding missing header to Java files
 
 
 
 
 
-** Improvement
-    * [HIVE-12274] - Increase width of columns used for general configuration in the metastore.
-    * [HIVE-12299] - Hive Column Data Type definition in schema limited to 4000 characters - too small
-    * [HIVE-14145] - Too small length of column 'PARAM_VALUE' in table 'SERDE_PARAMS'
-    * [HIVE-15880] - Allow insert overwrite and truncate table query to use auto.purge table property
-    * [HIVE-16115] - Stop printing progress info from operation logs with beeline progress bar
-    * [HIVE-16164] - Provide mechanism for passing HMS notification ID between transactional and non-transactional listeners.
 
+** Improvement
+    * [HIVE-4570] - More information to user on GetOperationStatus in Hive Server2 when query is still executing
+    * [HIVE-4924] - JDBC: Support query timeout for jdbc
+    * [HIVE-5370] - format_number udf should take user specifed format as argument
+    * [HIVE-6535] - JDBC: provide an async API to execute query and fetch results
+    * [HIVE-10115] - HS2 running on a Kerberized cluster should offer Kerberos(GSSAPI) and Delegation token(DIGEST) when alternate authentication is enabled
+    * [HIVE-10249] - ACID: show locks should show who the lock is waiting for
+    * [HIVE-10468] - Create scripts to do metastore upgrade tests on jenkins for Oracle DB.
+    * [HIVE-10982] - Customizable the value of  java.sql.statement.setFetchSize in Hive JDBC Driver
+    * [HIVE-11424] - Rule to transform OR clauses into IN clauses in CBO
+    * [HIVE-11483] - Add encoding and decoding for query string config
+    * [HIVE-11487] - Add getNumPartitionsByFilter api in metastore api
+    * [HIVE-11752] - Pre-materializing complex CTE queries
+    * [HIVE-11793] - SHOW LOCKS with DbTxnManager ignores filter options
+    * [HIVE-11956] - SHOW LOCKS should indicate what acquired the lock
+    * [HIVE-12431] - Support timeout for compile lock
+    * [HIVE-12439] - CompactionTxnHandler.markCleaned() and TxnHandler.openTxns() misc improvements
+    * [HIVE-12467] - Add number of dynamic partitions to error message
+    * [HIVE-12481] - Occasionally "Request is a replay" will be thrown from HS2
+    * [HIVE-12515] - Clean the SparkCounters related code after remove counter based stats collection[Spark Branch]
+    * [HIVE-12541] - SymbolicTextInputFormat should supports the path with regex
+    * [HIVE-12545] - Add sessionId and queryId logging support for methods like getCatalogs in HiveSessionImpl class
+    * [HIVE-12595] - [REFACTOR] Make physical compiler more type safe
+    * [HIVE-12611] - Make sure spark.yarn.queue is effective and takes the value from mapreduce.job.queuename if given [Spark Branch]
+    * [HIVE-12637] - make retryable SQLExceptions in TxnHandler configurable
+    * [HIVE-12653] - The property  "serialization.encoding" in the class "org.apache.hadoop.hive.contrib.serde2.MultiDelimitSerDe" does not work
+    * [HIVE-12763] - Use bit vector to track NDV
+    * [HIVE-12776] - Add parse utility method for parsing any stand-alone HQL expression
+    * [HIVE-12777] - Add capability to restore session in CLIService and SessionManager
+    * [HIVE-12787] - Trace improvement - Inconsistent logging upon shutdown-start of the Hive metastore process
+    * [HIVE-12811] - Name yarn application name more meaning than just "Hive on Spark"
+    * [HIVE-12839] - Upgrade Hive to Calcite 1.6
+    * [HIVE-12897] - Improve dynamic partition loading
+    * [HIVE-12902] - Refactor TxnHandler to be an interface
+    * [HIVE-12907] - Improve dynamic partition loading - II
+    * [HIVE-12908] - Improve dynamic partition loading III
+    * [HIVE-12935] - LLAP: Replace Yarn registry with Zookeeper registry
+    * [HIVE-12942] - Remove Yarn WebApps from LLAP daemon instance
+    * [HIVE-12946] - alter table should also add default scheme and authority for the location similar to create table 
+    * [HIVE-12950] - get rid of the NullScan emptyFile madness
+    * [HIVE-12953] - Update description of hive.ppd.remove.duplicatefilters in HiveConf.java
+    * [HIVE-12958] - Make embedded Jetty server more configurable
+    * [HIVE-12959] - LLAP: Add task scheduler timeout when no nodes are alive
+    * [HIVE-12967] - Change LlapServiceDriver to read a properties file instead of llap-daemon-site
+    * [HIVE-12968] - genNotNullFilterForJoinSourcePlan: needs to merge predicates into the multi-AND
+    * [HIVE-12970] - Add total open connections in HiveServer2
+    * [HIVE-12983] - Provide a builtin function to get Hive version
+    * [HIVE-12988] - Improve dynamic partition loading IV
+    * [HIVE-13027] - Configuration changes to improve logging performance
+    * [HIVE-13033] - SPDO unnecessarily duplicates columns in key & value of mapper output
+    * [HIVE-13034] - Add jdeb plugin to build debian
+    * [HIVE-13040] - Handle empty bucket creations more efficiently 
+    * [HIVE-13044] - Enable TLS encryption to HMS backend database
+    * [HIVE-13054] - LLAP: disable permanent fns by default (for now)
+    * [HIVE-13058] - Add session and operation_log directory deletion messages
+    * [HIVE-13063] - Create UDFs for CHR and REPLACE 
+    * [HIVE-13069] - Enable cartesian product merging
+    * [HIVE-13102] - CBO: Reduce operations in Calcite do not fold as tight as rule-based folding
+    * [HIVE-13106] - STARTUP_MSG and SHUTDOWN_MSG are added to HiveMetaStore
+    * [HIVE-13107] - LLAP: Rotate GC logs periodically to prevent full disks
+    * [HIVE-13116] - LLAP: allow ignoring the UDF check during compile time
+    * [HIVE-13118] - add some logging to LLAP token related paths
+    * [HIVE-13120] - propagate doAs when generating ORC splits
+    * [HIVE-13122] - LLAP: simple Model/View separation for UI
+    * [HIVE-13156] - Allow specifying the name of the queue in which llap will run
+    * [HIVE-13179] - Allow custom HiveConf to be passed to Authentication Providers
+    * [HIVE-13183] - More logs in operation logs
+    * [HIVE-13196] - UDFLike: reduce Regex NFA sizes
+    * [HIVE-13204] - Vectorization: Add ChainedCheckerFactory for LIKE 
+    * [HIVE-13206] - Create a test-sources.jar when -Psources profile is invoked
+    * [HIVE-13222] - Move rc-file-v0.rc used on TestRCFile.java to src/test/resources
+    * [HIVE-13226] - Improve tez print summary to print query execution breakdown
+    * [HIVE-13249] - Hard upper bound on number of open transactions
+    * [HIVE-13295] - Improvement to LDAP search queries in HS2 LDAP Authenticator
+    * [HIVE-13319] - Propagate external handles in task display
+    * [HIVE-13352] - Seems unnecessary for HBase tests to call QTestUtil.tearDown to close zookeeper and others.
+    * [HIVE-13354] - Add ability to specify Compaction options per table and per request
+    * [HIVE-13363] - Add hive.metastore.token.signature property to HiveConf
+    * [HIVE-13364] - Allow llap to work with dynamic ports for rpc, shuffle, ui
+    * [HIVE-13365] - Change the MiniLLAPCluster to work with a MiniZKCluster
+    * [HIVE-13367] - Extending HPLSQL parser
+    * [HIVE-13376] - HoS emits too many logs with application state
+    * [HIVE-13398] - LLAP: Simple /status and /peers web services
+    * [HIVE-13400] - Following up HIVE-12481, add retry for Zookeeper service discovery
+    * [HIVE-13413] - add a llapstatus command line tool
+    * [HIVE-13421] - Propagate job progress in operation status
+    * [HIVE-13429] - Tool to remove dangling scratch dir
+    * [HIVE-13430] - Pass error message to failure hook
+    * [HIVE-13436] - Allow the package directory to be specified for the llap setup script
+    * [HIVE-13469] - LLAP: Support delayed scheduling for locality
+    * [HIVE-13472] - Replace primitive wrapper's valueOf method with parse* method to avoid unnecessary boxing/unboxing
+    * [HIVE-13501] - Invoke failure hooks if query fails on exception
+    * [HIVE-13509] - HCatalog getSplits should ignore the partition with invalid path
+    * [HIVE-13516] - Adding BTEQ .IF, .QUIT, ERRORCODE to HPL/SQL
+    * [HIVE-13536] - LLAP: Add metrics for task scheduler
+    * [HIVE-13559] - Pass exception to failure hooks
+    * [HIVE-13562] - Enable vector bridge for all non-vectorized udfs
+    * [HIVE-13616] - Investigate renaming a table without invalidating the column stats
+    * [HIVE-13629] - Expose Merge-File task and Column-Truncate task from DDLTask
+    * [HIVE-13643] - Various enhancements / fixes to llap cli tools
+    * [HIVE-13661] - [Refactor] Move common FS operations out of shim layer
+    * [HIVE-13666] - LLAP Provide the log url for a task attempt to display on the UI
+    * [HIVE-13670] - Improve Beeline connect/reconnect semantics
+    * [HIVE-13679] - Pass diagnostic message to failure hooks
+    * [HIVE-13681] - Update README with latest Hive functionality
+    * [HIVE-13716] - Improve dynamic partition loading V
+    * [HIVE-13726] - Improve dynamic partition loading VI
+    * [HIVE-13750] - Avoid additional shuffle stage created by Sorted Dynamic Partition Optimizer when possible
+    * [HIVE-13783] - Display a secondary prompt on beeline for multi-line statements
+    * [HIVE-13789] - Repeatedly checking configuration in TextRecordWriter/Reader hurts performance
+    * [HIVE-13799] - Optimize TableScanRule::checkBucketedTable
+    * [HIVE-13902] - [Refactor] Minimize metastore jar dependencies on task nodes
 
 
 
 ** New Feature
-    * [HIVE-15434] - Add UDF to allow interrogation of uniontype values
-    * [HIVE-15691] - Create StrictRegexWriter to work with RegexSerializer for Flume Hive Sink
-
+    * [HIVE-12270] - Add DBTokenStore support to HS2 delegation token
+    * [HIVE-12634] - Add command to kill an ACID transaction
+    * [HIVE-12730] - MetadataUpdater: provide a mechanism to edit the basic statistics of a table (or a partition)
+    * [HIVE-12878] - Support Vectorization for TEXTFILE and other formats
+    * [HIVE-12994] - Implement support for NULLS FIRST/NULLS LAST
+    * [HIVE-13029] - NVDIMM support for LLAP Cache
+    * [HIVE-13095] - Support view column authorization
+    * [HIVE-13125] - Support masking and filtering of rows/columns
+    * [HIVE-13307] - LLAP: Slider package should contain permanent functions
+    * [HIVE-13418] - HiveServer2 HTTP mode should support X-Forwarded-Host header for authorization/audits
+    * [HIVE-13475] - Allow aggregate functions in over clause
+    * [HIVE-13736] - View's input/output formats are TEXT by default
 
 
 
@@ -89,12 +589,43 @@ Release Notes - Hive - Version 2.3.0
 
 
 
+** Task
+    * [HIVE-12205] - Spark: unify spark statististics aggregation between local and remote spark client
+    * [HIVE-12796] - Switch to 32-bits containers for HMS upgrade tests
+    * [HIVE-12828] - Update Spark version to 1.6
+    * [HIVE-12836] - Install wget & curl packages on LXC containers for HMS upgrade tests
+    * [HIVE-12940] - Cherry pick spark branch to master
+    * [HIVE-12987] - Add metrics for HS2 active users and SQL operations
+    * [HIVE-13097] - [Umbrella] Changes dependent on Tez 0.8.3
+    * [HIVE-13188] - Allow users of RetryingThriftClient to close transport
+    * [HIVE-13234] - Remove dead ODBC driver from Hive
+    * [HIVE-13385] - [Cleanup] Streamline Beeline instantiation
+    * [HIVE-13393] - Beeline: Print help message for the --incremental option
+    * [HIVE-13431] - Improvements to LLAPTaskReporter
+    * [HIVE-13433] - Fixes for additional incompatible changes in tez-0.8.3
+    * [HIVE-13537] - Update slf4j version to 1.7.10
+    * [HIVE-13603] - Fix ptest unit tests broken by HIVE13505
+    * [HIVE-13800] - Disable auth enabled by default on LLAP UI for secure clusters
+    * [HIVE-13835] - TestMiniTezCliDriver.vector_complex_all.q needs golden file update
 
 
 ** Test
-    * [HIVE-16288] - Add blobstore tests for ORC and RCFILE file formats
-    * [HIVE-16415] - Add tests covering single inserts of zero rows
-    * [HIVE-16454] - Add blobstore tests for inserting empty into dynamic partition/list bucket tables & inserting cross blobstore tables
+    * [HIVE-9147] - Add unit test for HIVE-7323
+    * [HIVE-11615] - Create test for max thrift message setting
+    * [HIVE-11887] - some tests break the build on a shared machine, can break HiveQA
+    * [HIVE-12079] - Add units tests for HiveServer2 LDAP filters added in HIVE-7193
+    * [HIVE-12279] - Testcase to verify session temporary files are removed after HIVE-11768
+    * [HIVE-12600] - Make index tests more robust
+    * [HIVE-12621] - PTest Backup additional Tez/Spark logs
+    * [HIVE-12628] - Eliminate flakiness in TestMetrics
+    * [HIVE-12715] - Unit test for HIVE-10685 fix
+    * [HIVE-12956] - run CBO in tests with mapred.mode=strict
+    * [HIVE-13055] - Add unit tests for HIVE-11512
+    * [HIVE-13268] - Add a HA mini cluster type in MiniHS2
+    * [HIVE-13371] - Fix test failure of testHasNull in TestColumnStatistics running on Windows
+    * [HIVE-13591] - TestSchemaTool is failing on master
+    * [HIVE-13615] - nomore_ambiguous_table_col.q is failing on master
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/pom.xml
----------------------------------------------------------------------
diff --git a/accumulo-handler/pom.xml b/accumulo-handler/pom.xml
index edac1b1..d22a54c 100644
--- a/accumulo-handler/pom.xml
+++ b/accumulo-handler/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 
@@ -62,16 +62,6 @@
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-common</artifactId>
       <version>${project.version}</version>
-        <exclusions>
-            <exclusion>
-                <groupId>org.eclipse.jetty.aggregate</groupId>
-                <artifactId>jetty-all</artifactId>
-            </exclusion>
-            <exclusion>
-                <groupId>org.eclipse.jetty.orbit</groupId>
-                <artifactId>javax.servlet</artifactId>
-            </exclusion>
-        </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
@@ -87,16 +77,6 @@
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-service</artifactId>
       <version>${project.version}</version>
-        <exclusions>
-            <exclusion>
-                <groupId>org.eclipse.jetty.aggregate</groupId>
-                <artifactId>jetty-all</artifactId>
-            </exclusion>
-            <exclusion>
-                <groupId>org.eclipse.jetty.orbit</groupId>
-                <artifactId>javax.servlet</artifactId>
-            </exclusion>
-        </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloDefaultIndexScanner.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloDefaultIndexScanner.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloDefaultIndexScanner.java
deleted file mode 100644
index 427a6c7..0000000
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloDefaultIndexScanner.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.accumulo;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.accumulo.serde.AccumuloIndexParameters;
-import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import static java.util.Collections.EMPTY_SET;
-
-/**
- * This default index scanner expects indexes to be in the same format as presto's
- * accumulo index tables defined as:
- * [rowid=field value] [cf=cfname_cqname] [cq=rowid] [visibility] [value=""]
- * <p>
- * This handler looks for the following hive serde properties:
- * 'accumulo.indextable.name' = 'table_idx' (required - name of the corresponding index table)
- * 'accumulo.indexed.columns' = 'name,age,phone' (optional - comma separated list of indexed
- *                      hive columns if not defined or defined as '*' all columns are
- *                      assumed to be indexed )
- * 'accumulo.index.rows.max' = '20000' (optional - maximum number of match indexes to use
- *                      before converting to a full table scan default=20000'
- *                      Note: This setting controls the size of the in-memory list of rowids
- *                      each search predicate. Using large values for this setting or having
- *                      very large rowid values may require additional memory to prevent
- *                      out of memory errors
- * 'accumulo.index.scanner'  = 'org.apache.hadoop.hive.accumulo.AccumuloDefaultIndexScanner'
- *                      (optional - name of the index scanner)
- * <p>
- * To implement your own index table scheme it should be as simple as sub-classing
- * this class and overriding getIndexRowRanges() and optionally init() if you need more
- * config settings
- */
-public class AccumuloDefaultIndexScanner implements AccumuloIndexScanner {
-  private static final Logger LOG = LoggerFactory.getLogger(AccumuloDefaultIndexScanner.class);
-
-  private AccumuloConnectionParameters connectParams;
-  private AccumuloIndexParameters indexParams;
-  private int maxRowIds;
-  private Authorizations auths;
-  private String indexTable;
-  private Set<String> indexColumns = EMPTY_SET;
-  private Connector connect;
-  private Map<String, String> colMap;
-
-  /**
-   * Initialize object based on configuration.
-   *
-   * @param conf - Hive configuration
-   */
-  @Override
-  public void init(Configuration conf) {
-    connectParams = new AccumuloConnectionParameters(conf);
-    indexParams = new AccumuloIndexParameters(conf);
-    maxRowIds = indexParams.getMaxIndexRows();
-    auths = indexParams.getTableAuths();
-    indexTable = indexParams.getIndexTable();
-    indexColumns = indexParams.getIndexColumns();
-    colMap = createColumnMap(conf);
-
-  }
-
-  /**
-   * Get a list of rowid ranges by scanning a column index.
-   *
-   * @param column     - the hive column name
-   * @param indexRange - Key range to scan on the index table
-   * @return List of matching rowid ranges or null if too many matches found
-   * if index values are not found a newline range is added to list to
-   * short-circuit the query
-   */
-  @Override
-  public List<Range> getIndexRowRanges(String column, Range indexRange) {
-    List<Range> rowIds = new ArrayList<Range>();
-    Scanner scan = null;
-    String col = this.colMap.get(column);
-
-    if (col != null) {
-
-      try {
-        LOG.debug("Searching tab=" + indexTable + " column=" + column + " range=" + indexRange);
-        Connector conn = getConnector();
-        scan = conn.createScanner(indexTable, auths);
-        scan.setRange(indexRange);
-        Text cf = new Text(col);
-        LOG.debug("Using Column Family=" + toString());
-        scan.fetchColumnFamily(cf);
-
-        for (Map.Entry<Key, Value> entry : scan) {
-
-          rowIds.add(new Range(entry.getKey().getColumnQualifier()));
-
-          // if we have too many results return null for a full scan
-          if (rowIds.size() > maxRowIds) {
-            return null;
-          }
-        }
-
-        // no hits on the index so return a no match range
-        if (rowIds.isEmpty()) {
-          LOG.debug("Found 0 index matches");
-        } else {
-          LOG.debug("Found " + rowIds.size() + " index matches");
-        }
-
-        return rowIds;
-      } catch (AccumuloException | AccumuloSecurityException | TableNotFoundException e) {
-        LOG.error("Failed to scan index table: " + indexTable, e);
-      } finally {
-        if (scan != null) {
-          scan.close();
-        }
-      }
-    }
-
-    // assume the index is bad and do a full scan
-    LOG.debug("Index lookup failed for table " + indexTable);
-    return null;
-  }
-
-  /**
-   * Test if column is defined in the index table.
-   *
-   * @param column - hive column name
-   * @return true if the column is defined as part of the index table
-   */
-  @Override
-  public boolean isIndexed(String column) {
-    return indexTable != null
-        && (indexColumns.isEmpty() || indexColumns.contains("*")
-        || this.indexColumns.contains(column.toLowerCase())
-        || this.indexColumns.contains(column.toUpperCase()));
-
-  }
-
-  protected Map<String, String> createColumnMap(Configuration conf) {
-    Map<String, String> colsMap = new HashMap<String, String>();
-    String accColString = conf.get(AccumuloSerDeParameters.COLUMN_MAPPINGS);
-    if (accColString != null && !accColString.trim().isEmpty()) {
-      String[] accCols = accColString.split(",");
-      String[] hiveCols = conf.get(serdeConstants.LIST_COLUMNS).split(",");
-      for (int i = 0; i < accCols.length; i++) {
-        colsMap.put(hiveCols[i], accCols[i].replace(':', '_'));
-      }
-    }
-    return colsMap;
-  }
-
-  protected Connector getConnector() throws AccumuloSecurityException, AccumuloException {
-    if (connect == null) {
-      connect = connectParams.getConnector();
-    }
-    return connect;
-  }
-
-  public void setConnectParams(AccumuloConnectionParameters connectParams) {
-    this.connectParams = connectParams;
-  }
-
-  public AccumuloConnectionParameters getConnectParams() {
-    return connectParams;
-  }
-
-  public AccumuloIndexParameters getIndexParams() {
-    return indexParams;
-  }
-
-  public int getMaxRowIds() {
-    return maxRowIds;
-  }
-
-  public Authorizations getAuths() {
-    return auths;
-  }
-
-  public String getIndexTable() {
-    return indexTable;
-  }
-
-  public Set<String> getIndexColumns() {
-    return indexColumns;
-  }
-
-  public Connector getConnect() {
-    return connect;
-  }
-}
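
The Javadoc of the deleted AccumuloDefaultIndexScanner above describes the Presto-style index layout it expects: [rowid=field value] [cf=cfname_cqname] [cq=data rowid] [value=""]. As an illustration only (not part of this patch), the sketch below shows one entry in that layout being written and then looked up with the same Scanner calls the deleted class uses. The table name "people_idx" is implied by the caller, and "name_first", "alice" and "row-0001" are invented example values.

// Hypothetical sketch, not part of this patch: one index entry in the
// Presto-style layout and the matching lookup.
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.io.Text;

public class IndexLayoutSketch {

  // Writes [rowid=field value] [cf=cfname_cqname] [cq=data rowid] [value=""].
  static void writeIndexEntry(Connector conn, String indexTable) throws Exception {
    BatchWriter bw = conn.createBatchWriter(indexTable, new BatchWriterConfig());
    Mutation m = new Mutation(new Text("alice"));              // indexed field value as the row id
    m.put(new Text("name_first"), new Text("row-0001"), new Value(new byte[0]));
    bw.addMutation(m);
    bw.close();
  }

  // Mirrors getIndexRowRanges(): scan the index table for an exact value and
  // collect the data-table rowids stored in the column qualifier.
  static List<Range> lookup(Connector conn, String indexTable) throws Exception {
    List<Range> rowIds = new ArrayList<Range>();
    Scanner scan = conn.createScanner(indexTable, new Authorizations());
    scan.setRange(Range.exact("alice"));
    scan.fetchColumnFamily(new Text("name_first"));
    for (Map.Entry<Key, Value> e : scan) {
      rowIds.add(new Range(e.getKey().getColumnQualifier()));
    }
    scan.close();
    return rowIds;
  }
}

Keeping the data-table rowid in the column qualifier leaves every index entry empty-valued and lets a single indexed value fan out to many data rows.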

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloIndexLexicoder.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloIndexLexicoder.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloIndexLexicoder.java
deleted file mode 100644
index 4ad35f8..0000000
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloIndexLexicoder.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.accumulo;
-
-import org.apache.accumulo.core.client.lexicoder.BigIntegerLexicoder;
-import org.apache.accumulo.core.client.lexicoder.DoubleLexicoder;
-import org.apache.accumulo.core.client.lexicoder.IntegerLexicoder;
-import org.apache.accumulo.core.client.lexicoder.LongLexicoder;
-import org.apache.hadoop.hive.serde.serdeConstants;
-
-import java.math.BigInteger;
-import java.nio.ByteBuffer;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-/**
- * Utility class to encode index values for accumulo.
- */
-public final class AccumuloIndexLexicoder {
-  private static final IntegerLexicoder INTEGER_LEXICODER = new IntegerLexicoder();
-  private static final DoubleLexicoder DOUBLE_LEXICODER = new DoubleLexicoder();
-  private static final LongLexicoder LONG_LEXICODER = new LongLexicoder();
-  private static final BigIntegerLexicoder BIG_INTEGER_LEXICODER = new BigIntegerLexicoder();
-  private static final String DIM_PAT = "[(]+.*";
-
-
-  private AccumuloIndexLexicoder() {
-    // hide constructor
-  }
-
-  public static String getRawType(String hiveType) {
-    if (hiveType != null) {
-      return hiveType.toLowerCase().replaceFirst(DIM_PAT, "").trim();
-    }
-    return hiveType;
-  }
-
-  public static byte[] encodeValue(byte[] value, String hiveType, boolean stringEncoded) {
-    if (stringEncoded) {
-      return encodeStringValue(value, hiveType);
-    } else {
-      return encodeBinaryValue(value, hiveType);
-    }
-  }
-
-  public static byte[] encodeStringValue(byte[] value, String hiveType) {
-    String rawType = getRawType(hiveType);
-
-    switch(rawType) {
-      case serdeConstants.BOOLEAN_TYPE_NAME:
-        return Boolean.valueOf(new String(value)).toString().getBytes(UTF_8);
-      case serdeConstants.SMALLINT_TYPE_NAME :
-      case serdeConstants.TINYINT_TYPE_NAME :
-      case serdeConstants.INT_TYPE_NAME :
-        return INTEGER_LEXICODER.encode(Integer.valueOf(new String(value)));
-      case serdeConstants.FLOAT_TYPE_NAME :
-      case serdeConstants.DOUBLE_TYPE_NAME :
-        return DOUBLE_LEXICODER.encode(Double.valueOf(new String(value)));
-      case serdeConstants.BIGINT_TYPE_NAME :
-        return BIG_INTEGER_LEXICODER.encode(new BigInteger(new String(value), 10));
-      case serdeConstants.DECIMAL_TYPE_NAME :
-        return new String(value).getBytes(UTF_8);
-      default :
-        // return the passed in string value
-        return value;
-    }
-  }
-
-  public static byte[] encodeBinaryValue(byte[] value, String hiveType) {
-    String rawType = getRawType(hiveType);
-
-    switch(rawType) {
-      case serdeConstants.BOOLEAN_TYPE_NAME :
-        return String.valueOf(value[0] == 1).getBytes();
-      case serdeConstants.INT_TYPE_NAME :
-        return INTEGER_LEXICODER.encode(ByteBuffer.wrap(value).asIntBuffer().get());
-      case serdeConstants.SMALLINT_TYPE_NAME :
-        return INTEGER_LEXICODER.encode((int)(ByteBuffer.wrap(value).asShortBuffer().get()));
-      case serdeConstants.TINYINT_TYPE_NAME :
-        return INTEGER_LEXICODER.encode((int)value[0]);
-      case serdeConstants.FLOAT_TYPE_NAME :
-        return DOUBLE_LEXICODER.encode((double)ByteBuffer.wrap(value).asFloatBuffer().get());
-      case serdeConstants.DOUBLE_TYPE_NAME :
-        return DOUBLE_LEXICODER.encode(ByteBuffer.wrap(value).asDoubleBuffer().get());
-      case serdeConstants.BIGINT_TYPE_NAME :
-        return BIG_INTEGER_LEXICODER.encode(new BigInteger(value));
-      case serdeConstants.DECIMAL_TYPE_NAME :
-        return new String(value).getBytes(UTF_8);
-      default :
-        return value;
-    }
-  }
-}
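
The deleted AccumuloIndexLexicoder above delegates numeric types to Accumulo lexicoders. A brief, hedged example of why that matters (the numbers 9 and 100 are arbitrary and not from the patch): lexicoder output sorts byte-wise in the same order as the original numbers, so range scans over the binary index stay correct, which plain string encoding would not guarantee.

// Hedged example, not from the patch: lexicoder-encoded integers compare the
// same way the numbers do, which is what makes binary index range scans usable.
import org.apache.accumulo.core.client.lexicoder.IntegerLexicoder;

public class LexicoderOrderSketch {
  public static void main(String[] args) {
    IntegerLexicoder lex = new IntegerLexicoder();
    byte[] nine = lex.encode(9);
    byte[] hundred = lex.encode(100);

    // Encoded bytes keep numeric order: 9 sorts before 100.
    System.out.println(compareUnsigned(nine, hundred) < 0);   // true

    // Plain string encoding would not: "100" sorts before "9".
    System.out.println("100".compareTo("9") < 0);             // true, i.e. wrong numeric order
  }

  // Byte-wise unsigned comparison, the order Accumulo uses for keys.
  static int compareUnsigned(byte[] a, byte[] b) {
    int n = Math.min(a.length, b.length);
    for (int i = 0; i < n; i++) {
      int c = (a[i] & 0xff) - (b[i] & 0xff);
      if (c != 0) {
        return c;
      }
    }
    return a.length - b.length;
  }
}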

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloIndexScanner.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloIndexScanner.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloIndexScanner.java
deleted file mode 100644
index 8029f3c..0000000
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloIndexScanner.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.accumulo;
-
-import org.apache.accumulo.core.data.Range;
-import org.apache.hadoop.conf.Configuration;
-
-import java.util.List;
-
-/**
- * Specification for implementing a AccumuloIndexScanner.
- */
-public interface AccumuloIndexScanner {
-
-  /**
-   * Initialize the index scanner implementation with the runtime configuration.
-   *
-   * @param conf  - the hadoop configuration
-   */
-  void init(Configuration conf);
-
-  /**
-   * Check if column is defined as being indexed.
-   *
-   * @param columnName - the hive column name
-   * @return true if the column is indexed
-   */
-  boolean isIndexed(String columnName);
-
-  /**
-   * Get a list of rowid ranges by scanning a column index.
-   *
-   * @param column     - the hive column name
-   * @param indexRange - Key range to scan on the index table
-   * @return List of matching rowid ranges or null if too many matches found
-   *
-   */
-  List<Range> getIndexRowRanges(String column, Range indexRange);
-
-}
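
For illustration only, a minimal implementation of the AccumuloIndexScanner interface removed above might look like the skeleton below. The class name is invented, it compiles only against a tree that still contains the interface, and in that tree it would be selected through the 'accumulo.index.scanner' serde property; the return-value semantics in the comments follow the deleted Javadoc.

// Illustrative skeleton only; not part of this patch.
import java.util.List;

import org.apache.accumulo.core.data.Range;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.accumulo.AccumuloIndexScanner;

public class PassThroughIndexScanner implements AccumuloIndexScanner {

  @Override
  public void init(Configuration conf) {
    // Read any custom settings from the job configuration here.
  }

  @Override
  public boolean isIndexed(String columnName) {
    // Claim nothing is indexed, so predicates always fall back to full scans.
    return false;
  }

  @Override
  public List<Range> getIndexRowRanges(String column, Range indexRange) {
    // Per the deleted Javadoc: null means "too many matches, do a full scan";
    // an empty list short-circuits the query as "no matches". Returning null
    // keeps the full-scan path.
    return null;
  }
}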

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloIndexScannerException.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloIndexScannerException.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloIndexScannerException.java
deleted file mode 100644
index c50b606..0000000
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloIndexScannerException.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.accumulo;
-
-/**
- * Exception class for AccumuloIndexScanner operations.
- */
-public class AccumuloIndexScannerException extends Exception {
-
-  private static final long serialVersionUID = 1L;
-
-  public AccumuloIndexScannerException() {
-    super();
-  }
-
-  public AccumuloIndexScannerException(String msg) {
-    super(msg);
-  }
-
-  public AccumuloIndexScannerException(String msg, Throwable cause) {
-    super(msg, cause);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java
index 62524e8..cdbc7f2 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java
@@ -1,11 +1,10 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
  *
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
@@ -18,6 +17,10 @@
 
 package org.apache.hadoop.hive.accumulo;
 
+import java.io.IOException;
+import java.util.Map;
+import java.util.Properties;
+
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
@@ -36,7 +39,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableInputFormat;
 import org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableOutputFormat;
 import org.apache.hadoop.hive.accumulo.predicate.AccumuloPredicateHandler;
-import org.apache.hadoop.hive.accumulo.serde.AccumuloIndexParameters;
 import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe;
 import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters;
 import org.apache.hadoop.hive.metastore.HiveMetaHook;
@@ -50,13 +52,13 @@ import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.AbstractSerDe;
 import org.apache.hadoop.hive.serde2.Deserializer;
+import org.apache.hadoop.hive.serde2.AbstractSerDe;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.OutputFormat;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.StringUtils;
@@ -64,18 +66,12 @@ import org.apache.zookeeper.ZooKeeper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-
 /**
  * Create table mapping to Accumulo for Hive. Handle predicate pushdown if necessary.
  */
 public class AccumuloStorageHandler extends DefaultStorageHandler implements HiveMetaHook,
     HiveStoragePredicateHandler {
-  private static final Logger LOG = LoggerFactory.getLogger(AccumuloStorageHandler.class);
+  private static final Logger log = LoggerFactory.getLogger(AccumuloStorageHandler.class);
   private static final String DEFAULT_PREFIX = "default";
 
   protected AccumuloPredicateHandler predicateHandler = AccumuloPredicateHandler.getInstance();
@@ -92,7 +88,7 @@ public class AccumuloStorageHandler extends DefaultStorageHandler implements Hiv
    *          Properties that will be added to the JobConf by Hive
    */
   @Override
-  public void configureTableJobProperties(TableDesc desc, Map<String, String> jobProps) {
+  public void configureTableJobProperties(TableDesc desc, Map<String,String> jobProps) {
     // Should not be getting invoked, configureInputJobProperties or configureOutputJobProperties
     // should be invoked instead.
     configureInputJobProperties(desc, jobProps);
@@ -123,21 +119,6 @@ public class AccumuloStorageHandler extends DefaultStorageHandler implements Hiv
     }
   }
 
-  protected String getIndexTableName(Table table) {
-    // Use TBLPROPERTIES
-    String idxTableName = table.getParameters().get(AccumuloIndexParameters.INDEXTABLE_NAME);
-
-    if (null != idxTableName) {
-      return idxTableName;
-    }
-
-    // Then try SERDEPROPERTIES
-    idxTableName = table.getSd().getSerdeInfo().getParameters()
-        .get(AccumuloIndexParameters.INDEXTABLE_NAME);
-
-    return idxTableName;
-  }
-
   protected String getTableName(TableDesc tableDesc) {
     Properties props = tableDesc.getProperties();
     String tableName = props.getProperty(AccumuloSerDeParameters.TABLE_NAME);
@@ -154,18 +135,6 @@ public class AccumuloStorageHandler extends DefaultStorageHandler implements Hiv
     return tableName;
   }
 
-  protected String getColumnTypes(TableDesc tableDesc)  {
-    Properties props = tableDesc.getProperties();
-    String columnTypes = props.getProperty(serdeConstants.LIST_COLUMN_TYPES);
-    return columnTypes;
-  }
-
-  protected String getIndexTableName(TableDesc tableDesc) {
-    Properties props = tableDesc.getProperties();
-    String tableName = props.getProperty(AccumuloIndexParameters.INDEXTABLE_NAME);
-    return tableName;
-  }
-
   @Override
   public Configuration getConf() {
     return conf;
@@ -194,7 +163,7 @@ public class AccumuloStorageHandler extends DefaultStorageHandler implements Hiv
   }
 
   @Override
-  public void configureInputJobProperties(TableDesc tableDesc, Map<String, String> jobProperties) {
+  public void configureInputJobProperties(TableDesc tableDesc, Map<String,String> jobProperties) {
     Properties props = tableDesc.getProperties();
 
     jobProperties.put(AccumuloSerDeParameters.COLUMN_MAPPINGS,
@@ -209,7 +178,7 @@ public class AccumuloStorageHandler extends DefaultStorageHandler implements Hiv
 
     String useIterators = props.getProperty(AccumuloSerDeParameters.ITERATOR_PUSHDOWN_KEY);
     if (useIterators != null) {
-      if (!"true".equalsIgnoreCase(useIterators) && !"false".equalsIgnoreCase(useIterators)) {
+      if (!useIterators.equalsIgnoreCase("true") && !useIterators.equalsIgnoreCase("false")) {
         throw new IllegalArgumentException("Expected value of true or false for "
             + AccumuloSerDeParameters.ITERATOR_PUSHDOWN_KEY);
       }
@@ -227,15 +196,15 @@ public class AccumuloStorageHandler extends DefaultStorageHandler implements Hiv
       jobProperties.put(AccumuloSerDeParameters.AUTHORIZATIONS_KEY, authValue);
     }
 
-    LOG.info("Computed input job properties of " + jobProperties);
+    log.info("Computed input job properties of " + jobProperties);
   }
 
   @Override
-  public void configureOutputJobProperties(TableDesc tableDesc, Map<String, String> jobProperties) {
+  public void configureOutputJobProperties(TableDesc tableDesc, Map<String,String> jobProperties) {
     Properties props = tableDesc.getProperties();
     // Adding these job properties will make them available to the OutputFormat in checkOutputSpecs
-    String colMap = props.getProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS);
-    jobProperties.put(AccumuloSerDeParameters.COLUMN_MAPPINGS, colMap);
+    jobProperties.put(AccumuloSerDeParameters.COLUMN_MAPPINGS,
+        props.getProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS));
 
     String tableName = props.getProperty(AccumuloSerDeParameters.TABLE_NAME);
     if (null == tableName) {
@@ -243,19 +212,6 @@ public class AccumuloStorageHandler extends DefaultStorageHandler implements Hiv
     }
     jobProperties.put(AccumuloSerDeParameters.TABLE_NAME, tableName);
 
-    String indexTable = props.getProperty(AccumuloIndexParameters.INDEXTABLE_NAME);
-    if (null == indexTable) {
-      indexTable = getIndexTableName(tableDesc);
-    }
-
-    if ( null != indexTable) {
-      jobProperties.put(AccumuloIndexParameters.INDEXTABLE_NAME, indexTable);
-
-      String indexColumns = props.getProperty(AccumuloIndexParameters.INDEXED_COLUMNS);
-      jobProperties.put(AccumuloIndexParameters.INDEXED_COLUMNS,
-          getIndexedColFamQuals(tableDesc, indexColumns, colMap));
-    }
-
     if (props.containsKey(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE)) {
       jobProperties.put(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE,
           props.getProperty(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE));
@@ -267,42 +223,6 @@ public class AccumuloStorageHandler extends DefaultStorageHandler implements Hiv
     }
   }
 
-  private String getIndexedColFamQuals(TableDesc tableDesc, String indexColumns, String colMap) {
-    StringBuilder sb = new StringBuilder();
-
-    String cols = indexColumns;
-
-
-    String hiveColString = tableDesc.getProperties().getProperty(serdeConstants.LIST_COLUMNS);
-    // if there are actual accumulo index columns defined then build
-    // the comma separated list of accumulo columns
-    if (cols == null || cols.isEmpty() || "*".equals(indexColumns)) {
-      // skip rowid
-      cols = hiveColString.substring(hiveColString.indexOf(',')+1);
-    }
-
-    String[] hiveTypes = tableDesc.getProperties()
-        .getProperty(serdeConstants.LIST_COLUMN_TYPES).split(":");
-    String[] accCols = colMap.split(",");
-    String[] hiveCols = hiveColString.split(",");
-    Set<String> indexSet = new HashSet<String>();
-
-    for (String idx : cols.split(",")) {
-      indexSet.add(idx.trim());
-    }
-
-    for (int i = 0; i < hiveCols.length; i++) {
-      if (indexSet.contains(hiveCols[i].trim())) {
-        if (sb.length() > 0) {
-          sb.append(",");
-        }
-        sb.append(accCols[i].trim() + ":" + AccumuloIndexLexicoder.getRawType(hiveTypes[i]));
-      }
-    }
-
-    return sb.toString();
-  }
-
   @SuppressWarnings("rawtypes")
   @Override
   public Class<? extends InputFormat> getInputFormatClass() {
@@ -322,7 +242,7 @@ public class AccumuloStorageHandler extends DefaultStorageHandler implements Hiv
       throw new MetaException("Location can't be specified for Accumulo");
     }
 
-    Map<String, String> serdeParams = table.getSd().getSerdeInfo().getParameters();
+    Map<String,String> serdeParams = table.getSd().getSerdeInfo().getParameters();
     String columnMapping = serdeParams.get(AccumuloSerDeParameters.COLUMN_MAPPINGS);
     if (columnMapping == null) {
       throw new MetaException(AccumuloSerDeParameters.COLUMN_MAPPINGS
@@ -348,16 +268,6 @@ public class AccumuloStorageHandler extends DefaultStorageHandler implements Hiv
               + " already exists in Accumulo. Use CREATE EXTERNAL TABLE to register with Hive.");
         }
       }
-
-      String idxTable = getIndexTableName(table);
-
-      if (idxTable != null && !idxTable.isEmpty()) {
-
-        // create the index table if it does not exist
-        if (!tableOpts.exists(idxTable)) {
-          tableOpts.create(idxTable);
-        }
-      }
     } catch (AccumuloSecurityException e) {
       throw new MetaException(StringUtils.stringifyException(e));
     } catch (TableExistsException e) {
@@ -426,7 +336,7 @@ public class AccumuloStorageHandler extends DefaultStorageHandler implements Hiv
     if (serDe.getIteratorPushdown()) {
       return predicateHandler.decompose(conf, desc);
     } else {
-      LOG.info("Set to ignore Accumulo iterator pushdown, skipping predicate handler.");
+      log.info("Set to ignore Accumulo iterator pushdown, skipping predicate handler.");
       return null;
     }
   }
@@ -438,24 +348,22 @@ public class AccumuloStorageHandler extends DefaultStorageHandler implements Hiv
       Utils.addDependencyJars(jobConf, Tracer.class, Fate.class, Connector.class, Main.class,
           ZooKeeper.class, AccumuloStorageHandler.class);
     } catch (IOException e) {
-      LOG.error("Could not add necessary Accumulo dependencies to classpath", e);
+      log.error("Could not add necessary Accumulo dependencies to classpath", e);
     }
 
     Properties tblProperties = tableDesc.getProperties();
     AccumuloSerDeParameters serDeParams = null;
     try {
-      serDeParams =
-          new AccumuloSerDeParameters(jobConf, tblProperties, AccumuloSerDe.class.getName());
+      serDeParams = new AccumuloSerDeParameters(jobConf, tblProperties, AccumuloSerDe.class.getName());
     } catch (SerDeException e) {
-      LOG.error("Could not instantiate AccumuloSerDeParameters", e);
+      log.error("Could not instantiate AccumuloSerDeParameters", e);
       return;
     }
 
     try {
       serDeParams.getRowIdFactory().addDependencyJars(jobConf);
     } catch (IOException e) {
-      LOG.error("Could not add necessary dependencies for "
-          + serDeParams.getRowIdFactory().getClass(), e);
+      log.error("Could not add necessary dependencies for " + serDeParams.getRowIdFactory().getClass(), e);
     }
 
     // When Kerberos is enabled, we have to add the Accumulo delegation token to the
@@ -475,26 +383,25 @@ public class AccumuloStorageHandler extends DefaultStorageHandler implements Hiv
               connectionParams.getAccumuloUserName(), token);
         } catch (IllegalStateException e) {
           // The implementation balks when this method is invoked multiple times
-          LOG.debug("Ignoring IllegalArgumentException about re-setting connector information");
+          log.debug("Ignoring IllegalArgumentException about re-setting connector information");
         }
         try {
           OutputConfigurator.setConnectorInfo(AccumuloOutputFormat.class, jobConf,
               connectionParams.getAccumuloUserName(), token);
         } catch (IllegalStateException e) {
           // The implementation balks when this method is invoked multiple times
-          LOG.debug("Ignoring IllegalArgumentException about re-setting connector information");
+          log.debug("Ignoring IllegalArgumentException about re-setting connector information");
         }
 
         // Convert the Accumulo token in a Hadoop token
         Token<? extends TokenIdentifier> accumuloToken = helper.getHadoopToken(token);
 
-        LOG.info("Adding Hadoop Token for Accumulo to Job's Credentials");
+        log.info("Adding Hadoop Token for Accumulo to Job's Credentials");
 
         // Add the Hadoop token to the JobConf
         helper.mergeTokenIntoJobConf(jobConf, accumuloToken);
       } catch (Exception e) {
-        throw new RuntimeException("Failed to obtain DelegationToken for "
-            + connectionParams.getAccumuloUserName(), e);
+        throw new RuntimeException("Failed to obtain DelegationToken for " + connectionParams.getAccumuloUserName(), e);
       }
     }
   }


http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql
deleted file mode 100644
index 4db2f34..0000000
--- a/metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql
+++ /dev/null
@@ -1,853 +0,0 @@
--- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
---
--- Host: localhost    Database: test
--- ------------------------------------------------------
--- Server version	5.5.25
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Table structure for table `BUCKETING_COLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `BUCKETING_COLS_N49` (`SD_ID`),
-  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `CDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `CDS` (
-  `CD_ID` bigint(20) NOT NULL,
-  PRIMARY KEY (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `COLUMNS_V2`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
-  `CD_ID` bigint(20) NOT NULL,
-  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TYPE_NAME` MEDIUMTEXT DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
-  KEY `COLUMNS_V2_N49` (`CD_ID`),
-  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DATABASE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
-  `DB_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
-  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
-  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DBS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DBS` (
-  `DB_ID` bigint(20) NOT NULL,
-  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_ID`),
-  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DB_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
-  `DB_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DB_ID` bigint(20) DEFAULT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_GRANT_ID`),
-  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `DB_PRIVS_N49` (`DB_ID`),
-  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `GLOBAL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
-  `USER_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`USER_GRANT_ID`),
-  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `IDXS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `IDXS` (
-  `INDEX_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DEFERRED_REBUILD` bit(1) NOT NULL,
-  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`INDEX_ID`),
-  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
-  KEY `IDXS_N51` (`SD_ID`),
-  KEY `IDXS_N50` (`INDEX_TBL_ID`),
-  KEY `IDXS_N49` (`ORIG_TBL_ID`),
-  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `INDEX_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
-  `INDEX_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
-  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
-  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `NUCLEUS_TABLES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
-  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`CLASS_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITIONS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITIONS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`PART_ID`),
-  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
-  KEY `PARTITIONS_N49` (`TBL_ID`),
-  KEY `PARTITIONS_N50` (`SD_ID`),
-  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_EVENTS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
-  `PART_NAME_ID` bigint(20) NOT NULL,
-  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `EVENT_TIME` bigint(20) NOT NULL,
-  `EVENT_TYPE` int(11) NOT NULL,
-  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_NAME_ID`),
-  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_KEYS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
-  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
-  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_KEY_VALS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
-  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
-  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
-  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
-  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PART_COL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
-  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_ID` bigint(20) DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
-  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
-  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PART_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
-  `PART_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_ID` bigint(20) DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_GRANT_ID`),
-  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `PART_PRIVS_N49` (`PART_ID`),
-  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `ROLES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `ROLES` (
-  `ROLE_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`ROLE_ID`),
-  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `ROLE_MAP`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
-  `ROLE_GRANT_ID` bigint(20) NOT NULL,
-  `ADD_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ROLE_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`ROLE_GRANT_ID`),
-  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `ROLE_MAP_N49` (`ROLE_ID`),
-  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SDS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `CD_ID` bigint(20) DEFAULT NULL,
-  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `IS_COMPRESSED` bit(1) NOT NULL,
-  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
-  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `NUM_BUCKETS` int(11) NOT NULL,
-  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SERDE_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`),
-  KEY `SDS_N49` (`SERDE_ID`),
-  KEY `SDS_N50` (`CD_ID`),
-  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
-  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SD_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
-  KEY `SD_PARAMS_N49` (`SD_ID`),
-  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SEQUENCE_TABLE`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
-  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `NEXT_VAL` bigint(20) NOT NULL,
-  PRIMARY KEY (`SEQUENCE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SERDES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SERDES` (
-  `SERDE_ID` bigint(20) NOT NULL,
-  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SERDE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SERDE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
-  `SERDE_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
-  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
-  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_COL_NAMES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
-  `SD_ID` bigint(20) NOT NULL,
-  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
-  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
-  `SD_ID` bigint(20) NOT NULL,
-  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
-  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
-  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
-  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
-  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_STRING_LIST`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
-  `STRING_LIST_ID` bigint(20) NOT NULL,
-  PRIMARY KEY (`STRING_LIST_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_STRING_LIST_VALUES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
-  `STRING_LIST_ID` bigint(20) NOT NULL,
-  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
-  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_VALUES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
-  `SD_ID_OID` bigint(20) NOT NULL,
-  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
-  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
-  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
-  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SORT_COLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SORT_COLS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ORDER` int(11) NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `SORT_COLS_N49` (`SD_ID`),
-  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TABLE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
-  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
-  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBLS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DB_ID` bigint(20) DEFAULT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `RETENTION` int(11) NOT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `VIEW_EXPANDED_TEXT` mediumtext,
-  `VIEW_ORIGINAL_TEXT` mediumtext,
-  `IS_REWRITE_ENABLED` bit(1) NOT NULL,
-  PRIMARY KEY (`TBL_ID`),
-  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
-  KEY `TBLS_N50` (`SD_ID`),
-  KEY `TBLS_N49` (`DB_ID`),
-  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBL_COL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
-  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
-  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
-  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
-  `TBL_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_GRANT_ID`),
-  KEY `TBL_PRIVS_N49` (`TBL_ID`),
-  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TAB_COL_STATS`
---
-CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
- `CS_ID` bigint(20) NOT NULL,
- `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TBL_ID` bigint(20) NOT NULL,
- `LONG_LOW_VALUE` bigint(20),
- `LONG_HIGH_VALUE` bigint(20),
- `DOUBLE_HIGH_VALUE` double(53,4),
- `DOUBLE_LOW_VALUE` double(53,4),
- `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `NUM_NULLS` bigint(20) NOT NULL,
- `NUM_DISTINCTS` bigint(20),
- `AVG_COL_LEN` double(53,4),
- `MAX_COL_LEN` bigint(20),
- `NUM_TRUES` bigint(20),
- `NUM_FALSES` bigint(20),
- `LAST_ANALYZED` bigint(20) NOT NULL,
-  PRIMARY KEY (`CS_ID`),
-  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table `PART_COL_STATS`
---
-CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
- `CS_ID` bigint(20) NOT NULL,
- `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `PART_ID` bigint(20) NOT NULL,
- `LONG_LOW_VALUE` bigint(20),
- `LONG_HIGH_VALUE` bigint(20),
- `DOUBLE_HIGH_VALUE` double(53,4),
- `DOUBLE_LOW_VALUE` double(53,4),
- `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `NUM_NULLS` bigint(20) NOT NULL,
- `NUM_DISTINCTS` bigint(20),
- `AVG_COL_LEN` double(53,4),
- `MAX_COL_LEN` bigint(20),
- `NUM_TRUES` bigint(20),
- `NUM_FALSES` bigint(20),
- `LAST_ANALYZED` bigint(20) NOT NULL,
-  PRIMARY KEY (`CS_ID`),
-  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE;
-
---
--- Table structure for table `TYPES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TYPES` (
-  `TYPES_ID` bigint(20) NOT NULL,
-  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`TYPES_ID`),
-  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TYPE_FIELDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
-  `TYPE_NAME` bigint(20) NOT NULL,
-  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
-  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
-  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
--- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
-CREATE TABLE IF NOT EXISTS `MASTER_KEYS` 
-(
-    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
-    `MASTER_KEY` VARCHAR(767) BINARY NULL,
-    PRIMARY KEY (`KEY_ID`)
-) ENGINE=INNODB DEFAULT CHARSET=latin1;
-
--- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
-CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
-(
-    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
-    `TOKEN` VARCHAR(767) BINARY NULL,
-    PRIMARY KEY (`TOKEN_IDENT`)
-) ENGINE=INNODB DEFAULT CHARSET=latin1;
-
---
--- Table structure for VERSION
---
-CREATE TABLE IF NOT EXISTS `VERSION` (
-  `VER_ID` BIGINT NOT NULL,
-  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
-  `VERSION_COMMENT` VARCHAR(255),
-  PRIMARY KEY (`VER_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table FUNCS
---
-CREATE TABLE IF NOT EXISTS `FUNCS` (
-  `FUNC_ID` BIGINT(20) NOT NULL,
-  `CLASS_NAME` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
-  `CREATE_TIME` INT(11) NOT NULL,
-  `DB_ID` BIGINT(20),
-  `FUNC_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
-  `FUNC_TYPE` INT(11) NOT NULL,
-  `OWNER_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
-  `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,
-  PRIMARY KEY (`FUNC_ID`),
-  UNIQUE KEY `UNIQUEFUNCTION` (`FUNC_NAME`, `DB_ID`),
-  KEY `FUNCS_N49` (`DB_ID`),
-  CONSTRAINT `FUNCS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table FUNC_RU
---
-CREATE TABLE IF NOT EXISTS `FUNC_RU` (
-  `FUNC_ID` BIGINT(20) NOT NULL,
-  `RESOURCE_TYPE` INT(11) NOT NULL,
-  `RESOURCE_URI` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
-  `INTEGER_IDX` INT(11) NOT NULL,
-  PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`),
-  CONSTRAINT `FUNC_RU_FK1` FOREIGN KEY (`FUNC_ID`) REFERENCES `FUNCS` (`FUNC_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE IF NOT EXISTS `NOTIFICATION_LOG`
-(
-    `NL_ID` BIGINT(20) NOT NULL,
-    `EVENT_ID` BIGINT(20) NOT NULL,
-    `EVENT_TIME` INT(11) NOT NULL,
-    `EVENT_TYPE` varchar(32) NOT NULL,
-    `DB_NAME` varchar(128),
-    `TBL_NAME` varchar(256),
-    `MESSAGE` longtext,
-    `MESSAGE_FORMAT` varchar(16),
-    PRIMARY KEY (`NL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE IF NOT EXISTS `NOTIFICATION_SEQUENCE`
-(
-    `NNI_ID` BIGINT(20) NOT NULL,
-    `NEXT_EVENT_ID` BIGINT(20) NOT NULL,
-    PRIMARY KEY (`NNI_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS`
-(
-  `CHILD_CD_ID` BIGINT,
-  `CHILD_INTEGER_IDX` INT(11),
-  `CHILD_TBL_ID` BIGINT,
-  `PARENT_CD_ID` BIGINT NOT NULL,
-  `PARENT_INTEGER_IDX` INT(11) NOT NULL,
-  `PARENT_TBL_ID` BIGINT NOT NULL,
-  `POSITION` BIGINT NOT NULL,
-  `CONSTRAINT_NAME` VARCHAR(400) NOT NULL,
-  `CONSTRAINT_TYPE` SMALLINT(6)  NOT NULL,
-  `UPDATE_RULE` SMALLINT(6),
-  `DELETE_RULE` SMALLINT(6),
-  `ENABLE_VALIDATE_RELY` SMALLINT(6) NOT NULL,
-  PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE INDEX `CONSTRAINTS_PARENT_TABLE_ID_INDEX` ON KEY_CONSTRAINTS (`PARENT_TBL_ID`) USING BTREE;
-
--- ----------------------------
--- Transaction and Lock Tables
--- ----------------------------
-SOURCE hive-txn-schema-3.0.0.mysql.sql;
-
--- -----------------------------------------------------------------
--- Record schema version. Should be the last step in the init script
--- -----------------------------------------------------------------
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '3.0.0', 'Hive release version 3.0.0');
-
-/*!40101 SET character_set_client = @saved_cs_client */;
-/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
-
-/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
-/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
-/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-
--- Dump completed on 2012-08-23  0:56:31

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mysql/hive-txn-schema-2.2.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-txn-schema-2.2.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-txn-schema-2.2.0.mysql.sql
index ac39a32..58835cb 100644
--- a/metastore/scripts/upgrade/mysql/hive-txn-schema-2.2.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/hive-txn-schema-2.2.0.mysql.sql
@@ -41,7 +41,7 @@ CREATE TABLE TXN_COMPONENTS (
 CREATE TABLE COMPLETED_TXN_COMPONENTS (
   CTC_TXNID bigint NOT NULL,
   CTC_DATABASE varchar(128) NOT NULL,
-  CTC_TABLE varchar(256),
+  CTC_TABLE varchar(128),
   CTC_PARTITION varchar(767)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mysql/hive-txn-schema-2.3.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-txn-schema-2.3.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-txn-schema-2.3.0.mysql.sql
deleted file mode 100644
index 1df32c4..0000000
--- a/metastore/scripts/upgrade/mysql/hive-txn-schema-2.3.0.mysql.sql
+++ /dev/null
@@ -1,135 +0,0 @@
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements.  See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the "License"); you may not use this file except in compliance with
--- the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
---
--- Tables for transaction management
--- 
-
-CREATE TABLE TXNS (
-  TXN_ID bigint PRIMARY KEY,
-  TXN_STATE char(1) NOT NULL,
-  TXN_STARTED bigint NOT NULL,
-  TXN_LAST_HEARTBEAT bigint NOT NULL,
-  TXN_USER varchar(128) NOT NULL,
-  TXN_HOST varchar(128) NOT NULL,
-  TXN_AGENT_INFO varchar(128),
-  TXN_META_INFO varchar(128),
-  TXN_HEARTBEAT_COUNT int
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE TXN_COMPONENTS (
-  TC_TXNID bigint NOT NULL,
-  TC_DATABASE varchar(128) NOT NULL,
-  TC_TABLE varchar(128) NOT NULL,
-  TC_PARTITION varchar(767),
-  TC_OPERATION_TYPE char(1) NOT NULL,
-  FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
-
-CREATE TABLE COMPLETED_TXN_COMPONENTS (
-  CTC_TXNID bigint NOT NULL,
-  CTC_DATABASE varchar(128) NOT NULL,
-  CTC_TABLE varchar(256),
-  CTC_PARTITION varchar(767)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE NEXT_TXN_ID (
-  NTXN_NEXT bigint NOT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-INSERT INTO NEXT_TXN_ID VALUES(1);
-
-CREATE TABLE HIVE_LOCKS (
-  HL_LOCK_EXT_ID bigint NOT NULL,
-  HL_LOCK_INT_ID bigint NOT NULL,
-  HL_TXNID bigint,
-  HL_DB varchar(128) NOT NULL,
-  HL_TABLE varchar(128),
-  HL_PARTITION varchar(767),
-  HL_LOCK_STATE char(1) not null,
-  HL_LOCK_TYPE char(1) not null,
-  HL_LAST_HEARTBEAT bigint NOT NULL,
-  HL_ACQUIRED_AT bigint,
-  HL_USER varchar(128) NOT NULL,
-  HL_HOST varchar(128) NOT NULL,
-  HL_HEARTBEAT_COUNT int,
-  HL_AGENT_INFO varchar(128),
-  HL_BLOCKEDBY_EXT_ID bigint,
-  HL_BLOCKEDBY_INT_ID bigint,
-  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID),
-  KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID);
-
-CREATE TABLE NEXT_LOCK_ID (
-  NL_NEXT bigint NOT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-INSERT INTO NEXT_LOCK_ID VALUES(1);
-
-CREATE TABLE COMPACTION_QUEUE (
-  CQ_ID bigint PRIMARY KEY,
-  CQ_DATABASE varchar(128) NOT NULL,
-  CQ_TABLE varchar(128) NOT NULL,
-  CQ_PARTITION varchar(767),
-  CQ_STATE char(1) NOT NULL,
-  CQ_TYPE char(1) NOT NULL,
-  CQ_TBLPROPERTIES varchar(2048),
-  CQ_WORKER_ID varchar(128),
-  CQ_START bigint,
-  CQ_RUN_AS varchar(128),
-  CQ_HIGHEST_TXN_ID bigint,
-  CQ_META_INFO varbinary(2048),
-  CQ_HADOOP_JOB_ID varchar(32)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE COMPLETED_COMPACTIONS (
-  CC_ID bigint PRIMARY KEY,
-  CC_DATABASE varchar(128) NOT NULL,
-  CC_TABLE varchar(128) NOT NULL,
-  CC_PARTITION varchar(767),
-  CC_STATE char(1) NOT NULL,
-  CC_TYPE char(1) NOT NULL,
-  CC_TBLPROPERTIES varchar(2048),
-  CC_WORKER_ID varchar(128),
-  CC_START bigint,
-  CC_END bigint,
-  CC_RUN_AS varchar(128),
-  CC_HIGHEST_TXN_ID bigint,
-  CC_META_INFO varbinary(2048),
-  CC_HADOOP_JOB_ID varchar(32)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
-  NCQ_NEXT bigint NOT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
-
-CREATE TABLE AUX_TABLE (
-  MT_KEY1 varchar(128) NOT NULL,
-  MT_KEY2 bigint NOT NULL,
-  MT_COMMENT varchar(255),
-  PRIMARY KEY(MT_KEY1, MT_KEY2)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE WRITE_SET (
-  WS_DATABASE varchar(128) NOT NULL,
-  WS_TABLE varchar(128) NOT NULL,
-  WS_PARTITION varchar(767),
-  WS_TXNID bigint NOT NULL,
-  WS_COMMIT_ID bigint NOT NULL,
-  WS_OPERATION_TYPE char(1) NOT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mysql/hive-txn-schema-3.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-txn-schema-3.0.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-txn-schema-3.0.0.mysql.sql
deleted file mode 100644
index 1df32c4..0000000
--- a/metastore/scripts/upgrade/mysql/hive-txn-schema-3.0.0.mysql.sql
+++ /dev/null
@@ -1,135 +0,0 @@
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements.  See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the "License"); you may not use this file except in compliance with
--- the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
---
--- Tables for transaction management
--- 
-
-CREATE TABLE TXNS (
-  TXN_ID bigint PRIMARY KEY,
-  TXN_STATE char(1) NOT NULL,
-  TXN_STARTED bigint NOT NULL,
-  TXN_LAST_HEARTBEAT bigint NOT NULL,
-  TXN_USER varchar(128) NOT NULL,
-  TXN_HOST varchar(128) NOT NULL,
-  TXN_AGENT_INFO varchar(128),
-  TXN_META_INFO varchar(128),
-  TXN_HEARTBEAT_COUNT int
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE TXN_COMPONENTS (
-  TC_TXNID bigint NOT NULL,
-  TC_DATABASE varchar(128) NOT NULL,
-  TC_TABLE varchar(128) NOT NULL,
-  TC_PARTITION varchar(767),
-  TC_OPERATION_TYPE char(1) NOT NULL,
-  FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
-
-CREATE TABLE COMPLETED_TXN_COMPONENTS (
-  CTC_TXNID bigint NOT NULL,
-  CTC_DATABASE varchar(128) NOT NULL,
-  CTC_TABLE varchar(256),
-  CTC_PARTITION varchar(767)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE NEXT_TXN_ID (
-  NTXN_NEXT bigint NOT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-INSERT INTO NEXT_TXN_ID VALUES(1);
-
-CREATE TABLE HIVE_LOCKS (
-  HL_LOCK_EXT_ID bigint NOT NULL,
-  HL_LOCK_INT_ID bigint NOT NULL,
-  HL_TXNID bigint,
-  HL_DB varchar(128) NOT NULL,
-  HL_TABLE varchar(128),
-  HL_PARTITION varchar(767),
-  HL_LOCK_STATE char(1) not null,
-  HL_LOCK_TYPE char(1) not null,
-  HL_LAST_HEARTBEAT bigint NOT NULL,
-  HL_ACQUIRED_AT bigint,
-  HL_USER varchar(128) NOT NULL,
-  HL_HOST varchar(128) NOT NULL,
-  HL_HEARTBEAT_COUNT int,
-  HL_AGENT_INFO varchar(128),
-  HL_BLOCKEDBY_EXT_ID bigint,
-  HL_BLOCKEDBY_INT_ID bigint,
-  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID),
-  KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID);
-
-CREATE TABLE NEXT_LOCK_ID (
-  NL_NEXT bigint NOT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-INSERT INTO NEXT_LOCK_ID VALUES(1);
-
-CREATE TABLE COMPACTION_QUEUE (
-  CQ_ID bigint PRIMARY KEY,
-  CQ_DATABASE varchar(128) NOT NULL,
-  CQ_TABLE varchar(128) NOT NULL,
-  CQ_PARTITION varchar(767),
-  CQ_STATE char(1) NOT NULL,
-  CQ_TYPE char(1) NOT NULL,
-  CQ_TBLPROPERTIES varchar(2048),
-  CQ_WORKER_ID varchar(128),
-  CQ_START bigint,
-  CQ_RUN_AS varchar(128),
-  CQ_HIGHEST_TXN_ID bigint,
-  CQ_META_INFO varbinary(2048),
-  CQ_HADOOP_JOB_ID varchar(32)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE COMPLETED_COMPACTIONS (
-  CC_ID bigint PRIMARY KEY,
-  CC_DATABASE varchar(128) NOT NULL,
-  CC_TABLE varchar(128) NOT NULL,
-  CC_PARTITION varchar(767),
-  CC_STATE char(1) NOT NULL,
-  CC_TYPE char(1) NOT NULL,
-  CC_TBLPROPERTIES varchar(2048),
-  CC_WORKER_ID varchar(128),
-  CC_START bigint,
-  CC_END bigint,
-  CC_RUN_AS varchar(128),
-  CC_HIGHEST_TXN_ID bigint,
-  CC_META_INFO varbinary(2048),
-  CC_HADOOP_JOB_ID varchar(32)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
-  NCQ_NEXT bigint NOT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
-
-CREATE TABLE AUX_TABLE (
-  MT_KEY1 varchar(128) NOT NULL,
-  MT_KEY2 bigint NOT NULL,
-  MT_COMMENT varchar(255),
-  PRIMARY KEY(MT_KEY1, MT_KEY2)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE WRITE_SET (
-  WS_DATABASE varchar(128) NOT NULL,
-  WS_TABLE varchar(128) NOT NULL,
-  WS_PARTITION varchar(767),
-  WS_TXNID bigint NOT NULL,
-  WS_COMMIT_ID bigint NOT NULL,
-  WS_OPERATION_TYPE char(1) NOT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql b/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql
index f4c69a5..68300d3 100644
--- a/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql
@@ -3,7 +3,6 @@ SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS ' ';
 SOURCE 037-HIVE-14496.mysql.sql;
 SOURCE 038-HIVE-14637.mysql.sql;
 SOURCE 038-HIVE-10562.mysql.sql;
-SOURCE 039-HIVE-12274.mysql.sql;
 
 UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS ' ';

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mysql/upgrade-2.2.0-to-2.3.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/upgrade-2.2.0-to-2.3.0.mysql.sql b/metastore/scripts/upgrade/mysql/upgrade-2.2.0-to-2.3.0.mysql.sql
deleted file mode 100644
index 9da8757..0000000
--- a/metastore/scripts/upgrade/mysql/upgrade-2.2.0-to-2.3.0.mysql.sql
+++ /dev/null
@@ -1,7 +0,0 @@
-SELECT 'Upgrading MetaStore schema from 2.2.0 to 2.3.0' AS ' ';
-
-SOURCE 040-HIVE-16399.mysql.sql;
-
-UPDATE VERSION SET SCHEMA_VERSION='2.3.0', VERSION_COMMENT='Hive release version 2.3.0' where VER_ID=1;
-SELECT 'Finished upgrading MetaStore schema from 2.2.0 to 2.3.0' AS ' ';
-

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql b/metastore/scripts/upgrade/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
deleted file mode 100644
index e5d82e1..0000000
--- a/metastore/scripts/upgrade/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
+++ /dev/null
@@ -1,5 +0,0 @@
-SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0' AS ' ';
-
-UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
-SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS ' ';
-

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/mysql/upgrade.order.mysql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/upgrade.order.mysql b/metastore/scripts/upgrade/mysql/upgrade.order.mysql
index d7091b5..420174a 100644
--- a/metastore/scripts/upgrade/mysql/upgrade.order.mysql
+++ b/metastore/scripts/upgrade/mysql/upgrade.order.mysql
@@ -12,5 +12,3 @@
 1.2.0-to-2.0.0
 2.0.0-to-2.1.0
 2.1.0-to-2.2.0
-2.2.0-to-2.3.0
-2.3.0-to-3.0.0

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/oracle/039-HIVE-12274.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/039-HIVE-12274.oracle.sql b/metastore/scripts/upgrade/oracle/039-HIVE-12274.oracle.sql
deleted file mode 100644
index 4080685..0000000
--- a/metastore/scripts/upgrade/oracle/039-HIVE-12274.oracle.sql
+++ /dev/null
@@ -1,21 +0,0 @@
--- change PARAM_VALUE to CLOBs
-ALTER TABLE COLUMNS_V2 MODIFY (TYPE_NAME CLOB);;
-ALTER TABLE TABLE_PARAMS MODIFY (PARAM_VALUE CLOB);
-ALTER TABLE SERDE_PARAMS MODIFY (PARAM_VALUE CLOB);
-ALTER TABLE SD_PARAMS MODIFY (PARAM_VALUE CLOB);
-
--- Expand the hive table name length to 256
-ALTER TABLE TBLS MODIFY (TBL_NAME VARCHAR2(256));
-ALTER TABLE NOTIFICATION_LOG MODIFY (TBL_NAME VARCHAR2(256));
-ALTER TABLE PARTITION_EVENTS MODIFY (TBL_NAME VARCHAR2(256));
-ALTER TABLE TAB_COL_STATS MODIFY (TABLE_NAME VARCHAR2(256));
-ALTER TABLE PART_COL_STATS MODIFY (TABLE_NAME VARCHAR2(256));
-ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY (CTC_TABLE VARCHAR2(256));
-
--- Expand the hive column name length to 767
-ALTER TABLE COLUMNS_V2 MODIFY (COLUMN_NAME VARCHAR(767) NOT NULL);
-ALTER TABLE PART_COL_PRIVS MODIFY (COLUMN_NAME VARCHAR2(767));
-ALTER TABLE TBL_COL_PRIVS MODIFY (COLUMN_NAME VARCHAR2(767));
-ALTER TABLE SORT_COLS MODIFY (COLUMN_NAME VARCHAR2(767));
-ALTER TABLE TAB_COL_STATS MODIFY (COLUMN_NAME VARCHAR2(767));
-ALTER TABLE PART_COL_STATS MODIFY (COLUMN_NAME VARCHAR2(767) NOT NULL);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/oracle/040-HIVE-16399.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/040-HIVE-16399.oracle.sql b/metastore/scripts/upgrade/oracle/040-HIVE-16399.oracle.sql
deleted file mode 100644
index f6cc31f..0000000
--- a/metastore/scripts/upgrade/oracle/040-HIVE-16399.oracle.sql
+++ /dev/null
@@ -1 +0,0 @@
-CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql
index c9b1aeb..70592bc 100644
--- a/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql
@@ -29,7 +29,7 @@ ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_N
 CREATE TABLE PART_COL_PRIVS
 (
     PART_COLUMN_GRANT_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(767) NULL,
+    "COLUMN_NAME" VARCHAR2(1000) NULL,
     CREATE_TIME NUMBER (10) NOT NULL,
     GRANT_OPTION NUMBER (5) NOT NULL,
     GRANTOR VARCHAR2(128) NULL,
@@ -55,8 +55,8 @@ CREATE TABLE COLUMNS_V2
 (
     CD_ID NUMBER NOT NULL,
     "COMMENT" VARCHAR2(256) NULL,
-    "COLUMN_NAME" VARCHAR2(767) NOT NULL,
-    TYPE_NAME CLOB NOT NULL,
+    "COLUMN_NAME" VARCHAR2(1000) NOT NULL,
+    TYPE_NAME VARCHAR2(4000) NOT NULL,
     INTEGER_IDX NUMBER(10) NOT NULL
 );
 
@@ -166,7 +166,7 @@ ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PA
 CREATE TABLE TBL_COL_PRIVS
 (
     TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(767) NULL,
+    "COLUMN_NAME" VARCHAR2(1000) NULL,
     CREATE_TIME NUMBER (10) NOT NULL,
     GRANT_OPTION NUMBER (5) NOT NULL,
     GRANTOR VARCHAR2(128) NULL,
@@ -222,7 +222,7 @@ CREATE TABLE SD_PARAMS
 (
     SD_ID NUMBER NOT NULL,
     PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE CLOB NULL
+    PARAM_VALUE VARCHAR2(4000) NULL
 );
 
 ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
@@ -263,7 +263,7 @@ CREATE TABLE TABLE_PARAMS
 (
     TBL_ID NUMBER NOT NULL,
     PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE CLOB NULL
+    PARAM_VALUE VARCHAR2(4000) NULL
 );
 
 ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
@@ -272,7 +272,7 @@ ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARA
 CREATE TABLE SORT_COLS
 (
     SD_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(767) NULL,
+    "COLUMN_NAME" VARCHAR2(1000) NULL,
     "ORDER" NUMBER (10) NOT NULL,
     INTEGER_IDX NUMBER(10) NOT NULL
 );
@@ -325,7 +325,7 @@ CREATE TABLE SERDE_PARAMS
 (
     SERDE_ID NUMBER NOT NULL,
     PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE CLOB NULL
+    PARAM_VALUE VARCHAR2(4000) NULL
 );
 
 ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
@@ -372,7 +372,7 @@ CREATE TABLE TBLS
     OWNER VARCHAR2(767) NULL,
     RETENTION NUMBER (10) NOT NULL,
     SD_ID NUMBER NULL,
-    TBL_NAME VARCHAR2(256) NULL,
+    TBL_NAME VARCHAR2(128) NULL,
     TBL_TYPE VARCHAR2(128) NULL,
     VIEW_EXPANDED_TEXT CLOB NULL,
     VIEW_ORIGINAL_TEXT CLOB NULL,
@@ -391,7 +391,7 @@ CREATE TABLE PARTITION_EVENTS
     EVENT_TIME NUMBER NOT NULL,
     EVENT_TYPE NUMBER (10) NOT NULL,
     PARTITION_NAME VARCHAR2(767) NULL,
-    TBL_NAME VARCHAR2(256) NULL
+    TBL_NAME VARCHAR2(128) NULL
 );
 
 ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
@@ -469,8 +469,8 @@ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OI
 CREATE TABLE TAB_COL_STATS (
  CS_ID NUMBER NOT NULL,
  DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(256) NOT NULL,
- COLUMN_NAME VARCHAR2(767) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL, 
+ COLUMN_NAME VARCHAR2(1000) NOT NULL,
  COLUMN_TYPE VARCHAR2(128) NOT NULL,
  TBL_ID NUMBER NOT NULL,
  LONG_LOW_VALUE NUMBER,
@@ -504,9 +504,9 @@ CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
 CREATE TABLE PART_COL_STATS (
  CS_ID NUMBER NOT NULL,
  DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(256) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
  PARTITION_NAME VARCHAR2(767) NOT NULL,
- COLUMN_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_NAME VARCHAR2(1000) NOT NULL,
  COLUMN_TYPE VARCHAR2(128) NOT NULL,
  PART_ID NUMBER NOT NULL,
  LONG_LOW_VALUE NUMBER,
@@ -561,7 +561,7 @@ CREATE TABLE NOTIFICATION_LOG
     EVENT_TIME NUMBER(10) NOT NULL,
     EVENT_TYPE VARCHAR2(32) NOT NULL,
     DB_NAME VARCHAR2(128),
-    TBL_NAME VARCHAR2(256),
+    TBL_NAME VARCHAR2(128),
     MESSAGE CLOB NULL,
     MESSAGE_FORMAT VARCHAR(16) NULL
 );

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/oracle/hive-schema-2.3.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/hive-schema-2.3.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-schema-2.3.0.oracle.sql
deleted file mode 100644
index 259a2ad..0000000
--- a/metastore/scripts/upgrade/oracle/hive-schema-2.3.0.oracle.sql
+++ /dev/null
@@ -1,811 +0,0 @@
--- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
--- NOTE: Some versions of SchemaTool do not automatically generate this table.
--- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
-CREATE TABLE SEQUENCE_TABLE
-(
-   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
-   NEXT_VAL NUMBER NOT NULL
-);
-
-ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
-
--- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
--- This table is required if datanucleus.autoStartMechanism=SchemaTable
--- NOTE: Some versions of SchemaTool do not automatically generate this table.
--- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
-CREATE TABLE NUCLEUS_TABLES
-(
-   CLASS_NAME VARCHAR2(128) NOT NULL,
-   TABLE_NAME VARCHAR2(128) NOT NULL,
-   TYPE VARCHAR2(4) NOT NULL,
-   OWNER VARCHAR2(2) NOT NULL,
-   VERSION VARCHAR2(20) NOT NULL,
-   INTERFACE_NAME VARCHAR2(255) NULL
-);
-
-ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
-
--- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-CREATE TABLE PART_COL_PRIVS
-(
-    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(767) NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PART_ID NUMBER NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    PART_COL_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
-
--- Table CDS.
-CREATE TABLE CDS
-(
-    CD_ID NUMBER NOT NULL
-);
-
-ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
-
--- Table COLUMNS_V2 for join relationship
-CREATE TABLE COLUMNS_V2
-(
-    CD_ID NUMBER NOT NULL,
-    "COMMENT" VARCHAR2(256) NULL,
-    "COLUMN_NAME" VARCHAR2(767) NOT NULL,
-    TYPE_NAME CLOB NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
-
--- Table PARTITION_KEY_VALS for join relationship
-CREATE TABLE PARTITION_KEY_VALS
-(
-    PART_ID NUMBER NOT NULL,
-    PART_KEY_VAL VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
-
--- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE TABLE DBS
-(
-    DB_ID NUMBER NOT NULL,
-    "DESC" VARCHAR2(4000) NULL,
-    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
-    "NAME" VARCHAR2(128) NULL,
-    OWNER_NAME VARCHAR2(128) NULL,
-    OWNER_TYPE VARCHAR2(10) NULL
-);
-
-ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
-
--- Table PARTITION_PARAMS for join relationship
-CREATE TABLE PARTITION_PARAMS
-(
-    PART_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
-
--- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-CREATE TABLE SERDES
-(
-    SERDE_ID NUMBER NOT NULL,
-    "NAME" VARCHAR2(128) NULL,
-    SLIB VARCHAR2(4000) NULL
-);
-
-ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
-
--- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
-CREATE TABLE TYPES
-(
-    TYPES_ID NUMBER NOT NULL,
-    TYPE_NAME VARCHAR2(128) NULL,
-    TYPE1 VARCHAR2(767) NULL,
-    TYPE2 VARCHAR2(767) NULL
-);
-
-ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
-
--- Table PARTITION_KEYS for join relationship
-CREATE TABLE PARTITION_KEYS
-(
-    TBL_ID NUMBER NOT NULL,
-    PKEY_COMMENT VARCHAR2(4000) NULL,
-    PKEY_NAME VARCHAR2(128) NOT NULL,
-    PKEY_TYPE VARCHAR2(767) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
-
--- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE TABLE ROLES
-(
-    ROLE_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    OWNER_NAME VARCHAR2(128) NULL,
-    ROLE_NAME VARCHAR2(128) NULL
-);
-
-ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
-
--- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
-CREATE TABLE PARTITIONS
-(
-    PART_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    PART_NAME VARCHAR2(767) NULL,
-    SD_ID NUMBER NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
-
--- Table INDEX_PARAMS for join relationship
-CREATE TABLE INDEX_PARAMS
-(
-    INDEX_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
-
--- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-CREATE TABLE TBL_COL_PRIVS
-(
-    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(767) NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    TBL_COL_PRIV VARCHAR2(128) NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
-
--- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
-CREATE TABLE IDXS
-(
-    INDEX_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
-    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
-    INDEX_NAME VARCHAR2(128) NULL,
-    INDEX_TBL_ID NUMBER NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    ORIG_TBL_ID NUMBER NULL,
-    SD_ID NUMBER NULL
-);
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
-
--- Table BUCKETING_COLS for join relationship
-CREATE TABLE BUCKETING_COLS
-(
-    SD_ID NUMBER NOT NULL,
-    BUCKET_COL_NAME VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table TYPE_FIELDS for join relationship
-CREATE TABLE TYPE_FIELDS
-(
-    TYPE_NAME NUMBER NOT NULL,
-    "COMMENT" VARCHAR2(256) NULL,
-    FIELD_NAME VARCHAR2(128) NOT NULL,
-    FIELD_TYPE VARCHAR2(767) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
-
--- Table SD_PARAMS for join relationship
-CREATE TABLE SD_PARAMS
-(
-    SD_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE CLOB NULL
-);
-
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
-
--- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE TABLE GLOBAL_PRIVS
-(
-    USER_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    USER_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
-
--- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-CREATE TABLE SDS
-(
-    SD_ID NUMBER NOT NULL,
-    CD_ID NUMBER NULL,
-    INPUT_FORMAT VARCHAR2(4000) NULL,
-    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
-    LOCATION VARCHAR2(4000) NULL,
-    NUM_BUCKETS NUMBER (10) NOT NULL,
-    OUTPUT_FORMAT VARCHAR2(4000) NULL,
-    SERDE_ID NUMBER NULL,
-    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
-);
-
-ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
-
--- Table TABLE_PARAMS for join relationship
-CREATE TABLE TABLE_PARAMS
-(
-    TBL_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE CLOB NULL
-);
-
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
-
--- Table SORT_COLS for join relationship
-CREATE TABLE SORT_COLS
-(
-    SD_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(767) NULL,
-    "ORDER" NUMBER (10) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-CREATE TABLE TBL_PRIVS
-(
-    TBL_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    TBL_PRIV VARCHAR2(128) NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
-
--- Table DATABASE_PARAMS for join relationship
-CREATE TABLE DATABASE_PARAMS
-(
-    DB_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(180) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
-
--- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
-CREATE TABLE ROLE_MAP
-(
-    ROLE_GRANT_ID NUMBER NOT NULL,
-    ADD_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    ROLE_ID NUMBER NULL
-);
-
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
-
--- Table SERDE_PARAMS for join relationship
-CREATE TABLE SERDE_PARAMS
-(
-    SERDE_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE CLOB NULL
-);
-
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
-
--- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-CREATE TABLE PART_PRIVS
-(
-    PART_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PART_ID NUMBER NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    PART_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
-
--- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-CREATE TABLE DB_PRIVS
-(
-    DB_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DB_ID NUMBER NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    DB_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
-
--- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
-CREATE TABLE TBLS
-(
-    TBL_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DB_ID NUMBER NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    OWNER VARCHAR2(767) NULL,
-    RETENTION NUMBER (10) NOT NULL,
-    SD_ID NUMBER NULL,
-    TBL_NAME VARCHAR2(256) NULL,
-    TBL_TYPE VARCHAR2(128) NULL,
-    VIEW_EXPANDED_TEXT CLOB NULL,
-    VIEW_ORIGINAL_TEXT CLOB NULL,
-    IS_REWRITE_ENABLED NUMBER(1) NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0))
-);
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
-
--- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE TABLE PARTITION_EVENTS
-(
-    PART_NAME_ID NUMBER NOT NULL,
-    DB_NAME VARCHAR2(128) NULL,
-    EVENT_TIME NUMBER NOT NULL,
-    EVENT_TYPE NUMBER (10) NOT NULL,
-    PARTITION_NAME VARCHAR2(767) NULL,
-    TBL_NAME VARCHAR2(256) NULL
-);
-
-ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
-
--- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
-CREATE TABLE SKEWED_STRING_LIST
-(
-    STRING_LIST_ID NUMBER NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
-
-CREATE TABLE SKEWED_STRING_LIST_VALUES
-(
-    STRING_LIST_ID NUMBER NOT NULL,
-    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_COL_NAMES
-(
-    SD_ID NUMBER NOT NULL,
-    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
-(
-    SD_ID NUMBER NOT NULL,
-    STRING_LIST_ID_KID NUMBER NOT NULL,
-    "LOCATION" VARCHAR2(4000) NULL
-);
-
-CREATE TABLE MASTER_KEYS
-(
-    KEY_ID NUMBER (10) NOT NULL,
-    MASTER_KEY VARCHAR2(767) NULL
-);
-
-CREATE TABLE DELEGATION_TOKENS
-(
-    TOKEN_IDENT VARCHAR2(767) NOT NULL,
-    TOKEN VARCHAR2(767) NULL
-);
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_VALUES
-(
-    SD_ID_OID NUMBER NOT NULL,
-    STRING_LIST_ID_EID NUMBER NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
--- column statistics
-
-CREATE TABLE TAB_COL_STATS (
- CS_ID NUMBER NOT NULL,
- DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(256) NOT NULL,
- COLUMN_NAME VARCHAR2(767) NOT NULL,
- COLUMN_TYPE VARCHAR2(128) NOT NULL,
- TBL_ID NUMBER NOT NULL,
- LONG_LOW_VALUE NUMBER,
- LONG_HIGH_VALUE NUMBER,
- DOUBLE_LOW_VALUE NUMBER,
- DOUBLE_HIGH_VALUE NUMBER,
- BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
- BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
- NUM_NULLS NUMBER NOT NULL,
- NUM_DISTINCTS NUMBER,
- AVG_COL_LEN NUMBER,
- MAX_COL_LEN NUMBER,
- NUM_TRUES NUMBER,
- NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
-);
-
-CREATE TABLE VERSION (
-  VER_ID NUMBER NOT NULL,
-  SCHEMA_VERSION VARCHAR(127) NOT NULL,
-  VERSION_COMMENT VARCHAR(255)
-);
-ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
-
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
-
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
-
-CREATE TABLE PART_COL_STATS (
- CS_ID NUMBER NOT NULL,
- DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(256) NOT NULL,
- PARTITION_NAME VARCHAR2(767) NOT NULL,
- COLUMN_NAME VARCHAR2(767) NOT NULL,
- COLUMN_TYPE VARCHAR2(128) NOT NULL,
- PART_ID NUMBER NOT NULL,
- LONG_LOW_VALUE NUMBER,
- LONG_HIGH_VALUE NUMBER,
- DOUBLE_LOW_VALUE NUMBER,
- DOUBLE_HIGH_VALUE NUMBER,
- BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
- BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
- NUM_NULLS NUMBER NOT NULL,
- NUM_DISTINCTS NUMBER,
- AVG_COL_LEN NUMBER,
- MAX_COL_LEN NUMBER,
- NUM_TRUES NUMBER,
- NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
-);
-
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
-
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
-
-CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
-
-CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
-
-CREATE TABLE FUNCS (
-  FUNC_ID NUMBER NOT NULL,
-  CLASS_NAME VARCHAR2(4000),
-  CREATE_TIME NUMBER(10) NOT NULL,
-  DB_ID NUMBER,
-  FUNC_NAME VARCHAR2(128),
-  FUNC_TYPE NUMBER(10) NOT NULL,
-  OWNER_NAME VARCHAR2(128),
-  OWNER_TYPE VARCHAR2(10)
-);
-
-ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
-
-CREATE TABLE FUNC_RU (
-  FUNC_ID NUMBER NOT NULL,
-  RESOURCE_TYPE NUMBER(10) NOT NULL,
-  RESOURCE_URI VARCHAR2(4000),
-  INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
-
-CREATE TABLE NOTIFICATION_LOG
-(
-    NL_ID NUMBER NOT NULL,
-    EVENT_ID NUMBER NOT NULL,
-    EVENT_TIME NUMBER(10) NOT NULL,
-    EVENT_TYPE VARCHAR2(32) NOT NULL,
-    DB_NAME VARCHAR2(128),
-    TBL_NAME VARCHAR2(256),
-    MESSAGE CLOB NULL,
-    MESSAGE_FORMAT VARCHAR(16) NULL
-);
-
-ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID);
-
-CREATE TABLE NOTIFICATION_SEQUENCE
-(
-    NNI_ID NUMBER NOT NULL,
-    NEXT_EVENT_ID NUMBER NOT NULL
-);
-
-ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);
-
-
-
--- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
-
-CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table COLUMNS_V2
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
-
-
--- Constraints for table PARTITION_KEY_VALS
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
-
-
--- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
-
-
--- Constraints for table PARTITION_PARAMS
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
-
-
--- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-
--- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
-CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
-
-
--- Constraints for table PARTITION_KEYS
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
-
-
--- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
-
-
--- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
-
-CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
-
-CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
-
-
--- Constraints for table INDEX_PARAMS
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
-
-
--- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
-
-
--- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
-
-CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
-
-CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
-
-CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
-
-
--- Constraints for table BUCKETING_COLS
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
-
-
--- Constraints for table TYPE_FIELDS
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
-
-
--- Constraints for table SD_PARAMS
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
-
-
--- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
-CREATE INDEX SDS_N50 ON SDS (CD_ID);
-
-
--- Constraints for table TABLE_PARAMS
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
-
-
--- Constraints for table SORT_COLS
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
-
-
--- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
-
-CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table DATABASE_PARAMS
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
-
-
--- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
-
-CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table SERDE_PARAMS
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
-
-
--- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
-
-
--- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
-
-
--- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
-
-CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
-
-CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
-
-
--- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
-
-
--- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
-ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
-
-CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
-
-CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
-
-
--- Constraints for table FUNC_RU for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
-ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
-
-CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
-
-CREATE TABLE KEY_CONSTRAINTS
-(
-  CHILD_CD_ID NUMBER,
-  CHILD_INTEGER_IDX NUMBER,
-  CHILD_TBL_ID NUMBER,
-  PARENT_CD_ID NUMBER NOT NULL,
-  PARENT_INTEGER_IDX NUMBER NOT NULL,
-  PARENT_TBL_ID NUMBER NOT NULL,
-  POSITION NUMBER NOT NULL,
-  CONSTRAINT_NAME VARCHAR(400) NOT NULL,
-  CONSTRAINT_TYPE NUMBER NOT NULL,
-  UPDATE_RULE NUMBER,
-  DELETE_RULE NUMBER,
-  ENABLE_VALIDATE_RELY NUMBER NOT NULL
-) ;
-
-ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
-
-CREATE INDEX CONSTRAINTS_PT_INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID);
-
-
-------------------------------
--- Transaction and lock tables
-------------------------------
-@hive-txn-schema-2.3.0.oracle.sql;
-
--- -----------------------------------------------------------------
--- Record schema version. Should be the last step in the init script
--- -----------------------------------------------------------------
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '2.3.0', 'Hive release version 2.3.0');


[36/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/main/java/org/hadoop/hive/jdbc/SSLTestUtils.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/main/java/org/hadoop/hive/jdbc/SSLTestUtils.java b/itests/hive-unit/src/main/java/org/hadoop/hive/jdbc/SSLTestUtils.java
deleted file mode 100644
index 6cbcf8c..0000000
--- a/itests/hive-unit/src/main/java/org/hadoop/hive/jdbc/SSLTestUtils.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.hadoop.hive.jdbc;
-
-import java.io.File;
-import java.net.URLEncoder;
-import java.sql.Connection;
-import java.sql.Statement;
-import java.util.Map;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-
-
-public class SSLTestUtils {
-
-  private static final String LOCALHOST_KEY_STORE_NAME = "keystore.jks";
-  private static final String TRUST_STORE_NAME = "truststore.jks";
-  private static final String KEY_STORE_TRUST_STORE_PASSWORD = "HiveJdbc";
-  private static final String HS2_BINARY_MODE = "binary";
-  private static final String HS2_HTTP_MODE = "http";
-  private static final String HS2_HTTP_ENDPOINT = "cliservice";
-  private static final String HS2_BINARY_AUTH_MODE = "NONE";
-
-  private static final HiveConf conf = new HiveConf();
-  private static final String dataFileDir = !System.getProperty("test.data.files", "").isEmpty() ? System.getProperty(
-          "test.data.files") : conf.get("test.data.files").replace('\\', '/').replace("c:", "");
-
-  public static final String SSL_CONN_PARAMS = "ssl=true;sslTrustStore="
-      + URLEncoder.encode(dataFileDir + File.separator + TRUST_STORE_NAME) + ";trustStorePassword="
-      + KEY_STORE_TRUST_STORE_PASSWORD;
-
-  public static void setSslConfOverlay(Map<String, String> confOverlay) {
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_USE_SSL.varname, "true");
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH.varname,
-            dataFileDir + File.separator + LOCALHOST_KEY_STORE_NAME);
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname,
-            KEY_STORE_TRUST_STORE_PASSWORD);
-  }
-
-  public static void setMetastoreSslConf(HiveConf conf) {
-    conf.setBoolVar(HiveConf.ConfVars.HIVE_METASTORE_USE_SSL, true);
-    conf.setVar(HiveConf.ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH,
-            dataFileDir + File.separator + LOCALHOST_KEY_STORE_NAME);
-    conf.setVar(HiveConf.ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PASSWORD,
-            KEY_STORE_TRUST_STORE_PASSWORD);
-    conf.setVar(HiveConf.ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PATH,
-            dataFileDir + File.separator + TRUST_STORE_NAME);
-    conf.setVar(HiveConf.ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD,
-            KEY_STORE_TRUST_STORE_PASSWORD);
-  }
-
-  public static void clearSslConfOverlay(Map<String, String> confOverlay) {
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_USE_SSL.varname, "false");
-  }
-
-  public static void setHttpConfOverlay(Map<String, String> confOverlay) {
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_TRANSPORT_MODE.varname, HS2_HTTP_MODE);
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_HTTP_PATH.varname, HS2_HTTP_ENDPOINT);
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "true");
-  }
-
-  public static void setBinaryConfOverlay(Map<String, String> confOverlay) {
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_TRANSPORT_MODE.varname, HS2_BINARY_MODE);
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION.varname, HS2_BINARY_AUTH_MODE);
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "true");
-  }
-
-  public static void setupTestTableWithData(String tableName, Path dataFilePath,
-      Connection hs2Conn) throws Exception {
-    Statement stmt = hs2Conn.createStatement();
-    stmt.execute("set hive.support.concurrency = false");
-
-    stmt.execute("drop table if exists " + tableName);
-    stmt.execute("create table " + tableName
-        + " (under_col int comment 'the under column', value string)");
-
-    // load data
-    stmt.execute("load data local inpath '"
-        + dataFilePath.toString() + "' into table " + tableName);
-    stmt.close();
-  }
-
-  public static String getDataFileDir() {
-    return dataFileDir;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
index bc00d11..c6a906a 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
@@ -26,6 +26,8 @@ public class TestEmbeddedHiveMetaStore extends TestHiveMetaStore {
   @Override
   protected void setUp() throws Exception {
     super.setUp();
+    hiveConf.setBoolean(
+        HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS.varname, true);
     warehouse = new Warehouse(hiveConf);
     try {
       client = new HiveMetaStoreClient(hiveConf);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index b95c25c..af125c3 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -171,6 +171,14 @@ public abstract class TestHiveMetaStore extends TestCase {
       db = client.getDatabase(dbName);
       Path dbPath = new Path(db.getLocationUri());
       FileSystem fs = FileSystem.get(dbPath.toUri(), hiveConf);
+      boolean inheritPerms = hiveConf.getBoolVar(
+          HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
+      FsPermission dbPermission = fs.getFileStatus(dbPath).getPermission();
+      if (inheritPerms) {
+         //Set different perms for the database dir for further tests
+         dbPermission = new FsPermission((short)488);
+         fs.setPermission(dbPath, dbPermission);
+      }
 
       client.dropType(typeName);
       Type typ1 = new Type();
@@ -231,6 +239,9 @@ public abstract class TestHiveMetaStore extends TestCase {
         tbl = client.getTable(dbName, tblName);
       }
 
+      assertEquals(dbPermission, fs.getFileStatus(new Path(tbl.getSd().getLocation()))
+          .getPermission());
+
       Partition part = makePartitionObject(dbName, tblName, vals, tbl, "/part1");
       Partition part2 = makePartitionObject(dbName, tblName, vals2, tbl, "/part2");
       Partition part3 = makePartitionObject(dbName, tblName, vals3, tbl, "/part3");
@@ -248,12 +259,20 @@ public abstract class TestHiveMetaStore extends TestCase {
       assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
       Partition retp = client.add_partition(part);
       assertNotNull("Unable to create partition " + part, retp);
+      assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation()))
+          .getPermission());
       Partition retp2 = client.add_partition(part2);
       assertNotNull("Unable to create partition " + part2, retp2);
+      assertEquals(dbPermission, fs.getFileStatus(new Path(retp2.getSd().getLocation()))
+          .getPermission());
       Partition retp3 = client.add_partition(part3);
       assertNotNull("Unable to create partition " + part3, retp3);
+      assertEquals(dbPermission, fs.getFileStatus(new Path(retp3.getSd().getLocation()))
+          .getPermission());
       Partition retp4 = client.add_partition(part4);
       assertNotNull("Unable to create partition " + part4, retp4);
+      assertEquals(dbPermission, fs.getFileStatus(new Path(retp4.getSd().getLocation()))
+          .getPermission());
 
       Partition part_get = client.getPartition(dbName, tblName, part.getValues());
       if(isThriftClient) {
@@ -375,6 +394,8 @@ public abstract class TestHiveMetaStore extends TestCase {
       // tested
       retp = client.add_partition(part);
       assertNotNull("Unable to create partition " + part, retp);
+      assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation()))
+          .getPermission());
 
       // test add_partitions
 
@@ -410,8 +431,9 @@ public abstract class TestHiveMetaStore extends TestCase {
 
       // create dir for /mpart5
       Path mp5Path = new Path(mpart5.getSd().getLocation());
-      warehouse.mkdirs(mp5Path);
+      warehouse.mkdirs(mp5Path, true);
       assertTrue(fs.exists(mp5Path));
+      assertEquals(dbPermission, fs.getFileStatus(mp5Path).getPermission());
 
       // add_partitions(5,4) : err = duplicate keyvals on mpart4
       savedException = null;
@@ -2480,7 +2502,7 @@ public abstract class TestHiveMetaStore extends TestCase {
       //test params
       //test_param_2 = "50"
       filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS +
-          "test_param_2 LIKE \"50\"";
+          "test_param_2 = \"50\"";
 
       tableNames = client.listTableNamesByFilter(dbName, filter, (short)-1);
       assertEquals(2, tableNames.size());
@@ -2489,31 +2511,30 @@ public abstract class TestHiveMetaStore extends TestCase {
 
       //test_param_2 = "75"
       filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS +
-          "test_param_2 LIKE \"75\"";
+          "test_param_2 = \"75\"";
 
       tableNames = client.listTableNamesByFilter(dbName, filter, (short)-1);
       assertEquals(0, tableNames.size());
 
       //key_dne = "50"
       filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS +
-          "key_dne LIKE \"50\"";
+          "key_dne = \"50\"";
 
       tableNames = client.listTableNamesByFilter(dbName, filter, (short)-1);
       assertEquals(0, tableNames.size());
 
       //test_param_1 != "yellow"
       filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS +
-          "test_param_1 NOT LIKE \"yellow\"";
+          "test_param_1 <> \"yellow\"";
 
-      // Commenting as part of HIVE-12274 != and <> are not supported for CLOBs
-      // tableNames = client.listTableNamesByFilter(dbName, filter, (short) 2);
-      // assertEquals(2, tableNames.size());
+      tableNames = client.listTableNamesByFilter(dbName, filter, (short) 2);
+      assertEquals(2, tableNames.size());
 
       filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS +
-          "test_param_1 NOT LIKE \"yellow\"";
+          "test_param_1 != \"yellow\"";
 
-      // tableNames = client.listTableNamesByFilter(dbName, filter, (short) 2);
-      // assertEquals(2, tableNames.size());
+      tableNames = client.listTableNamesByFilter(dbName, filter, (short) 2);
+      assertEquals(2, tableNames.size());
 
       //owner = "testOwner1" and (lastAccessTime = 30 or test_param_1 = "hi")
       filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_OWNER +
@@ -2521,7 +2542,7 @@ public abstract class TestHiveMetaStore extends TestCase {
         org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_LAST_ACCESS +
         " = 30 or " +
         org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS +
-        "test_param_1 LIKE \"hi\")";
+        "test_param_1 = \"hi\")";
       tableNames = client.listTableNamesByFilter(dbName, filter, (short)-1);
 
       assertEquals(2, tableNames.size());

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
index 1002be7..a0f18c6 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
@@ -128,7 +128,7 @@ public class TestHiveMetaStoreTxns {
     Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
         validTxns.isTxnRangeValid(5L, 10L));
 
-    validTxns = new ValidReadTxnList("10:5:4,5,6:");
+    validTxns = new ValidReadTxnList("10:5:4:5:6");
     Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
         validTxns.isTxnRangeValid(4,6));
     Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
@@ -223,15 +223,15 @@ public class TestHiveMetaStoreTxns {
   @Test
   public void stringifyValidTxns() throws Exception {
     // Test with just high water mark
-    ValidTxnList validTxns = new ValidReadTxnList("1:" + Long.MAX_VALUE + "::");
+    ValidTxnList validTxns = new ValidReadTxnList("1:" + Long.MAX_VALUE + ":");
     String asString = validTxns.toString();
-    Assert.assertEquals("1:" + Long.MAX_VALUE + "::", asString);
+    Assert.assertEquals("1:" + Long.MAX_VALUE + ":", asString);
     validTxns = new ValidReadTxnList(asString);
     Assert.assertEquals(1, validTxns.getHighWatermark());
     Assert.assertNotNull(validTxns.getInvalidTransactions());
     Assert.assertEquals(0, validTxns.getInvalidTransactions().length);
     asString = validTxns.toString();
-    Assert.assertEquals("1:" + Long.MAX_VALUE + "::", asString);
+    Assert.assertEquals("1:" + Long.MAX_VALUE + ":", asString);
     validTxns = new ValidReadTxnList(asString);
     Assert.assertEquals(1, validTxns.getHighWatermark());
     Assert.assertNotNull(validTxns.getInvalidTransactions());

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
index 7188af6..6d1673d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
@@ -189,13 +189,13 @@ public class TestMetastoreVersion extends TestCase {
 
   //  write the given version to metastore
   private String getVersion(HiveConf conf) throws HiveMetaException {
-    MetaStoreSchemaInfo schemInfo = new MetaStoreSchemaInfo(metaStoreRoot, "derby");
+    MetaStoreSchemaInfo schemInfo = new MetaStoreSchemaInfo(metaStoreRoot, conf, "derby");
     return getMetaStoreVersion();
   }
 
   //  write the given version to metastore
   private void setVersion(HiveConf conf, String version) throws HiveMetaException {
-    MetaStoreSchemaInfo schemInfo = new MetaStoreSchemaInfo(metaStoreRoot, "derby");
+    MetaStoreSchemaInfo schemInfo = new MetaStoreSchemaInfo(metaStoreRoot, conf, "derby");
     setMetaStoreVersion(version, "setVersion test");
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
index 3f9eec3..1ac4d01 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
@@ -151,15 +151,15 @@ public class TestReplChangeManager {
     Partition part3 = createPartition(dbName, tblName, columns, values, serdeInfo);
     client.add_partition(part3);
 
-    Path part1Path = new Path(warehouse.getDefaultPartitionPath(db, tblName, ImmutableMap.of("dt", "20160101")), "part");
+    Path part1Path = new Path(warehouse.getPartitionPath(db, tblName, ImmutableMap.of("dt", "20160101")), "part");
     createFile(part1Path, "p1");
     String path1Chksum = ReplChangeManager.getChksumString(part1Path, fs);
 
-    Path part2Path = new Path(warehouse.getDefaultPartitionPath(db, tblName, ImmutableMap.of("dt", "20160102")), "part");
+    Path part2Path = new Path(warehouse.getPartitionPath(db, tblName, ImmutableMap.of("dt", "20160102")), "part");
     createFile(part2Path, "p2");
     String path2Chksum = ReplChangeManager.getChksumString(part2Path, fs);
 
-    Path part3Path = new Path(warehouse.getDefaultPartitionPath(db, tblName, ImmutableMap.of("dt", "20160103")), "part");
+    Path part3Path = new Path(warehouse.getPartitionPath(db, tblName, ImmutableMap.of("dt", "20160103")), "part");
     createFile(part3Path, "p3");
     String path3Chksum = ReplChangeManager.getChksumString(part3Path, fs);
 
@@ -221,15 +221,15 @@ public class TestReplChangeManager {
 
     client.createTable(tbl);
 
-    Path filePath1 = new Path(warehouse.getDefaultTablePath(db, tblName), "part1");
+    Path filePath1 = new Path(warehouse.getTablePath(db, tblName), "part1");
     createFile(filePath1, "f1");
     String fileChksum1 = ReplChangeManager.getChksumString(filePath1, fs);
 
-    Path filePath2 = new Path(warehouse.getDefaultTablePath(db, tblName), "part2");
+    Path filePath2 = new Path(warehouse.getTablePath(db, tblName), "part2");
     createFile(filePath2, "f2");
     String fileChksum2 = ReplChangeManager.getChksumString(filePath2, fs);
 
-    Path filePath3 = new Path(warehouse.getDefaultTablePath(db, tblName), "part3");
+    Path filePath3 = new Path(warehouse.getTablePath(db, tblName), "part3");
     createFile(filePath3, "f3");
     String fileChksum3 = ReplChangeManager.getChksumString(filePath3, fs);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAutoPurgeTables.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAutoPurgeTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAutoPurgeTables.java
deleted file mode 100644
index abf9769..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAutoPurgeTables.java
+++ /dev/null
@@ -1,436 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.HashMap;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.shims.ShimLoader;
-import org.apache.hive.jdbc.miniHS2.MiniHS2;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class TestAutoPurgeTables {
-  private static final String driverName = "org.apache.hive.jdbc.HiveDriver";
-  private static final String testDbName = "auto_purge_test_db";
-  //private static final String testTableName = "auto_purge_test_table";
-  private static final String INSERT_OVERWRITE_COMMAND_FORMAT =
-      "insert overwrite table " + testDbName + ".%s select 1, \"test\"";
-  private static final String TRUNCATE_TABLE_COMMAND_FORMAT =
-      "truncate table " + testDbName + ".%s";
-  private static final String partitionedColumnName = "partCol";
-  private static final String partitionedColumnValue1 = "20090619";
-  private static final String INSERT_OVERWRITE_COMMAND_PARTITIONED_FORMAT =
-      "insert overwrite table " + testDbName + ".%s PARTITION ("
-          + partitionedColumnName + "=" + partitionedColumnValue1 + ")" + " select 1, \"test\"";
-  private static final String partitionedColumnValue2 = "20100720";
-  private static HiveConf conf;
-  private static Connection con;
-  private static MiniHS2 miniHS2;
-  static final private Logger LOG = LoggerFactory.getLogger("TestAutoPurgeTables");
-
-  @Rule
-  public TestName name = new TestName();
-
-  private static Connection getConnection(String url) throws SQLException {
-    Connection con1;
-    con1 = DriverManager.getConnection(url, "", "");
-    Assert.assertNotNull("Connection is null", con1);
-    Assert.assertFalse("Connection should not be closed", con1.isClosed());
-    return con1;
-  }
-
-  private static void createTestTable(Statement stmt, String isAutopurge, boolean isExternal,
-      boolean isPartitioned, String testTableName) throws SQLException {
-    String createTablePrefix;
-    if (isExternal) {
-      createTablePrefix = "create external table ";
-    } else {
-      createTablePrefix = "create table ";
-    }
-    if (isPartitioned) {
-      // create a partitioned table
-      stmt.execute(createTablePrefix + testDbName + "." + testTableName + " (id int, value string) "
-          + " partitioned by (" + partitionedColumnName + " STRING)");
-      // load data
-      stmt.execute("insert into " + testDbName + "." + testTableName + " PARTITION ("
-          + partitionedColumnName + "=" + partitionedColumnValue1
-          + ") values (1, \"dummy1\"), (2, \"dummy2\"), (3, \"dummy3\")");
-      stmt.execute("insert into " + testDbName + "." + testTableName + " PARTITION ("
-          + partitionedColumnName + "=" + partitionedColumnValue2
-          + ") values (4, \"dummy4\"), (5, \"dummy5\"), (6, \"dummy6\")");
-    } else {
-      // create a table
-      stmt.execute(createTablePrefix + testDbName + "." + testTableName + " (id int, value string)");
-      // load data
-      stmt.execute("insert into " + testDbName + "." + testTableName
-          + " values (1, \"dummy1\"), (2, \"dummy2\"), (3, \"dummy3\")");
-    }
-    if (isAutopurge != null) {
-      stmt.execute("alter table " + testDbName + "." + testTableName
-          + " set tblproperties (\"auto.purge\"=\"" + isAutopurge + "\")");
-    }
-  }
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    conf = new HiveConf(TestAutoPurgeTables.class);
-    // enable trash so it can be tested
-    conf.setFloat("fs.trash.checkpoint.interval", 30);
-    conf.setFloat("fs.trash.interval", 30);
-    // Create test database and base tables once for all the test
-    miniHS2 = new MiniHS2.Builder().withConf(conf).build();
-    miniHS2.start(new HashMap<String, String>());
-    Class.forName(driverName);
-    con = getConnection(miniHS2.getBaseJdbcURL() + ";create=true");
-    try (Statement stmt = con.createStatement()) {
-      Assert.assertNotNull("Statement is null", stmt);
-      stmt.execute("set hive.support.concurrency = false");
-      stmt.execute("drop database if exists " + testDbName + " cascade");
-      stmt.execute("create database " + testDbName);
-    }
-  }
-
-  @AfterClass
-  public static void tearDownAfterClass() {
-    Statement stmt = null;
-    try {
-      stmt = con.createStatement();
-      // drop test db and its tables and views
-      stmt.execute("set hive.support.concurrency = false");
-      stmt.execute("drop database if exists " + testDbName + " cascade");
-      FileSystem fs = FileSystem.get(conf);
-      fs.deleteOnExit(ShimLoader.getHadoopShims().getCurrentTrashPath(conf, fs));
-    } catch (SQLException | IOException e) {
-      e.printStackTrace();
-    } finally {
-      if (stmt != null) {
-        try {
-          stmt.close();
-        } catch (SQLException e) {
-          //
-        }
-      }
-      if (con != null) {
-        try {
-          con.close();
-        } catch (SQLException e) {
-          //
-        }
-      }
-      if (miniHS2 != null) {
-        miniHS2.cleanup();
-        miniHS2.stop();
-        miniHS2 = null;
-      }
-    }
-  }
-
-  @Before
-  public void afterTest() throws Exception {
-    FileSystem fs = FileSystem.get(conf);
-    Path trashDir = ShimLoader.getHadoopShims().getCurrentTrashPath(conf, fs);
-    fs.delete(trashDir, true);
-  }
-
-  /**
-   * Tests if previous table data skips trash when insert overwrite table .. is run against a table
-   * which has auto.purge property set
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("true", false, false, false, name.getMethodName());
-  }
-
-  /**
-   * Tests that when auto.purge is set to an invalid string, trash should be used for insert overwrite
-   * queries
-   * 
-   * @throws Exception
-   */
-  @Test
-  public void testAutoPurgeInvalid() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("invalid", false, false, false, name.getMethodName());
-  }
-
-  /**
-   * Tests that when the auto.purge property is not set, data should be moved to trash for insert overwrite
-   * queries
-   * 
-   * @throws Exception
-   */
-  @Test
-  public void testAutoPurgeUnset() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(null, false, false, false, name.getMethodName());
-  }
-
-  /**
-   * Tests if the auto.purge property works correctly for external tables. Old data should skip
-   * trash when insert overwrite table .. is run when auto.purge is set to true
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testExternalTable() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("true", true, false, false, name.getMethodName());
-  }
-
-  /**
-   * Tests auto.purge when managed table is partitioned. Old data should skip trash when insert
-   * overwrite table .. is run and auto.purge property is set to true
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testPartitionedTable() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("true", false, true, false, name.getMethodName());
-  }
-
-  /**
-   * Tests auto.purge for an external, partitioned table. Old partition data should skip trash when
-   * auto.purge is set to true
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testExternalPartitionedTable() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("true", true, true, false, name.getMethodName());
-  }
-
-  /**
-   * Tests when auto.purge is set to false, older data is moved to Trash when insert overwrite table
-   * .. is run
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testNoAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("false", false, false, false, name.getMethodName());
-  }
-
-  /**
-   * Tests that when auto.purge is set to false on an external table, older data is moved to Trash when
-   * insert overwrite table .. is run
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testExternalNoAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("false", true, false, false, name.getMethodName());
-  }
-
-  /**
-   * Tests when auto.purge is set to false on a partitioned table, older data is moved to Trash when
-   * insert overwrite table .. is run
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testPartitionedNoAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("false", false, true, false, name.getMethodName());
-  }
-
-  /**
-   * Tests when auto.purge is set to false on a partitioned external table, older data is moved to
-   * Trash when insert overwrite table .. is run
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testPartitionedExternalNoAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("false", true, true, false, name.getMethodName());
-  }
-
-  //truncate on external table is not allowed
-  @Test(expected = SQLException.class)
-  public void testTruncatePartitionedExternalNoAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(String.valueOf(false), true, true, true, name.getMethodName());
-  }
-
-  //truncate on external table is not allowed
-  @Test(expected = SQLException.class)
-  public void testTruncateExternalNoAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(String.valueOf(false), true, false, true, name.getMethodName());
-  }
-
-  @Test
-  public void testTruncatePartitionedNoAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(String.valueOf(false), false, true, true, name.getMethodName());
-  }
-
-  @Test
-  public void testTruncateNoAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(String.valueOf(false), false, false, true, name.getMethodName());
-  }
-
-  @Test
-  public void testTruncateInvalidAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("invalid", false, false, true, name.getMethodName());
-  }
-
-  @Test
-  public void testTruncateUnsetAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(null, false, false, true, name.getMethodName());
-  }
-
-  //truncate on external table is not allowed
-  @Test(expected = SQLException.class)
-  public void testTruncatePartitionedExternalAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(String.valueOf(true), true, true, true, name.getMethodName());
-  }
-
-  //truncate on external table is not allowed
-  @Test(expected = SQLException.class)
-  public void testTruncateExternalAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(String.valueOf(true), true, false, true, name.getMethodName());
-  }
-
-  @Test
-  public void testTruncatePartitionedAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(String.valueOf(true), false, true, true, name.getMethodName());
-  }
-
-  /**
-   * Test util method to run the insert overwrite table or truncate table test on a table
-   * 
-   * @param autoPurgePropValue - string value of the auto.purge property for the test table. Ignored
-   *          if null
-   * @param isExternal - if set, creates an external table for the test
-   * @param isPartitioned - if set creates a partitioned table for the test
-   * @param isTruncateTest - if set uses truncate table command for the test. Otherwise uses Insert
-   *          overwrite table command for the test
-   * @param testTableName - test table name
-   * @throws Exception
-   */
-  private void testUtil(String autoPurgePropValue, boolean isExternal, boolean isPartitioned,
-      boolean isTruncateTest, String testTableName) throws Exception {
-    testUtil(autoPurgePropValue, isExternal, isPartitioned,
-        !"true".equalsIgnoreCase(autoPurgePropValue), isTruncateTest, testTableName);
-  }
-  /**
-   * Test util method to run the insert overwrite table or truncate table test on a table
-   * 
-   * @param isAutoPurge - If set, creates a table with auto.purge with the given value
-   * @param isExternal - if set, creates an external table for the test
-   * @param isPartitioned - if set creates a partitioned table for the test
-   * @param purgeExpected - if set, the assert condition for the test expects old table data to be
-   *          moved to trash. If not set, it asserts that the data is not moved to trash
-   * @param isTruncateTest - if set uses truncate table command for the test. Otherwise uses Insert
-   *          overwrite table command for the test
-   * @param testTableName - table name for the test table
-   * @throws Exception
-   */
-  private void testUtil(String isAutoPurge, boolean isExternal, boolean isPartitioned,
-      boolean purgeExpected, boolean isTruncateTest, String testTableName) throws Exception {
-    try (Statement stmt = con.createStatement()) {
-      // create a test table with auto.purge = true
-      createTestTable(stmt, isAutoPurge, isExternal, isPartitioned, testTableName);
-      int numFilesInTrashBefore = getTrashFileCount();
-      String command = getCommand(isTruncateTest, isPartitioned, testTableName);
-      stmt.execute(command);
-      int numFilesInTrashAfter = getTrashFileCount();
-      if (purgeExpected) {
-        Assert.assertTrue(
-            String.format(
-                "Data should have been moved to trash. Number of files in trash: before : %d after %d",
-                numFilesInTrashBefore, numFilesInTrashAfter),
-            numFilesInTrashBefore < numFilesInTrashAfter);
-      } else {
-        Assert.assertEquals(
-            String.format(
-                "Data should not have been moved to trash. Number of files in trash: before : %d after %d",
-                numFilesInTrashBefore, numFilesInTrashAfter),
-            numFilesInTrashBefore, numFilesInTrashAfter);
-      }
-    }
-  }
-
-  private static String getCommand(boolean isTruncateTest, boolean isPartitioned, String testTableName) {
-    if (isTruncateTest) {
-      return String.format(TRUNCATE_TABLE_COMMAND_FORMAT, testTableName);
-    } else if (isPartitioned) {
-      return String.format(INSERT_OVERWRITE_COMMAND_PARTITIONED_FORMAT, testTableName);
-    } else {
-      return String.format(INSERT_OVERWRITE_COMMAND_FORMAT, testTableName);
-    }
-  }
-
-  private int getTrashFileCount() throws Exception {
-    FileSystem fs = FileSystem.get(conf);
-    Path trashDir = ShimLoader.getHadoopShims().getCurrentTrashPath(conf, fs);
-    return getFileCount(fs, trashDir);
-  }
-
-  private int getFileCount(FileSystem fs, Path path) throws Exception {
-    try {
-      int count = 0;
-      if (!fs.exists(path)) {
-        return count;
-      }
-      RemoteIterator<LocatedFileStatus> lfs = fs.listFiles(path, true);
-      while (lfs.hasNext()) {
-        LocatedFileStatus lf = lfs.next();
-        LOG.info(lf.getPath().toString());
-        if (lf.isFile()) {
-          count++;
-        }
-      }
-      return count;
-    } catch (IOException e) {
-      throw new Exception("Exception while listing files on " + path, e);
-    }
-  }
-}
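For readers following the revert: the removed testUtil above works by counting the files under the HDFS trash directory before and after the INSERT OVERWRITE / TRUNCATE command and comparing the two numbers. The helper below is a minimal sketch of just that counting step, not part of the patch; the class name is made up, and it assumes the caller has already resolved the trash directory (as the removed getTrashFileCount did via ShimLoader) and passes it in as a Path.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public final class TrashFileCounter {
  private TrashFileCounter() {}

  // Recursively counts regular files under dir; returns 0 if the directory does not exist yet.
  public static int countFiles(Configuration conf, Path dir) throws Exception {
    FileSystem fs = FileSystem.get(conf);
    if (!fs.exists(dir)) {
      return 0;
    }
    int count = 0;
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(dir, true); // true = recursive
    while (it.hasNext()) {
      if (it.next().isFile()) {
        count++;
      }
    }
    return count;
  }
}

A purge-expected test then simply asserts that the count grew across the command, while an auto.purge=true test asserts it stayed the same.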

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
index ce8fe60..bfb25aa 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
@@ -52,7 +52,6 @@ public class TestDDLWithRemoteMetastoreSecondNamenode extends TestCase {
   private static final String Table4Name = "table4_nondefault_nn";
   private static final String Table5Name = "table5_nondefault_nn";
   private static final String Table6Name = "table6_nondefault_nn";
-  private static final String Table7Name = "table7_nondefault_nn";
   private static final String Index1Name = "index1_table1_nondefault_nn";
   private static final String Index2Name = "index2_table1_nondefault_nn";
   private static final String tmpdir = System.getProperty("test.tmp.dir");
@@ -198,27 +197,6 @@ public class TestDDLWithRemoteMetastoreSecondNamenode extends TestCase {
     }
   }
 
-  private void alterPartitionAndCheck(Table table, String column,
-      String value, String location) throws CommandNeedRetryException, HiveException {
-    assertNotNull(location);
-    executeQuery("ALTER TABLE " + table.getTableName() +
-        " PARTITION (" + column + "='" + value + "')" +
-        " SET LOCATION '" + location + "'");
-    HashMap<String, String> partitions = new HashMap<String, String>();
-    partitions.put(column, value);
-    Partition partition = db.getPartition(table, partitions, false);
-    assertNotNull("Partition object is expected for " + table.getTableName() , partition);
-    String locationActual = partition.getLocation();
-    if (new Path(location).toUri().getScheme() != null) {
-      assertEquals("Partition should be located in the first filesystem",
-          fs.makeQualified(new Path(location)).toString(), locationActual);
-    }
-    else {
-      assertEquals("Partition should be located in the second filesystem",
-          fs2.makeQualified(new Path(location)).toString(), locationActual);
-    }
-  }
-
   private Table createTableAndCheck(String tableName, String tableLocation)
           throws CommandNeedRetryException, HiveException, URISyntaxException {
     return createTableAndCheck(null, tableName, tableLocation);
@@ -316,15 +294,6 @@ public class TestDDLWithRemoteMetastoreSecondNamenode extends TestCase {
     createTableAndCheck(table1, Table6Name, null);
   }
 
-  public void testAlterPartitionSetLocationNonDefaultNameNode() throws Exception {
-    assertTrue("Test suite should have been initialized", isInitialized);
-    String tableLocation = tmppathFs2 + "/" + "test_set_part_loc";
-    Table table = createTableAndCheck(Table7Name, tableLocation);
-
-    addPartitionAndCheck(table, "p", "p1", "/tmp/test/1");
-    alterPartitionAndCheck(table, "p", "p1", "/tmp/test/2");
-  }
-
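(Not part of the reverted diff.) The removed alterPartitionAndCheck comes down to one decision: if the new location already carries a URI scheme it is qualified against the default (first) filesystem, otherwise against the second namenode's filesystem, and the result is compared with Partition.getLocation(). A minimal sketch of that decision, with placeholder names:

import java.net.URI;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ExpectedPartitionLocation {
  private ExpectedPartitionLocation() {}

  // Returns the fully qualified location a partition is expected to report after
  // ALTER TABLE ... PARTITION ... SET LOCATION, mirroring the check removed above.
  public static String resolve(String location, FileSystem defaultFs, FileSystem secondFs) {
    Path p = new Path(location);
    URI uri = p.toUri();
    // A scheme (e.g. hdfs://nn1:8020/...) pins the path to the first filesystem;
    // a bare path like /tmp/test/2 resolves against the second namenode.
    FileSystem target = (uri.getScheme() != null) ? defaultFs : secondFs;
    return target.makeQualified(p).toString();
  }
}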
   public void testCreateDatabaseWithTableNonDefaultNameNode() throws Exception {
     assertTrue("Test suite should be initialied", isInitialized );
     final String tableLocation = tmppathFs2 + "/" + Table3Name;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java
index c17ca10..0688846 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java
@@ -53,11 +53,10 @@ public class TestLocationQueries extends BaseTestQueries {
      * @return non-zero if it failed
      */
     @Override
-    public QTestProcessExecResult checkCliDriverResults(String tname) throws Exception {
+    public int checkCliDriverResults(String tname) throws Exception {
       File logFile = new File(logDir, tname + ".out");
 
       int failedCount = 0;
-      StringBuilder fileNames = new StringBuilder("Files failing the location check:");
       FileReader fr = new FileReader(logFile);
       BufferedReader in = new BufferedReader(fr);
       try {
@@ -70,20 +69,19 @@ public class TestLocationQueries extends BaseTestQueries {
             File f = new File(m.group(1));
             if (!f.getName().equals(locationSubdir)) {
               failedCount++;
-              fileNames.append(f.getName()).append("\r\n");
             }
             locationCount++;
           }
         }
        // we always have to find at least one location, otherwise the test is useless
         if (locationCount == 0) {
-          return QTestProcessExecResult.create(Integer.MAX_VALUE, "0 locations tested");
+          return Integer.MAX_VALUE;
         }
       } finally {
         in.close();
       }
 
-      return QTestProcessExecResult.create(failedCount, fileNames.toString());
+      return failedCount;
     }
 
     public CheckResults(String outDir, String logDir, MiniClusterType miniMr,

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java
deleted file mode 100644
index 191d4a3..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java
+++ /dev/null
@@ -1,319 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.HiveMetaStore;
-import org.apache.hive.jdbc.miniHS2.MiniHS2;
-import org.apache.hive.service.cli.HiveSQLException;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * This unit test is for testing HIVE-13884 with more complex queries and
- * hive.metastore.limit.partition.request enabled.
- * It covers cases when the query predicates can be pushed down and the
- * number of partitions can be retrieved via directSQL.
- * It also covers cases when the number of partitions cannot be retrieved
- * via directSQL, so it falls back to ORM.
- */
-public class TestMetaStoreLimitPartitionRequest {
-
-  private static final String DB_NAME = "max_partition_test_db";
-  private static final String TABLE_NAME = "max_partition_test_table";
-  private static int PARTITION_REQUEST_LIMIT = 4;
-  private static MiniHS2 miniHS2 = null;
-  private static HiveConf conf;
-  private Connection hs2Conn = null;
-  private Statement stmt;
-
-  @BeforeClass
-  public static void beforeTest() throws Exception {
-    Class.forName(MiniHS2.getJdbcDriverName());
-    conf = new HiveConf();
-    DriverManager.setLoginTimeout(0);
-
-    conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
-    conf.setIntVar(HiveConf.ConfVars.METASTORE_LIMIT_PARTITION_REQUEST, PARTITION_REQUEST_LIMIT);
-    conf.setBoolVar(HiveConf.ConfVars.METASTORE_INTEGER_JDO_PUSHDOWN, true);
-    conf.setBoolVar(HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL, true);
-    conf.setBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING, true);
-    conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
-    conf.setBoolVar(HiveConf.ConfVars.HIVE_CBO_ENABLED, false);
-
-    miniHS2 = new MiniHS2.Builder().withConf(conf).build();
-    Map<String, String> overlayProps = new HashMap<String, String>();
-    miniHS2.start(overlayProps);
-    createDb();
-  }
-
-  private static void createDb() throws Exception {
-    Connection conn =
-        DriverManager.getConnection(miniHS2.getJdbcURL(), System.getProperty("user.name"), "bar");
-    Statement stmt2 = conn.createStatement();
-    stmt2.execute("DROP DATABASE IF EXISTS " + DB_NAME + " CASCADE");
-    stmt2.execute("CREATE DATABASE " + DB_NAME);
-    stmt2.close();
-    conn.close();
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL(DB_NAME),
-        System.getProperty("user.name"), "bar");
-    stmt = hs2Conn.createStatement();
-    stmt.execute("USE " + DB_NAME);
-    createTable();
-  }
-
-  private void createTable() throws Exception {
-    String tmpTableName = TABLE_NAME + "_tmp";
-    stmt.execute("CREATE TABLE " + tmpTableName
-        + " (id string, value string, num string, ds date) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' LINES TERMINATED BY '\n' STORED AS TEXTFILE");
-    stmt.execute("INSERT OVERWRITE TABLE " + tmpTableName
-        + " VALUES ('1', 'value1', '25', '2008-04-09'), ('2', 'value2', '30', '2008-04-09'), "
-        + "('3', 'value3', '35', '2008-04-09'), ('4', 'value4', '40', '2008-04-09'), "
-        + "('5', 'value5', '25', '2008-05-09'), ('6', 'value6', '30', '2008-05-09'), "
-        + "('7', 'value7', '35', '2008-05-09'), ('8', 'value8', '40', '2008-05-09'), "
-        + "('9', 'value9', '25', '2009-04-09'), ('10', 'value10', '30', '2009-04-09'), "
-        + "('11', 'value11', '35', '2009-04-09'), ('12', 'value12', '40', '2009-04-09')");
-
-    stmt.execute("CREATE TABLE " + TABLE_NAME + " (id string, value string) PARTITIONED BY (num string, ds date)");
-    stmt.execute("INSERT OVERWRITE TABLE " + TABLE_NAME + " PARTITION (num, ds) SELECT id, value, num, ds FROM " + tmpTableName);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    String tmpTableName = TABLE_NAME + "_tmp";
-    stmt.execute("DROP TABLE IF EXISTS " + TABLE_NAME);
-    stmt.execute("DROP TABLE IF EXISTS " + tmpTableName);
-    stmt.execute("DROP TABLE IF EXISTS " + TABLE_NAME + "_num_tmp");
-
-    if (hs2Conn != null) {
-      hs2Conn.close();
-    }
-  }
-
-  @AfterClass
-  public static void afterTest() throws Exception {
-    if (miniHS2 != null && miniHS2.isStarted()) {
-      miniHS2.stop();
-    }
-  }
-
-  /* Tests with queries which can be pushed down and executed with directSQL */
-
-  @Test
-  public void testSimpleQueryWithDirectSql() throws Exception {
-    String queryString = "select value from %s where num='25' and ds='2008-04-09'";
-    executeQuery(queryString, "value1");
-  }
-
-  @Test
-  public void testMoreComplexQueryWithDirectSql() throws Exception {
-    String queryString = "select value from %s where (ds between '2009-01-01' and '2009-12-31' and num='25') or (ds between '2008-01-01' and '2008-12-31' and num='30')";
-    executeQuery(queryString, "value2", "value6", "value9");
-  }
-
-  /*
-   * Tests with queries which can be pushed down and executed with directSQL, but the number of
-   * partitions which should be fetched is bigger than the maximum set by the
-   * hive.metastore.limit.partition.request parameter.
-   */
-
-  @Test
-  public void testSimpleQueryWithDirectSqlTooManyPartitions() throws Exception {
-    String queryString = "select value from %s where ds>'2008-04-20'";
-    executeQueryExceedPartitionLimit(queryString, 8);
-  }
-
-  @Test
-  public void testMoreComplexQueryWithDirectSqlTooManyPartitions() throws Exception {
-    String queryString = "select value from %s where num='25' or (num='30' and ds between '2008-01-01' and '2008-12-31')";
-    executeQueryExceedPartitionLimit(queryString, 5);
-  }
-
-  /*
-   * Tests with queries which cannot be executed with directSQL, because of type mismatch. The type
-   * of the num column is string, but the parameters used in the where clause are numbers. After
-   * falling back to ORM, the number of partitions can be fetched by the
-   * ObjectStore.getNumPartitionsViaOrmFilter method.
-   */
-
-  @Test
-  public void testQueryWithFallbackToORM1() throws Exception {
-    String queryString = "select value from %s where num!=25 and num!=35 and num!=40";
-    executeQuery(queryString, "value2", "value6", "value10");
-  }
-
-  @Test
-  public void testQueryWithFallbackToORMTooManyPartitions1() throws Exception {
-    String queryString = "select value from %s where num=30 or num=25";
-    executeQueryExceedPartitionLimit(queryString, 6);
-  }
-
-  /*
-   * Tests with queries which cannot be executed with directSQL, because of type mismatch. The type
-   * of the num column is string, but the parameters used in the where clause are numbers. After
-   * falling back to ORM the number of partitions cannot be fetched by the
-   * ObjectStore.getNumPartitionsViaOrmFilter method. They are fetched by the
-   * ObjectStore.getPartitionNamesPrunedByExprNoTxn method.
-   */
-
-  @Test
-  public void testQueryWithFallbackToORM2() throws Exception {
-    String queryString = "select value from %s where num!=25 and ds='2008-04-09'";
-    executeQuery(queryString, "value2", "value3", "value4");
-  }
-
-  @Test
-  public void testQueryWithFallbackToORM3() throws Exception {
-    String queryString = "select value from %s where num between 26 and 31";
-    executeQuery(queryString, "value2", "value6", "value10");
-  }
-
-  @Test
-  public void testQueryWithFallbackToORMTooManyPartitions2() throws Exception {
-    String queryString = "select value from %s where num!=25 and (ds='2008-04-09' or ds='2008-05-09')";
-    executeQueryExceedPartitionLimit(queryString, 6);
-  }
-
-  @Test
-  public void testQueryWithFallbackToORMTooManyPartitions3() throws Exception {
-    String queryString = "select value from %s where num>=30";
-    executeQueryExceedPartitionLimit(queryString, 9);
-  }
-
-  @Test
-  public void testQueryWithFallbackToORMTooManyPartitions4() throws Exception {
-    String queryString = "select value from %s where num between 20 and 50";
-    executeQueryExceedPartitionLimit(queryString, 12);
-  }
-
-  /*
-   * Tests with queries which cannot be executed with directSQL, because they contain LIKE or IN.
-   * After falling back to ORM the number of partitions cannot be fetched by the
-   * ObjectStore.getNumPartitionsViaOrmFilter method. They are fetched by the
-   * ObjectStore.getPartitionNamesPrunedByExprNoTxn method.
-   */
-
-  @Test
-  public void testQueryWithInWithFallbackToORM() throws Exception {
-    setupNumTmpTable();
-    String queryString = "select value from %s a where ds='2008-04-09' and a.num in (select value from " + TABLE_NAME + "_num_tmp)";
-    executeQuery(queryString, "value1", "value2");
-  }
-
-  @Test
-  public void testQueryWithInWithFallbackToORMTooManyPartitions() throws Exception {
-    setupNumTmpTable();
-    String queryString = "select value from %s a where a.num in (select value from " + TABLE_NAME + "_num_tmp)";
-    executeQueryExceedPartitionLimit(queryString, 12);
-  }
-
-  @Test
-  public void testQueryWithInWithFallbackToORMTooManyPartitions2() throws Exception {
-    setupNumTmpTable();
-    String queryString = "select value from %s a where a.num in (select value from " + TABLE_NAME + "_num_tmp where value='25')";
-    executeQueryExceedPartitionLimit(queryString, 12);
-  }
-
-  @Test
-  public void testQueryWithLikeWithFallbackToORMTooManyPartitions() throws Exception {
-    String queryString = "select value from %s where num like '3%%'";
-    executeQueryExceedPartitionLimit(queryString, 6);
-  }
-
-  private void setupNumTmpTable() throws SQLException {
-    stmt.execute("CREATE TABLE " + TABLE_NAME + "_num_tmp (value string)");
-    stmt.execute("INSERT INTO " + TABLE_NAME + "_num_tmp VALUES ('25')");
-    stmt.execute("INSERT INTO " + TABLE_NAME + "_num_tmp VALUES ('30')");
-  }
-
-  private void executeQuery(String query, String... expectedValues) throws SQLException {
-    String queryStr = String.format(query, TABLE_NAME);
-    ResultSet result = stmt.executeQuery(queryStr);
-    assertTrue(result != null);
-    Set<String> expectedValueSet = new HashSet<>(Arrays.asList(expectedValues));
-    Set<String> resultValues = getResultValues(result);
-    String errorMsg = getWrongResultErrorMsg(queryStr, expectedValueSet.toString(), resultValues.toString());
-    assertTrue(errorMsg, resultValues.equals(expectedValueSet));
-  }
-
-  private Set<String> getResultValues(ResultSet result) throws SQLException {
-    Set<String> resultValues = new HashSet<>();
-    while(result.next()) {
-      resultValues.add(result.getString(1));
-    }
-    return resultValues;
-  }
-
-  private void executeQueryExceedPartitionLimit(String query, int expectedPartitionNumber) throws Exception {
-    try {
-      String queryStr = String.format(query, TABLE_NAME);
-      stmt.executeQuery(queryStr);
-      fail("The query should have failed, because the number of requested partitions are bigger than "
-              + PARTITION_REQUEST_LIMIT);
-    } catch (HiveSQLException e) {
-      String exceedLimitMsg = String.format(HiveMetaStore.PARTITION_NUMBER_EXCEED_LIMIT_MSG, expectedPartitionNumber,
-          TABLE_NAME, PARTITION_REQUEST_LIMIT, ConfVars.METASTORE_LIMIT_PARTITION_REQUEST.varname);
-      assertTrue(getWrongExceptionMessage(exceedLimitMsg, e.getMessage()),
-          e.getMessage().contains(exceedLimitMsg.toString()));
-    }
-  }
-
-  private String getWrongResultErrorMsg(String query, String expectedValues, String resultValues) {
-    StringBuilder errorMsg = new StringBuilder();
-    errorMsg.append("The query '");
-    errorMsg.append(query);
-    errorMsg.append("' returned wrong values. It returned the values ");
-    errorMsg.append(resultValues);
-    errorMsg.append(" instead of the expected ");
-    errorMsg.append(expectedValues);
-    return errorMsg.toString();
-  }
-
-  private String getWrongExceptionMessage(String exceedLimitMsg, String exceptionMessage) {
-    StringBuilder errorMsg = new StringBuilder();
-    errorMsg.append("The message of the exception doesn't contain the expected '");
-    errorMsg.append(exceedLimitMsg.toString());
-    errorMsg.append("'. It is: ");
-    errorMsg.append(exceptionMessage);
-    return errorMsg.toString();
-  }
-
-}
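(Illustrative only, not part of the reverted diff.) The removed test above drives its queries through JDBC against a MiniHS2 instance whose HiveConf sets hive.metastore.limit.partition.request to 4. The same failure mode can be reproduced with a plain JDBC client; in the sketch below the URL, credentials and table name are placeholders, and the partition limit is assumed to already be configured on the HiveServer2/metastore side (for example in hive-site.xml).

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class PartitionLimitProbe {
  public static void main(String[] args) throws Exception {
    Class.forName("org.apache.hive.jdbc.HiveDriver");  // Hive JDBC driver must be on the classpath
    try (Connection conn = DriverManager.getConnection(
             "jdbc:hive2://localhost:10000/default", "hive", "");
         Statement stmt = conn.createStatement()) {
      try {
        // A predicate that selects more partitions than the configured limit is rejected
        // by the metastore before any data is scanned.
        stmt.executeQuery("SELECT value FROM max_partition_test_table WHERE num >= '30'");
      } catch (SQLException e) {
        // HiveServer2 surfaces the metastore's "exceeded limit" message through the driver,
        // which is what executeQueryExceedPartitionLimit asserted on above.
        System.out.println("Query rejected: " + e.getMessage());
      }
    }
  }
}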


http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/orc_buckets.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/orc_buckets.q.out b/itests/hive-blobstore/src/test/results/clientpositive/orc_buckets.q.out
deleted file mode 100644
index e54b84e..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/orc_buckets.q.out
+++ /dev/null
@@ -1,183 +0,0 @@
-PREHOOK: query: DROP TABLE blobstore_source
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE blobstore_source
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE blobstore_source(a STRING, b STRING, c DOUBLE)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY ' '
-COLLECTION ITEMS TERMINATED BY '\t'
-LINES TERMINATED BY '\n'
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/orc_buckets/blobstore_source
-PREHOOK: Output: database:default
-PREHOOK: Output: default@blobstore_source
-POSTHOOK: query: CREATE TABLE blobstore_source(a STRING, b STRING, c DOUBLE)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY ' '
-COLLECTION ITEMS TERMINATED BY '\t'
-LINES TERMINATED BY '\n'
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/orc_buckets/blobstore_source
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@blobstore_source
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/3col_data.txt' INTO TABLE blobstore_source
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@blobstore_source
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/3col_data.txt' INTO TABLE blobstore_source
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@blobstore_source
-PREHOOK: query: DROP TABLE orc_buckets
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE orc_buckets
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE orc_buckets (a STRING, value DOUBLE)
-PARTITIONED BY (b STRING)
-CLUSTERED BY (a) INTO 10 BUCKETS
-STORED AS ORC
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/orc_buckets/orc_buckets
-PREHOOK: Output: database:default
-PREHOOK: Output: default@orc_buckets
-POSTHOOK: query: CREATE TABLE orc_buckets (a STRING, value DOUBLE)
-PARTITIONED BY (b STRING)
-CLUSTERED BY (a) INTO 10 BUCKETS
-STORED AS ORC
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/orc_buckets/orc_buckets
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orc_buckets
-PREHOOK: query: INSERT OVERWRITE TABLE orc_buckets
-PARTITION (b)
-SELECT a, c, b FROM blobstore_source
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_source
-PREHOOK: Output: default@orc_buckets
-POSTHOOK: query: INSERT OVERWRITE TABLE orc_buckets
-PARTITION (b)
-SELECT a, c, b FROM blobstore_source
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_source
-POSTHOOK: Output: default@orc_buckets@b=abc
-POSTHOOK: Output: default@orc_buckets@b=ajss
-POSTHOOK: Output: default@orc_buckets@b=data
-POSTHOOK: Output: default@orc_buckets@b=def
-POSTHOOK: Output: default@orc_buckets@b=djns
-POSTHOOK: Output: default@orc_buckets@b=ne
-POSTHOOK: Output: default@orc_buckets@b=random
-POSTHOOK: Lineage: orc_buckets PARTITION(b=abc).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=abc).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=ajss).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=ajss).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=data).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=data).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=def).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=def).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=djns).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=djns).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=ne).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=ne).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=random).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=random).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-PREHOOK: query: SELECT * FROM orc_buckets
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_buckets
-PREHOOK: Input: default@orc_buckets@b=abc
-PREHOOK: Input: default@orc_buckets@b=ajss
-PREHOOK: Input: default@orc_buckets@b=data
-PREHOOK: Input: default@orc_buckets@b=def
-PREHOOK: Input: default@orc_buckets@b=djns
-PREHOOK: Input: default@orc_buckets@b=ne
-PREHOOK: Input: default@orc_buckets@b=random
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM orc_buckets
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_buckets
-POSTHOOK: Input: default@orc_buckets@b=abc
-POSTHOOK: Input: default@orc_buckets@b=ajss
-POSTHOOK: Input: default@orc_buckets@b=data
-POSTHOOK: Input: default@orc_buckets@b=def
-POSTHOOK: Input: default@orc_buckets@b=djns
-POSTHOOK: Input: default@orc_buckets@b=ne
-POSTHOOK: Input: default@orc_buckets@b=random
-#### A masked pattern was here ####
-1	10.5	abc
-3	90.23232	ajss
-6	3.002	data
-2	11.5	def
-4	89.02002	djns
-7	71.9084	ne
-5	2.99	random
-PREHOOK: query: INSERT INTO TABLE orc_buckets
-PARTITION (b)
-SELECT a, c, b FROM blobstore_source
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_source
-PREHOOK: Output: default@orc_buckets
-POSTHOOK: query: INSERT INTO TABLE orc_buckets
-PARTITION (b)
-SELECT a, c, b FROM blobstore_source
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_source
-POSTHOOK: Output: default@orc_buckets@b=abc
-POSTHOOK: Output: default@orc_buckets@b=ajss
-POSTHOOK: Output: default@orc_buckets@b=data
-POSTHOOK: Output: default@orc_buckets@b=def
-POSTHOOK: Output: default@orc_buckets@b=djns
-POSTHOOK: Output: default@orc_buckets@b=ne
-POSTHOOK: Output: default@orc_buckets@b=random
-POSTHOOK: Lineage: orc_buckets PARTITION(b=abc).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=abc).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=ajss).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=ajss).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=data).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=data).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=def).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=def).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=djns).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=djns).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=ne).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=ne).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=random).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_buckets PARTITION(b=random).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-PREHOOK: query: SELECT * FROM orc_buckets
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_buckets
-PREHOOK: Input: default@orc_buckets@b=abc
-PREHOOK: Input: default@orc_buckets@b=ajss
-PREHOOK: Input: default@orc_buckets@b=data
-PREHOOK: Input: default@orc_buckets@b=def
-PREHOOK: Input: default@orc_buckets@b=djns
-PREHOOK: Input: default@orc_buckets@b=ne
-PREHOOK: Input: default@orc_buckets@b=random
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM orc_buckets
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_buckets
-POSTHOOK: Input: default@orc_buckets@b=abc
-POSTHOOK: Input: default@orc_buckets@b=ajss
-POSTHOOK: Input: default@orc_buckets@b=data
-POSTHOOK: Input: default@orc_buckets@b=def
-POSTHOOK: Input: default@orc_buckets@b=djns
-POSTHOOK: Input: default@orc_buckets@b=ne
-POSTHOOK: Input: default@orc_buckets@b=random
-#### A masked pattern was here ####
-1	10.5	abc
-1	10.5	abc
-3	90.23232	ajss
-3	90.23232	ajss
-6	3.002	data
-6	3.002	data
-2	11.5	def
-2	11.5	def
-4	89.02002	djns
-4	89.02002	djns
-7	71.9084	ne
-7	71.9084	ne
-5	2.99	random
-5	2.99	random

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/orc_format_nonpart.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/orc_format_nonpart.q.out b/itests/hive-blobstore/src/test/results/clientpositive/orc_format_nonpart.q.out
deleted file mode 100644
index f470568..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/orc_format_nonpart.q.out
+++ /dev/null
@@ -1,195 +0,0 @@
-PREHOOK: query: DROP TABLE blobstore_source
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE blobstore_source
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE blobstore_source(a STRING, b STRING, c DOUBLE)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY ' '
-COLLECTION ITEMS TERMINATED BY '\t'
-LINES TERMINATED BY '\n'
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/orc_format_nonpart/blobstore_source
-PREHOOK: Output: database:default
-PREHOOK: Output: default@blobstore_source
-POSTHOOK: query: CREATE TABLE blobstore_source(a STRING, b STRING, c DOUBLE)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY ' '
-COLLECTION ITEMS TERMINATED BY '\t'
-LINES TERMINATED BY '\n'
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/orc_format_nonpart/blobstore_source
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@blobstore_source
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/3col_data.txt' INTO TABLE blobstore_source
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@blobstore_source
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/3col_data.txt' INTO TABLE blobstore_source
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@blobstore_source
-PREHOOK: query: DROP TABLE orc_table
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE orc_table
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE EXTERNAL TABLE orc_table (a INT, b STRING, value DOUBLE) STORED AS ORC
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/orc_format_nonpart/orc_table
-PREHOOK: Output: database:default
-PREHOOK: Output: default@orc_table
-POSTHOOK: query: CREATE EXTERNAL TABLE orc_table (a INT, b STRING, value DOUBLE) STORED AS ORC
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/orc_format_nonpart/orc_table
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orc_table
-PREHOOK: query: INSERT OVERWRITE TABLE orc_table
-SELECT * FROM blobstore_source
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_source
-PREHOOK: Output: default@orc_table
-POSTHOOK: query: INSERT OVERWRITE TABLE orc_table
-SELECT * FROM blobstore_source
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_source
-POSTHOOK: Output: default@orc_table
-PREHOOK: query: SELECT * FROM orc_table
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM orc_table
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_table
-#### A masked pattern was here ####
-1	abc	10.5
-2	def	11.5
-3	ajss	90.23232
-4	djns	89.02002
-5	random	2.99
-6	data	3.002
-7	ne	71.9084
-PREHOOK: query: SELECT a FROM orc_table GROUP BY a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a FROM orc_table GROUP BY a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_table
-#### A masked pattern was here ####
-1
-2
-3
-4
-5
-6
-7
-PREHOOK: query: SELECT b FROM orc_table GROUP BY b
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT b FROM orc_table GROUP BY b
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_table
-#### A masked pattern was here ####
-abc
-ajss
-data
-def
-djns
-ne
-random
-PREHOOK: query: SELECT value FROM orc_table GROUP BY value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT value FROM orc_table GROUP BY value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_table
-#### A masked pattern was here ####
-2.99
-3.002
-10.5
-11.5
-71.9084
-89.02002
-90.23232
-PREHOOK: query: INSERT INTO TABLE orc_table
-SELECT * FROM blobstore_source
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_source
-PREHOOK: Output: default@orc_table
-POSTHOOK: query: INSERT INTO TABLE orc_table
-SELECT * FROM blobstore_source
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_source
-POSTHOOK: Output: default@orc_table
-PREHOOK: query: SELECT * FROM orc_table
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM orc_table
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_table
-#### A masked pattern was here ####
-1	abc	10.5
-2	def	11.5
-3	ajss	90.23232
-4	djns	89.02002
-5	random	2.99
-6	data	3.002
-7	ne	71.9084
-1	abc	10.5
-2	def	11.5
-3	ajss	90.23232
-4	djns	89.02002
-5	random	2.99
-6	data	3.002
-7	ne	71.9084
-PREHOOK: query: SELECT a FROM orc_table GROUP BY a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a FROM orc_table GROUP BY a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_table
-#### A masked pattern was here ####
-1
-2
-3
-4
-5
-6
-7
-PREHOOK: query: SELECT b FROM orc_table GROUP BY b
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT b FROM orc_table GROUP BY b
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_table
-#### A masked pattern was here ####
-abc
-ajss
-data
-def
-djns
-ne
-random
-PREHOOK: query: SELECT value FROM orc_table GROUP BY value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT value FROM orc_table GROUP BY value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_table
-#### A masked pattern was here ####
-2.99
-3.002
-10.5
-11.5
-71.9084
-89.02002
-90.23232

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/orc_format_part.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/orc_format_part.q.out b/itests/hive-blobstore/src/test/results/clientpositive/orc_format_part.q.out
deleted file mode 100644
index 5d1319f..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/orc_format_part.q.out
+++ /dev/null
@@ -1,274 +0,0 @@
-PREHOOK: query: DROP TABLE src_events
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE src_events
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE src_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT,
-  run_date    STRING,
-  game_id     INT,
-  event_name  STRING
-)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/orc_format_part/src_events
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_events
-POSTHOOK: query: CREATE TABLE src_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT,
-  run_date    STRING,
-  game_id     INT,
-  event_name  STRING
-)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/orc_format_part/src_events
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_events
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/events.txt' INTO TABLE src_events
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@src_events
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/events.txt' INTO TABLE src_events
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@src_events
-PREHOOK: query: DROP TABLE orc_events
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE orc_events
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE orc_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT
-)
-PARTITIONED BY (run_date STRING, game_id INT, event_name STRING)
-STORED AS ORC
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/orc_format_part/orc_events
-PREHOOK: Output: database:default
-PREHOOK: Output: default@orc_events
-POSTHOOK: query: CREATE TABLE orc_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT
-)
-PARTITIONED BY (run_date STRING, game_id INT, event_name STRING)
-STORED AS ORC
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/orc_format_part/orc_events
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orc_events
-PREHOOK: query: INSERT OVERWRITE TABLE orc_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@orc_events
-POSTHOOK: query: INSERT OVERWRITE TABLE orc_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@orc_events@run_date=20120921/game_id=39/event_name=hq_change
-POSTHOOK: Output: default@orc_events@run_date=20121021/game_id=39/event_name=hq_change
-POSTHOOK: Output: default@orc_events@run_date=20121121/game_id=39/event_name=hq_change
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-PREHOOK: query: SHOW PARTITIONS orc_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@orc_events
-POSTHOOK: query: SHOW PARTITIONS orc_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@orc_events
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM orc_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM orc_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-200
-PREHOOK: query: SELECT COUNT(*) FROM orc_events WHERE run_date=20120921
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM orc_events WHERE run_date=20120921
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-50
-PREHOOK: query: SELECT COUNT(*) FROM orc_events WHERE run_date=20121121
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM orc_events WHERE run_date=20121121
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-100
-PREHOOK: query: INSERT OVERWRITE TABLE orc_events PARTITION (run_date=201211, game_id, event_name)
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid,game_id,event_name FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@orc_events@run_date=201211
-POSTHOOK: query: INSERT OVERWRITE TABLE orc_events PARTITION (run_date=201211, game_id, event_name)
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid,game_id,event_name FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@orc_events@run_date=201211/game_id=39/event_name=hq_change
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201211,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201211,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201211,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201211,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201211,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201211,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201211,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201211,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-PREHOOK: query: SHOW PARTITIONS orc_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@orc_events
-POSTHOOK: query: SHOW PARTITIONS orc_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@orc_events
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=201211/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM orc_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM orc_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-300
-PREHOOK: query: INSERT INTO TABLE orc_events PARTITION (run_date=201209, game_id=39, event_name)
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid,event_name FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@orc_events@run_date=201209/game_id=39
-POSTHOOK: query: INSERT INTO TABLE orc_events PARTITION (run_date=201209, game_id=39, event_name)
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid,event_name FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@orc_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-PREHOOK: query: SELECT COUNT(*) FROM orc_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM orc_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-350
-PREHOOK: query: INSERT INTO TABLE orc_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39 AND event_name='hq_change'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@orc_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: query: INSERT INTO TABLE orc_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39 AND event_name='hq_change'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@orc_events@run_date=201209/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM orc_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM orc_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-400
-PREHOOK: query: INSERT OVERWRITE TABLE orc_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39 AND event_name='hq_change'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@orc_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: query: INSERT OVERWRITE TABLE orc_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209' AND game_id=39 AND event_name='hq_change'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@orc_events@run_date=201209/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM orc_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM orc_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-350

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/orc_nonstd_partitions_loc.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/orc_nonstd_partitions_loc.q.out b/itests/hive-blobstore/src/test/results/clientpositive/orc_nonstd_partitions_loc.q.out
deleted file mode 100644
index 70e72f7..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/orc_nonstd_partitions_loc.q.out
+++ /dev/null
@@ -1,513 +0,0 @@
-PREHOOK: query: DROP TABLE src_events
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE src_events
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE src_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT,
-  run_date    STRING,
-  game_id     INT,
-  event_name  STRING
-)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/orc_nonstd_partitions_loc/src_events
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_events
-POSTHOOK: query: CREATE TABLE src_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT,
-  run_date    STRING,
-  game_id     INT,
-  event_name  STRING
-)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/orc_nonstd_partitions_loc/src_events
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_events
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/events.txt' INTO TABLE src_events
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@src_events
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/events.txt' INTO TABLE src_events
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@src_events
-PREHOOK: query: DROP TABLE orc_events
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE orc_events
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE orc_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT
-)
-PARTITIONED BY (run_date STRING, game_id INT, event_name STRING)
-STORED AS ORC
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/orc_nonstd_partitions_loc/orc_events
-PREHOOK: Output: database:default
-PREHOOK: Output: default@orc_events
-POSTHOOK: query: CREATE TABLE orc_events
-(
-  log_id      BIGINT,
-  time        BIGINT,
-  uid         BIGINT,
-  user_id     BIGINT,
-  type        INT,
-  event_data  STRING,
-  session_id  STRING,
-  full_uid    BIGINT
-)
-PARTITIONED BY (run_date STRING, game_id INT, event_name STRING)
-STORED AS ORC
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/orc_nonstd_partitions_loc/orc_events
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orc_events
-PREHOOK: query: INSERT OVERWRITE TABLE orc_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@orc_events
-POSTHOOK: query: INSERT OVERWRITE TABLE orc_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@orc_events@run_date=20120921/game_id=39/event_name=hq_change
-POSTHOOK: Output: default@orc_events@run_date=20121021/game_id=39/event_name=hq_change
-POSTHOOK: Output: default@orc_events@run_date=20121121/game_id=39/event_name=hq_change
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-PREHOOK: query: SHOW PARTITIONS orc_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@orc_events
-POSTHOOK: query: SHOW PARTITIONS orc_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@orc_events
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM orc_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM orc_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-200
-PREHOOK: query: ALTER TABLE orc_events ADD PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-#### A masked pattern was here ####
-PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Input: ### test.blobstore.path ###/orc_nonstd_partitions_loc/orc_nonstd_loc/ns-part-1
-PREHOOK: Output: default@orc_events
-POSTHOOK: query: ALTER TABLE orc_events ADD PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-#### A masked pattern was here ####
-POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Input: ### test.blobstore.path ###/orc_nonstd_partitions_loc/orc_nonstd_loc/ns-part-1
-POSTHOOK: Output: default@orc_events
-POSTHOOK: Output: default@orc_events@run_date=201211/game_id=39/event_name=hq_change
-PREHOOK: query: INSERT OVERWRITE TABLE orc_events PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@orc_events@run_date=201211/game_id=39/event_name=hq_change
-POSTHOOK: query: INSERT OVERWRITE TABLE orc_events PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@orc_events@run_date=201211/game_id=39/event_name=hq_change
-PREHOOK: query: SHOW PARTITIONS orc_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@orc_events
-POSTHOOK: query: SHOW PARTITIONS orc_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@orc_events
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=201211/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM orc_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM orc_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-300
-PREHOOK: query: INSERT INTO TABLE orc_events PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@orc_events@run_date=201211/game_id=39/event_name=hq_change
-POSTHOOK: query: INSERT INTO TABLE orc_events PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201211'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@orc_events@run_date=201211/game_id=39/event_name=hq_change
-PREHOOK: query: SHOW PARTITIONS orc_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@orc_events
-POSTHOOK: query: SHOW PARTITIONS orc_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@orc_events
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=201211/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM orc_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM orc_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-400
-PREHOOK: query: ALTER TABLE orc_events ADD PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-#### A masked pattern was here ####
-PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Input: ### test.blobstore.path ###/orc_nonstd_partitions_loc/orc_nonstd_loc/ns-part-2
-PREHOOK: Output: default@orc_events
-POSTHOOK: query: ALTER TABLE orc_events ADD PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-#### A masked pattern was here ####
-POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Input: ### test.blobstore.path ###/orc_nonstd_partitions_loc/orc_nonstd_loc/ns-part-2
-POSTHOOK: Output: default@orc_events
-POSTHOOK: Output: default@orc_events@run_date=201209/game_id=39/event_name=hq_change
-PREHOOK: query: INSERT OVERWRITE TABLE orc_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@orc_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: query: INSERT OVERWRITE TABLE orc_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@orc_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-PREHOOK: query: INSERT INTO TABLE orc_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@orc_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: query: INSERT INTO TABLE orc_events PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@orc_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=201209,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-PREHOOK: query: SHOW PARTITIONS orc_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@orc_events
-POSTHOOK: query: SHOW PARTITIONS orc_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@orc_events
-run_date=201209/game_id=39/event_name=hq_change
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=201211/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM orc_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM orc_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-500
-PREHOOK: query: INSERT OVERWRITE TABLE orc_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@orc_events
-POSTHOOK: query: INSERT OVERWRITE TABLE orc_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@orc_events@run_date=20120921/game_id=39/event_name=hq_change
-POSTHOOK: Output: default@orc_events@run_date=20121021/game_id=39/event_name=hq_change
-POSTHOOK: Output: default@orc_events@run_date=20121121/game_id=39/event_name=hq_change
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-PREHOOK: query: SHOW PARTITIONS orc_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@orc_events
-POSTHOOK: query: SHOW PARTITIONS orc_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@orc_events
-run_date=201209/game_id=39/event_name=hq_change
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=201211/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM orc_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM orc_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-500
-PREHOOK: query: ALTER TABLE orc_events ADD PARTITION (run_date=201207, game_id=39, event_name='hq_change')
-#### A masked pattern was here ####
-PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Input: ### test.blobstore.path ###/orc_nonstd_partitions_loc/orc_nonstd_loc/ns-part-3
-PREHOOK: Output: default@orc_events
-POSTHOOK: query: ALTER TABLE orc_events ADD PARTITION (run_date=201207, game_id=39, event_name='hq_change')
-#### A masked pattern was here ####
-POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Input: ### test.blobstore.path ###/orc_nonstd_partitions_loc/orc_nonstd_loc/ns-part-3
-POSTHOOK: Output: default@orc_events
-POSTHOOK: Output: default@orc_events@run_date=201207/game_id=39/event_name=hq_change
-PREHOOK: query: INSERT INTO TABLE orc_events PARTITION (run_date=201207, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@orc_events@run_date=201207/game_id=39/event_name=hq_change
-POSTHOOK: query: INSERT INTO TABLE orc_events PARTITION (run_date=201207, game_id=39, event_name='hq_change')
-SELECT log_id,time,uid,user_id,type,event_data,session_id,full_uid FROM src_events
-WHERE SUBSTR(run_date,1,6)='201209'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@orc_events@run_date=201207/game_id=39/event_name=hq_change
-PREHOOK: query: SHOW PARTITIONS orc_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@orc_events
-POSTHOOK: query: SHOW PARTITIONS orc_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@orc_events
-run_date=201207/game_id=39/event_name=hq_change
-run_date=201209/game_id=39/event_name=hq_change
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=201211/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM orc_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_events
-PREHOOK: Input: default@orc_events@run_date=201207/game_id=39/event_name=hq_change
-PREHOOK: Input: default@orc_events@run_date=201209/game_id=39/event_name=hq_change
-PREHOOK: Input: default@orc_events@run_date=20120921/game_id=39/event_name=hq_change
-PREHOOK: Input: default@orc_events@run_date=20121021/game_id=39/event_name=hq_change
-PREHOOK: Input: default@orc_events@run_date=201211/game_id=39/event_name=hq_change
-PREHOOK: Input: default@orc_events@run_date=20121121/game_id=39/event_name=hq_change
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM orc_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_events
-POSTHOOK: Input: default@orc_events@run_date=201207/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@orc_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@orc_events@run_date=20120921/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@orc_events@run_date=20121021/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@orc_events@run_date=201211/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@orc_events@run_date=20121121/game_id=39/event_name=hq_change
-#### A masked pattern was here ####
-550
-PREHOOK: query: INSERT OVERWRITE TABLE orc_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_events
-PREHOOK: Output: default@orc_events
-POSTHOOK: query: INSERT OVERWRITE TABLE orc_events PARTITION (run_date, game_id, event_name)
-SELECT * FROM src_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_events
-POSTHOOK: Output: default@orc_events@run_date=20120921/game_id=39/event_name=hq_change
-POSTHOOK: Output: default@orc_events@run_date=20121021/game_id=39/event_name=hq_change
-POSTHOOK: Output: default@orc_events@run_date=20121121/game_id=39/event_name=hq_change
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20120921,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121021,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).event_data SIMPLE [(src_events)src_events.FieldSchema(name:event_data, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).full_uid SIMPLE [(src_events)src_events.FieldSchema(name:full_uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).log_id SIMPLE [(src_events)src_events.FieldSchema(name:log_id, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).session_id SIMPLE [(src_events)src_events.FieldSchema(name:session_id, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).time SIMPLE [(src_events)src_events.FieldSchema(name:time, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).type SIMPLE [(src_events)src_events.FieldSchema(name:type, type:int, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).uid SIMPLE [(src_events)src_events.FieldSchema(name:uid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_events PARTITION(run_date=20121121,game_id=39,event_name=hq_change).user_id SIMPLE [(src_events)src_events.FieldSchema(name:user_id, type:bigint, comment:null), ]
-PREHOOK: query: SHOW PARTITIONS orc_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@orc_events
-POSTHOOK: query: SHOW PARTITIONS orc_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@orc_events
-run_date=201207/game_id=39/event_name=hq_change
-run_date=201209/game_id=39/event_name=hq_change
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=201211/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM orc_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_events
-PREHOOK: Input: default@orc_events@run_date=201207/game_id=39/event_name=hq_change
-PREHOOK: Input: default@orc_events@run_date=201209/game_id=39/event_name=hq_change
-PREHOOK: Input: default@orc_events@run_date=20120921/game_id=39/event_name=hq_change
-PREHOOK: Input: default@orc_events@run_date=20121021/game_id=39/event_name=hq_change
-PREHOOK: Input: default@orc_events@run_date=201211/game_id=39/event_name=hq_change
-PREHOOK: Input: default@orc_events@run_date=20121121/game_id=39/event_name=hq_change
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM orc_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_events
-POSTHOOK: Input: default@orc_events@run_date=201207/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@orc_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@orc_events@run_date=20120921/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@orc_events@run_date=20121021/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@orc_events@run_date=201211/game_id=39/event_name=hq_change
-POSTHOOK: Input: default@orc_events@run_date=20121121/game_id=39/event_name=hq_change
-#### A masked pattern was here ####
-550
-PREHOOK: query: ALTER TABLE orc_events DROP PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@orc_events
-PREHOOK: Output: default@orc_events@run_date=201211/game_id=39/event_name=hq_change
-POSTHOOK: query: ALTER TABLE orc_events DROP PARTITION (run_date=201211, game_id=39, event_name='hq_change')
-POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@orc_events
-POSTHOOK: Output: default@orc_events@run_date=201211/game_id=39/event_name=hq_change
-PREHOOK: query: ALTER TABLE orc_events DROP PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@orc_events
-PREHOOK: Output: default@orc_events@run_date=201209/game_id=39/event_name=hq_change
-POSTHOOK: query: ALTER TABLE orc_events DROP PARTITION (run_date=201209, game_id=39, event_name='hq_change')
-POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@orc_events
-POSTHOOK: Output: default@orc_events@run_date=201209/game_id=39/event_name=hq_change
-PREHOOK: query: ALTER TABLE orc_events DROP PARTITION (run_date=201207, game_id=39, event_name='hq_change')
-PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@orc_events
-PREHOOK: Output: default@orc_events@run_date=201207/game_id=39/event_name=hq_change
-POSTHOOK: query: ALTER TABLE orc_events DROP PARTITION (run_date=201207, game_id=39, event_name='hq_change')
-POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@orc_events
-POSTHOOK: Output: default@orc_events@run_date=201207/game_id=39/event_name=hq_change
-PREHOOK: query: SHOW PARTITIONS orc_events
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@orc_events
-POSTHOOK: query: SHOW PARTITIONS orc_events
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@orc_events
-run_date=20120921/game_id=39/event_name=hq_change
-run_date=20121021/game_id=39/event_name=hq_change
-run_date=20121121/game_id=39/event_name=hq_change
-PREHOOK: query: SELECT COUNT(*) FROM orc_events
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM orc_events
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_events
-#### A masked pattern was here ####
-200

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/rcfile_buckets.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/rcfile_buckets.q.out b/itests/hive-blobstore/src/test/results/clientpositive/rcfile_buckets.q.out
deleted file mode 100644
index 6bcfd0a..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/rcfile_buckets.q.out
+++ /dev/null
@@ -1,183 +0,0 @@
-PREHOOK: query: DROP TABLE blobstore_source
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE blobstore_source
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE blobstore_source(a STRING, b STRING, c DOUBLE)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY ' '
-COLLECTION ITEMS TERMINATED BY '\t'
-LINES TERMINATED BY '\n'
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/rcfile_buckets/blobstore_source
-PREHOOK: Output: database:default
-PREHOOK: Output: default@blobstore_source
-POSTHOOK: query: CREATE TABLE blobstore_source(a STRING, b STRING, c DOUBLE)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY ' '
-COLLECTION ITEMS TERMINATED BY '\t'
-LINES TERMINATED BY '\n'
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/rcfile_buckets/blobstore_source
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@blobstore_source
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/3col_data.txt' INTO TABLE blobstore_source
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@blobstore_source
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/3col_data.txt' INTO TABLE blobstore_source
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@blobstore_source
-PREHOOK: query: DROP TABLE rcfile_buckets
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE rcfile_buckets
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE rcfile_buckets (a STRING, value DOUBLE)
-PARTITIONED BY (b STRING)
-CLUSTERED BY (a) INTO 10 BUCKETS
-STORED AS RCFILE
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/rcfile_buckets/rcfile_buckets
-PREHOOK: Output: database:default
-PREHOOK: Output: default@rcfile_buckets
-POSTHOOK: query: CREATE TABLE rcfile_buckets (a STRING, value DOUBLE)
-PARTITIONED BY (b STRING)
-CLUSTERED BY (a) INTO 10 BUCKETS
-STORED AS RCFILE
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/rcfile_buckets/rcfile_buckets
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@rcfile_buckets
-PREHOOK: query: INSERT OVERWRITE TABLE rcfile_buckets
-PARTITION (b)
-SELECT a, c, b FROM blobstore_source
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_source
-PREHOOK: Output: default@rcfile_buckets
-POSTHOOK: query: INSERT OVERWRITE TABLE rcfile_buckets
-PARTITION (b)
-SELECT a, c, b FROM blobstore_source
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_source
-POSTHOOK: Output: default@rcfile_buckets@b=abc
-POSTHOOK: Output: default@rcfile_buckets@b=ajss
-POSTHOOK: Output: default@rcfile_buckets@b=data
-POSTHOOK: Output: default@rcfile_buckets@b=def
-POSTHOOK: Output: default@rcfile_buckets@b=djns
-POSTHOOK: Output: default@rcfile_buckets@b=ne
-POSTHOOK: Output: default@rcfile_buckets@b=random
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=abc).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=abc).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=ajss).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=ajss).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=data).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=data).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=def).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=def).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=djns).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=djns).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=ne).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=ne).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=random).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=random).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-PREHOOK: query: SELECT * FROM rcfile_buckets
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_buckets
-PREHOOK: Input: default@rcfile_buckets@b=abc
-PREHOOK: Input: default@rcfile_buckets@b=ajss
-PREHOOK: Input: default@rcfile_buckets@b=data
-PREHOOK: Input: default@rcfile_buckets@b=def
-PREHOOK: Input: default@rcfile_buckets@b=djns
-PREHOOK: Input: default@rcfile_buckets@b=ne
-PREHOOK: Input: default@rcfile_buckets@b=random
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM rcfile_buckets
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_buckets
-POSTHOOK: Input: default@rcfile_buckets@b=abc
-POSTHOOK: Input: default@rcfile_buckets@b=ajss
-POSTHOOK: Input: default@rcfile_buckets@b=data
-POSTHOOK: Input: default@rcfile_buckets@b=def
-POSTHOOK: Input: default@rcfile_buckets@b=djns
-POSTHOOK: Input: default@rcfile_buckets@b=ne
-POSTHOOK: Input: default@rcfile_buckets@b=random
-#### A masked pattern was here ####
-1	10.5	abc
-3	90.23232	ajss
-6	3.002	data
-2	11.5	def
-4	89.02002	djns
-7	71.9084	ne
-5	2.99	random
-PREHOOK: query: INSERT INTO TABLE rcfile_buckets
-PARTITION (b)
-SELECT a, c, b FROM blobstore_source
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_source
-PREHOOK: Output: default@rcfile_buckets
-POSTHOOK: query: INSERT INTO TABLE rcfile_buckets
-PARTITION (b)
-SELECT a, c, b FROM blobstore_source
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_source
-POSTHOOK: Output: default@rcfile_buckets@b=abc
-POSTHOOK: Output: default@rcfile_buckets@b=ajss
-POSTHOOK: Output: default@rcfile_buckets@b=data
-POSTHOOK: Output: default@rcfile_buckets@b=def
-POSTHOOK: Output: default@rcfile_buckets@b=djns
-POSTHOOK: Output: default@rcfile_buckets@b=ne
-POSTHOOK: Output: default@rcfile_buckets@b=random
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=abc).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=abc).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=ajss).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=ajss).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=data).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=data).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=def).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=def).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=djns).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=djns).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=ne).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=ne).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=random).a SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: rcfile_buckets PARTITION(b=random).value SIMPLE [(blobstore_source)blobstore_source.FieldSchema(name:c, type:double, comment:null), ]
-PREHOOK: query: SELECT * FROM rcfile_buckets
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_buckets
-PREHOOK: Input: default@rcfile_buckets@b=abc
-PREHOOK: Input: default@rcfile_buckets@b=ajss
-PREHOOK: Input: default@rcfile_buckets@b=data
-PREHOOK: Input: default@rcfile_buckets@b=def
-PREHOOK: Input: default@rcfile_buckets@b=djns
-PREHOOK: Input: default@rcfile_buckets@b=ne
-PREHOOK: Input: default@rcfile_buckets@b=random
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM rcfile_buckets
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_buckets
-POSTHOOK: Input: default@rcfile_buckets@b=abc
-POSTHOOK: Input: default@rcfile_buckets@b=ajss
-POSTHOOK: Input: default@rcfile_buckets@b=data
-POSTHOOK: Input: default@rcfile_buckets@b=def
-POSTHOOK: Input: default@rcfile_buckets@b=djns
-POSTHOOK: Input: default@rcfile_buckets@b=ne
-POSTHOOK: Input: default@rcfile_buckets@b=random
-#### A masked pattern was here ####
-1	10.5	abc
-1	10.5	abc
-3	90.23232	ajss
-3	90.23232	ajss
-6	3.002	data
-6	3.002	data
-2	11.5	def
-2	11.5	def
-4	89.02002	djns
-4	89.02002	djns
-7	71.9084	ne
-7	71.9084	ne
-5	2.99	random
-5	2.99	random

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-blobstore/src/test/results/clientpositive/rcfile_format_nonpart.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/rcfile_format_nonpart.q.out b/itests/hive-blobstore/src/test/results/clientpositive/rcfile_format_nonpart.q.out
deleted file mode 100644
index 44a1f11..0000000
--- a/itests/hive-blobstore/src/test/results/clientpositive/rcfile_format_nonpart.q.out
+++ /dev/null
@@ -1,195 +0,0 @@
-PREHOOK: query: DROP TABLE blobstore_source
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE blobstore_source
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE blobstore_source(a STRING, b STRING, c DOUBLE)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY ' '
-COLLECTION ITEMS TERMINATED BY '\t'
-LINES TERMINATED BY '\n'
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/rcfile_format_nonpart/blobstore_source
-PREHOOK: Output: database:default
-PREHOOK: Output: default@blobstore_source
-POSTHOOK: query: CREATE TABLE blobstore_source(a STRING, b STRING, c DOUBLE)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY ' '
-COLLECTION ITEMS TERMINATED BY '\t'
-LINES TERMINATED BY '\n'
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/rcfile_format_nonpart/blobstore_source
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@blobstore_source
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/3col_data.txt' INTO TABLE blobstore_source
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@blobstore_source
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/3col_data.txt' INTO TABLE blobstore_source
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@blobstore_source
-PREHOOK: query: DROP TABLE rcfile_table
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE rcfile_table
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE rcfile_table (a INT, b STRING, value DOUBLE) STORED AS RCFILE
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: ### test.blobstore.path ###/rcfile_format_nonpart/rcfile_table
-PREHOOK: Output: database:default
-PREHOOK: Output: default@rcfile_table
-POSTHOOK: query: CREATE TABLE rcfile_table (a INT, b STRING, value DOUBLE) STORED AS RCFILE
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: ### test.blobstore.path ###/rcfile_format_nonpart/rcfile_table
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@rcfile_table
-PREHOOK: query: INSERT OVERWRITE TABLE rcfile_table
-SELECT * FROM blobstore_source
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_source
-PREHOOK: Output: default@rcfile_table
-POSTHOOK: query: INSERT OVERWRITE TABLE rcfile_table
-SELECT * FROM blobstore_source
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_source
-POSTHOOK: Output: default@rcfile_table
-PREHOOK: query: SELECT * FROM rcfile_table
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM rcfile_table
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_table
-#### A masked pattern was here ####
-1	abc	10.5
-2	def	11.5
-3	ajss	90.23232
-4	djns	89.02002
-5	random	2.99
-6	data	3.002
-7	ne	71.9084
-PREHOOK: query: SELECT a FROM rcfile_table GROUP BY a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a FROM rcfile_table GROUP BY a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_table
-#### A masked pattern was here ####
-1
-2
-3
-4
-5
-6
-7
-PREHOOK: query: SELECT b FROM rcfile_table GROUP BY b
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT b FROM rcfile_table GROUP BY b
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_table
-#### A masked pattern was here ####
-abc
-ajss
-data
-def
-djns
-ne
-random
-PREHOOK: query: SELECT VALUE FROM rcfile_table GROUP BY VALUE
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT VALUE FROM rcfile_table GROUP BY VALUE
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_table
-#### A masked pattern was here ####
-2.99
-3.002
-10.5
-11.5
-71.9084
-89.02002
-90.23232
-PREHOOK: query: INSERT INTO TABLE rcfile_table
-SELECT * FROM blobstore_source
-PREHOOK: type: QUERY
-PREHOOK: Input: default@blobstore_source
-PREHOOK: Output: default@rcfile_table
-POSTHOOK: query: INSERT INTO TABLE rcfile_table
-SELECT * FROM blobstore_source
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@blobstore_source
-POSTHOOK: Output: default@rcfile_table
-PREHOOK: query: SELECT * FROM rcfile_table
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM rcfile_table
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_table
-#### A masked pattern was here ####
-1	abc	10.5
-2	def	11.5
-3	ajss	90.23232
-4	djns	89.02002
-5	random	2.99
-6	data	3.002
-7	ne	71.9084
-1	abc	10.5
-2	def	11.5
-3	ajss	90.23232
-4	djns	89.02002
-5	random	2.99
-6	data	3.002
-7	ne	71.9084
-PREHOOK: query: SELECT a FROM rcfile_table GROUP BY a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a FROM rcfile_table GROUP BY a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_table
-#### A masked pattern was here ####
-1
-2
-3
-4
-5
-6
-7
-PREHOOK: query: SELECT b FROM rcfile_table GROUP BY b
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT b FROM rcfile_table GROUP BY b
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_table
-#### A masked pattern was here ####
-abc
-ajss
-data
-def
-djns
-ne
-random
-PREHOOK: query: SELECT value FROM rcfile_table GROUP BY value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@rcfile_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT value FROM rcfile_table GROUP BY value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@rcfile_table
-#### A masked pattern was here ####
-2.99
-3.002
-10.5
-11.5
-71.9084
-89.02002
-90.23232


http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/postgres/hive-schema-2.3.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/hive-schema-2.3.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-schema-2.3.0.postgres.sql
deleted file mode 100644
index a6d976f..0000000
--- a/metastore/scripts/upgrade/postgres/hive-schema-2.3.0.postgres.sql
+++ /dev/null
@@ -1,1478 +0,0 @@
---
--- PostgreSQL database dump
---
-
-SET statement_timeout = 0;
-SET client_encoding = 'UTF8';
-SET standard_conforming_strings = off;
-SET check_function_bodies = false;
-SET client_min_messages = warning;
-SET escape_string_warning = off;
-
-SET search_path = public, pg_catalog;
-
-SET default_tablespace = '';
-
-SET default_with_oids = false;
-
---
--- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "BUCKETING_COLS" (
-    "SD_ID" bigint NOT NULL,
-    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "CDS" (
-    "CD_ID" bigint NOT NULL
-);
-
-
---
--- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "COLUMNS_V2" (
-    "CD_ID" bigint NOT NULL,
-    "COMMENT" character varying(4000),
-    "COLUMN_NAME" character varying(767) NOT NULL,
-    "TYPE_NAME" text,
-    "INTEGER_IDX" integer NOT NULL
-);
-
-
---
--- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DATABASE_PARAMS" (
-    "DB_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(180) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DBS" (
-    "DB_ID" bigint NOT NULL,
-    "DESC" character varying(4000) DEFAULT NULL::character varying,
-    "DB_LOCATION_URI" character varying(4000) NOT NULL,
-    "NAME" character varying(128) DEFAULT NULL::character varying,
-    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
-    "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying
-);
-
-
---
--- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DB_PRIVS" (
-    "DB_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DB_ID" bigint,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "GLOBAL_PRIVS" (
-    "USER_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "IDXS" (
-    "INDEX_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DEFERRED_REBUILD" boolean NOT NULL,
-    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
-    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
-    "INDEX_TBL_ID" bigint,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "ORIG_TBL_ID" bigint,
-    "SD_ID" bigint
-);
-
-
---
--- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "INDEX_PARAMS" (
-    "INDEX_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "NUCLEUS_TABLES" (
-    "CLASS_NAME" character varying(128) NOT NULL,
-    "TABLE_NAME" character varying(128) NOT NULL,
-    "TYPE" character varying(4) NOT NULL,
-    "OWNER" character varying(2) NOT NULL,
-    "VERSION" character varying(20) NOT NULL,
-    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITIONS" (
-    "PART_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
-    "SD_ID" bigint,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_EVENTS" (
-    "PART_NAME_ID" bigint NOT NULL,
-    "DB_NAME" character varying(128),
-    "EVENT_TIME" bigint NOT NULL,
-    "EVENT_TYPE" integer NOT NULL,
-    "PARTITION_NAME" character varying(767),
-    "TBL_NAME" character varying(256)
-);
-
-
---
--- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_KEYS" (
-    "TBL_ID" bigint NOT NULL,
-    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
-    "PKEY_NAME" character varying(128) NOT NULL,
-    "PKEY_TYPE" character varying(767) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_KEY_VALS" (
-    "PART_ID" bigint NOT NULL,
-    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_PARAMS" (
-    "PART_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_COL_PRIVS" (
-    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_ID" bigint,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_PRIVS" (
-    "PART_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_ID" bigint,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "ROLES" (
-    "ROLE_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
-    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "ROLE_MAP" (
-    "ROLE_GRANT_ID" bigint NOT NULL,
-    "ADD_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "ROLE_ID" bigint
-);
-
-
---
--- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SDS" (
-    "SD_ID" bigint NOT NULL,
-    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
-    "IS_COMPRESSED" boolean NOT NULL,
-    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
-    "NUM_BUCKETS" bigint NOT NULL,
-    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
-    "SERDE_ID" bigint,
-    "CD_ID" bigint,
-    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
-);
-
-
---
--- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SD_PARAMS" (
-    "SD_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" text DEFAULT NULL
-);
-
-
---
--- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SEQUENCE_TABLE" (
-    "SEQUENCE_NAME" character varying(255) NOT NULL,
-    "NEXT_VAL" bigint NOT NULL
-);
-
-
---
--- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SERDES" (
-    "SERDE_ID" bigint NOT NULL,
-    "NAME" character varying(128) DEFAULT NULL::character varying,
-    "SLIB" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SERDE_PARAMS" (
-    "SERDE_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" text DEFAULT NULL
-);
-
-
---
--- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SORT_COLS" (
-    "SD_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
-    "ORDER" bigint NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TABLE_PARAMS" (
-    "TBL_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" text DEFAULT NULL
-);
-
-
---
--- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBLS" (
-    "TBL_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DB_ID" bigint,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "OWNER" character varying(767) DEFAULT NULL::character varying,
-    "RETENTION" bigint NOT NULL,
-    "SD_ID" bigint,
-    "TBL_NAME" character varying(256) DEFAULT NULL::character varying,
-    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "VIEW_EXPANDED_TEXT" text,
-    "VIEW_ORIGINAL_TEXT" text,
-    "IS_REWRITE_ENABLED" boolean NOT NULL
-);
-
-
---
--- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBL_COL_PRIVS" (
-    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBL_PRIVS" (
-    "TBL_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TYPES" (
-    "TYPES_ID" bigint NOT NULL,
-    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
-    "TYPE1" character varying(767) DEFAULT NULL::character varying,
-    "TYPE2" character varying(767) DEFAULT NULL::character varying
-);
-
-
---
--- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TYPE_FIELDS" (
-    "TYPE_NAME" bigint NOT NULL,
-    "COMMENT" character varying(256) DEFAULT NULL::character varying,
-    "FIELD_NAME" character varying(128) NOT NULL,
-    "FIELD_TYPE" character varying(767) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_STRING_LIST" (
-    "STRING_LIST_ID" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
-    "STRING_LIST_ID" bigint NOT NULL,
-    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_COL_NAMES" (
-    "SD_ID" bigint NOT NULL,
-    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
-    "SD_ID" bigint NOT NULL,
-    "STRING_LIST_ID_KID" bigint NOT NULL,
-    "LOCATION" character varying(4000) DEFAULT NULL::character varying
-);
-
-CREATE TABLE "SKEWED_VALUES" (
-    "SD_ID_OID" bigint NOT NULL,
-    "STRING_LIST_ID_EID" bigint NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: MASTER_KEYS, DELEGATION_TOKENS, TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE  "MASTER_KEYS"
-(
-    "KEY_ID" SERIAL,
-    "MASTER_KEY" varchar(767) NULL,
-    PRIMARY KEY ("KEY_ID")
-);
-
-CREATE TABLE  "DELEGATION_TOKENS"
-(
-    "TOKEN_IDENT" varchar(767) NOT NULL,
-    "TOKEN" varchar(767) NULL,
-    PRIMARY KEY ("TOKEN_IDENT")
-);
-
-CREATE TABLE "TAB_COL_STATS" (
- "CS_ID" bigint NOT NULL,
- "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
- "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
- "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
- "TBL_ID" bigint NOT NULL,
- "LONG_LOW_VALUE" bigint,
- "LONG_HIGH_VALUE" bigint,
- "DOUBLE_LOW_VALUE" double precision,
- "DOUBLE_HIGH_VALUE" double precision,
- "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "NUM_NULLS" bigint NOT NULL,
- "NUM_DISTINCTS" bigint,
- "AVG_COL_LEN" double precision,
- "MAX_COL_LEN" bigint,
- "NUM_TRUES" bigint,
- "NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
-);
-
---
--- Table structure for VERSION
---
-CREATE TABLE "VERSION" (
-  "VER_ID" bigint,
-  "SCHEMA_VERSION" character varying(127) NOT NULL,
-  "VERSION_COMMENT" character varying(255) NOT NULL
-);
-
---
--- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_COL_STATS" (
- "CS_ID" bigint NOT NULL,
- "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
- "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
- "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
- "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
- "PART_ID" bigint NOT NULL,
- "LONG_LOW_VALUE" bigint,
- "LONG_HIGH_VALUE" bigint,
- "DOUBLE_LOW_VALUE" double precision,
- "DOUBLE_HIGH_VALUE" double precision,
- "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "NUM_NULLS" bigint NOT NULL,
- "NUM_DISTINCTS" bigint,
- "AVG_COL_LEN" double precision,
- "MAX_COL_LEN" bigint,
- "NUM_TRUES" bigint,
- "NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
-);
-
---
--- Table structure for FUNCS
---
-CREATE TABLE "FUNCS" (
-  "FUNC_ID" BIGINT NOT NULL,
-  "CLASS_NAME" VARCHAR(4000),
-  "CREATE_TIME" INTEGER NOT NULL,
-  "DB_ID" BIGINT,
-  "FUNC_NAME" VARCHAR(128),
-  "FUNC_TYPE" INTEGER NOT NULL,
-  "OWNER_NAME" VARCHAR(128),
-  "OWNER_TYPE" VARCHAR(10),
-  PRIMARY KEY ("FUNC_ID")
-);
-
---
--- Table structure for FUNC_RU
---
-CREATE TABLE "FUNC_RU" (
-  "FUNC_ID" BIGINT NOT NULL,
-  "RESOURCE_TYPE" INTEGER NOT NULL,
-  "RESOURCE_URI" VARCHAR(4000),
-  "INTEGER_IDX" INTEGER NOT NULL,
-  PRIMARY KEY ("FUNC_ID", "INTEGER_IDX")
-);
-
-CREATE TABLE "NOTIFICATION_LOG"
-(
-    "NL_ID" BIGINT NOT NULL,
-    "EVENT_ID" BIGINT NOT NULL,
-    "EVENT_TIME" INTEGER NOT NULL,
-    "EVENT_TYPE" VARCHAR(32) NOT NULL,
-    "DB_NAME" VARCHAR(128),
-    "TBL_NAME" VARCHAR(256),
-    "MESSAGE" text,
-    "MESSAGE_FORMAT" VARCHAR(16),
-    PRIMARY KEY ("NL_ID")
-);
-
-CREATE TABLE "NOTIFICATION_SEQUENCE"
-(
-    "NNI_ID" BIGINT NOT NULL,
-    "NEXT_EVENT_ID" BIGINT NOT NULL,
-    PRIMARY KEY ("NNI_ID")
-);
-
-CREATE TABLE "KEY_CONSTRAINTS"
-(
-  "CHILD_CD_ID" BIGINT,
-  "CHILD_INTEGER_IDX" BIGINT,
-  "CHILD_TBL_ID" BIGINT,
-  "PARENT_CD_ID" BIGINT NOT NULL,
-  "PARENT_INTEGER_IDX" BIGINT NOT NULL,
-  "PARENT_TBL_ID" BIGINT NOT NULL,
-  "POSITION" BIGINT NOT NULL,
-  "CONSTRAINT_NAME" VARCHAR(400) NOT NULL,
-  "CONSTRAINT_TYPE" SMALLINT NOT NULL,
-  "UPDATE_RULE" SMALLINT,
-  "DELETE_RULE"	SMALLINT,
-  "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL,
-  PRIMARY KEY ("CONSTRAINT_NAME", "POSITION")
-) ;
-
-CREATE INDEX "CONSTRAINTS_PARENT_TBLID_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("PARENT_TBL_ID");
-
---
--- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "BUCKETING_COLS"
-    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-
---
--- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "CDS"
-    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
-
-
---
--- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "COLUMNS_V2"
-    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
-
-
---
--- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DATABASE_PARAMS"
-    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
-
-
---
--- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DBS"
-    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
-
-
---
--- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
-
-
---
--- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "GLOBAL_PRIVS"
-    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "GLOBAL_PRIVS"
-    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
-
-
---
--- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
-
-
---
--- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "INDEX_PARAMS"
-    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
-
-
---
--- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "NUCLEUS_TABLES"
-    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
-
-
---
--- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
-
-
---
--- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_EVENTS"
-    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
-
-
---
--- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_KEYS"
-    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
-
-
---
--- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_KEY_VALS"
-    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
-
-
---
--- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_PARAMS"
-    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
-
-
---
--- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PART_COL_PRIVS"
-    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
-
-
---
--- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PART_PRIVS"
-    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
-
-
---
--- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLES"
-    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
-
-
---
--- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLES"
-    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
-
-
---
--- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
-
-
---
--- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
-
-
---
--- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SD_PARAMS"
-    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
-
-
---
--- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SEQUENCE_TABLE"
-    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
-
-
---
--- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SERDES"
-    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
-
-
---
--- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SERDE_PARAMS"
-    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
-
-
---
--- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SORT_COLS"
-    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-
---
--- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TABLE_PARAMS"
-    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
-
-
---
--- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
-
-
---
--- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBL_COL_PRIVS"
-    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
-
-
---
--- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBL_PRIVS"
-    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
-
-
---
--- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPES"
-    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
-
-
---
--- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPE_FIELDS"
-    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
-
-
-ALTER TABLE ONLY "SKEWED_COL_NAMES"
-    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
-
---
--- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
-
---
--- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
-
---
--- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
-
-
---
--- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
-
-
---
--- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
-
-
---
--- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DBS"
-    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
-
-
---
--- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPES"
-    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
-
-
---
--- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
-
-
---
--- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
-
-
---
--- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
-
-
---
--- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
-
-
---
--- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
-
-
---
--- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
-
-
---
--- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
-
-
---
--- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
-
-
---
--- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
-
-
---
--- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
-
-
---
--- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
-
-
---
--- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
-
-
---
--- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
-
-
---
--- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
-
-
---
--- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
-
-
---
--- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
-
-
---
--- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
-
-
---
--- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
-
-
---
--- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
-
-
---
--- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
-
-
---
--- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
-
-
---
--- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
-
-
---
--- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
-
-
---
--- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
-
-
---
--- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
-
-
---
--- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
-
-
---
--- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
-
---
--- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
-
---
--- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
-
---
--- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE UNIQUE INDEX "UNIQUEFUNCTION" ON "FUNCS" ("FUNC_NAME", "DB_ID");
-
---
--- Name: FUNCS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "FUNCS_N49" ON "FUNCS" ("DB_ID");
-
---
--- Name: FUNC_RU_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "FUNC_RU_N49" ON "FUNC_RU" ("FUNC_ID");
-
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "SKEWED_COL_NAMES"
-    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "BUCKETING_COLS"
-    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "COLUMNS_V2"
-    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
-
-
---
--- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "DATABASE_PARAMS"
-    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "INDEX_PARAMS"
-    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
-
-
---
--- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_KEYS"
-    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_KEY_VALS"
-    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_PARAMS"
-    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PART_COL_PRIVS"
-    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PART_PRIVS"
-    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
-
-
---
--- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
-
-
---
--- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
-
-
---
--- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SD_PARAMS"
-    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SERDE_PARAMS"
-    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
-
-
---
--- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SORT_COLS"
-    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TABLE_PARAMS"
-    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBL_COL_PRIVS"
-    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBL_PRIVS"
-    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TYPE_FIELDS"
-    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
-
---
--- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
-
--- Name: FUNCS_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
-ALTER TABLE ONLY "FUNCS"
-    ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE;
-
--- Name: FUNC_RU_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
-ALTER TABLE ONLY "FUNC_RU"
-    ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE;
-
---
--- Name: public; Type: ACL; Schema: -; Owner: hiveuser
---
-
-REVOKE ALL ON SCHEMA public FROM PUBLIC;
-GRANT ALL ON SCHEMA public TO PUBLIC;
-
---
--- PostgreSQL database dump complete
---
-
-------------------------------
--- Transaction and lock tables
-------------------------------
-\i hive-txn-schema-2.3.0.postgres.sql;
-
--- -----------------------------------------------------------------
--- Record schema version. Should be the last step in the init script
--- -----------------------------------------------------------------
-INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '2.3.0', 'Hive release version 2.3.0');

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql
deleted file mode 100644
index 49976d0..0000000
--- a/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql
+++ /dev/null
@@ -1,1478 +0,0 @@
---
--- PostgreSQL database dump
---
-
-SET statement_timeout = 0;
-SET client_encoding = 'UTF8';
-SET standard_conforming_strings = off;
-SET check_function_bodies = false;
-SET client_min_messages = warning;
-SET escape_string_warning = off;
-
-SET search_path = public, pg_catalog;
-
-SET default_tablespace = '';
-
-SET default_with_oids = false;
-
---
--- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "BUCKETING_COLS" (
-    "SD_ID" bigint NOT NULL,
-    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "CDS" (
-    "CD_ID" bigint NOT NULL
-);
-
-
---
--- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "COLUMNS_V2" (
-    "CD_ID" bigint NOT NULL,
-    "COMMENT" character varying(4000),
-    "COLUMN_NAME" character varying(767) NOT NULL,
-    "TYPE_NAME" text,
-    "INTEGER_IDX" integer NOT NULL
-);
-
-
---
--- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DATABASE_PARAMS" (
-    "DB_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(180) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DBS" (
-    "DB_ID" bigint NOT NULL,
-    "DESC" character varying(4000) DEFAULT NULL::character varying,
-    "DB_LOCATION_URI" character varying(4000) NOT NULL,
-    "NAME" character varying(128) DEFAULT NULL::character varying,
-    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
-    "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying
-);
-
-
---
--- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DB_PRIVS" (
-    "DB_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DB_ID" bigint,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "GLOBAL_PRIVS" (
-    "USER_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "IDXS" (
-    "INDEX_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DEFERRED_REBUILD" boolean NOT NULL,
-    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
-    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
-    "INDEX_TBL_ID" bigint,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "ORIG_TBL_ID" bigint,
-    "SD_ID" bigint
-);
-
-
---
--- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "INDEX_PARAMS" (
-    "INDEX_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "NUCLEUS_TABLES" (
-    "CLASS_NAME" character varying(128) NOT NULL,
-    "TABLE_NAME" character varying(128) NOT NULL,
-    "TYPE" character varying(4) NOT NULL,
-    "OWNER" character varying(2) NOT NULL,
-    "VERSION" character varying(20) NOT NULL,
-    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITIONS" (
-    "PART_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
-    "SD_ID" bigint,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_EVENTS" (
-    "PART_NAME_ID" bigint NOT NULL,
-    "DB_NAME" character varying(128),
-    "EVENT_TIME" bigint NOT NULL,
-    "EVENT_TYPE" integer NOT NULL,
-    "PARTITION_NAME" character varying(767),
-    "TBL_NAME" character varying(256)
-);
-
-
---
--- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_KEYS" (
-    "TBL_ID" bigint NOT NULL,
-    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
-    "PKEY_NAME" character varying(128) NOT NULL,
-    "PKEY_TYPE" character varying(767) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_KEY_VALS" (
-    "PART_ID" bigint NOT NULL,
-    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_PARAMS" (
-    "PART_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_COL_PRIVS" (
-    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_ID" bigint,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_PRIVS" (
-    "PART_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_ID" bigint,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "ROLES" (
-    "ROLE_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
-    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "ROLE_MAP" (
-    "ROLE_GRANT_ID" bigint NOT NULL,
-    "ADD_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "ROLE_ID" bigint
-);
-
-
---
--- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SDS" (
-    "SD_ID" bigint NOT NULL,
-    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
-    "IS_COMPRESSED" boolean NOT NULL,
-    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
-    "NUM_BUCKETS" bigint NOT NULL,
-    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
-    "SERDE_ID" bigint,
-    "CD_ID" bigint,
-    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
-);
-
-
---
--- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SD_PARAMS" (
-    "SD_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" text DEFAULT NULL
-);
-
-
---
--- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SEQUENCE_TABLE" (
-    "SEQUENCE_NAME" character varying(255) NOT NULL,
-    "NEXT_VAL" bigint NOT NULL
-);
-
-
---
--- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SERDES" (
-    "SERDE_ID" bigint NOT NULL,
-    "NAME" character varying(128) DEFAULT NULL::character varying,
-    "SLIB" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SERDE_PARAMS" (
-    "SERDE_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" text DEFAULT NULL
-);
-
-
---
--- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SORT_COLS" (
-    "SD_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
-    "ORDER" bigint NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TABLE_PARAMS" (
-    "TBL_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" text DEFAULT NULL
-);
-
-
---
--- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBLS" (
-    "TBL_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DB_ID" bigint,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "OWNER" character varying(767) DEFAULT NULL::character varying,
-    "RETENTION" bigint NOT NULL,
-    "SD_ID" bigint,
-    "TBL_NAME" character varying(256) DEFAULT NULL::character varying,
-    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "VIEW_EXPANDED_TEXT" text,
-    "VIEW_ORIGINAL_TEXT" text,
-    "IS_REWRITE_ENABLED" boolean NOT NULL
-);
-
-
---
--- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBL_COL_PRIVS" (
-    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBL_PRIVS" (
-    "TBL_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TYPES" (
-    "TYPES_ID" bigint NOT NULL,
-    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
-    "TYPE1" character varying(767) DEFAULT NULL::character varying,
-    "TYPE2" character varying(767) DEFAULT NULL::character varying
-);
-
-
---
--- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TYPE_FIELDS" (
-    "TYPE_NAME" bigint NOT NULL,
-    "COMMENT" character varying(256) DEFAULT NULL::character varying,
-    "FIELD_NAME" character varying(128) NOT NULL,
-    "FIELD_TYPE" character varying(767) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_STRING_LIST" (
-    "STRING_LIST_ID" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
-    "STRING_LIST_ID" bigint NOT NULL,
-    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_COL_NAMES" (
-    "SD_ID" bigint NOT NULL,
-    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
-    "SD_ID" bigint NOT NULL,
-    "STRING_LIST_ID_KID" bigint NOT NULL,
-    "LOCATION" character varying(4000) DEFAULT NULL::character varying
-);
-
-CREATE TABLE "SKEWED_VALUES" (
-    "SD_ID_OID" bigint NOT NULL,
-    "STRING_LIST_ID_EID" bigint NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: MASTER_KEYS, DELEGATION_TOKENS, TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE  "MASTER_KEYS"
-(
-    "KEY_ID" SERIAL,
-    "MASTER_KEY" varchar(767) NULL,
-    PRIMARY KEY ("KEY_ID")
-);
-
-CREATE TABLE  "DELEGATION_TOKENS"
-(
-    "TOKEN_IDENT" varchar(767) NOT NULL,
-    "TOKEN" varchar(767) NULL,
-    PRIMARY KEY ("TOKEN_IDENT")
-);
-
-CREATE TABLE "TAB_COL_STATS" (
- "CS_ID" bigint NOT NULL,
- "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
- "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
- "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
- "TBL_ID" bigint NOT NULL,
- "LONG_LOW_VALUE" bigint,
- "LONG_HIGH_VALUE" bigint,
- "DOUBLE_LOW_VALUE" double precision,
- "DOUBLE_HIGH_VALUE" double precision,
- "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "NUM_NULLS" bigint NOT NULL,
- "NUM_DISTINCTS" bigint,
- "AVG_COL_LEN" double precision,
- "MAX_COL_LEN" bigint,
- "NUM_TRUES" bigint,
- "NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
-);
-
---
--- Table structure for VERSION
---
-CREATE TABLE "VERSION" (
-  "VER_ID" bigint,
-  "SCHEMA_VERSION" character varying(127) NOT NULL,
-  "VERSION_COMMENT" character varying(255) NOT NULL
-);
-
---
--- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_COL_STATS" (
- "CS_ID" bigint NOT NULL,
- "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
- "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
- "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
- "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
- "PART_ID" bigint NOT NULL,
- "LONG_LOW_VALUE" bigint,
- "LONG_HIGH_VALUE" bigint,
- "DOUBLE_LOW_VALUE" double precision,
- "DOUBLE_HIGH_VALUE" double precision,
- "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "NUM_NULLS" bigint NOT NULL,
- "NUM_DISTINCTS" bigint,
- "AVG_COL_LEN" double precision,
- "MAX_COL_LEN" bigint,
- "NUM_TRUES" bigint,
- "NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
-);
-
---
--- Table structure for FUNCS
---
-CREATE TABLE "FUNCS" (
-  "FUNC_ID" BIGINT NOT NULL,
-  "CLASS_NAME" VARCHAR(4000),
-  "CREATE_TIME" INTEGER NOT NULL,
-  "DB_ID" BIGINT,
-  "FUNC_NAME" VARCHAR(128),
-  "FUNC_TYPE" INTEGER NOT NULL,
-  "OWNER_NAME" VARCHAR(128),
-  "OWNER_TYPE" VARCHAR(10),
-  PRIMARY KEY ("FUNC_ID")
-);
-
---
--- Table structure for FUNC_RU
---
-CREATE TABLE "FUNC_RU" (
-  "FUNC_ID" BIGINT NOT NULL,
-  "RESOURCE_TYPE" INTEGER NOT NULL,
-  "RESOURCE_URI" VARCHAR(4000),
-  "INTEGER_IDX" INTEGER NOT NULL,
-  PRIMARY KEY ("FUNC_ID", "INTEGER_IDX")
-);
-
-CREATE TABLE "NOTIFICATION_LOG"
-(
-    "NL_ID" BIGINT NOT NULL,
-    "EVENT_ID" BIGINT NOT NULL,
-    "EVENT_TIME" INTEGER NOT NULL,
-    "EVENT_TYPE" VARCHAR(32) NOT NULL,
-    "DB_NAME" VARCHAR(128),
-    "TBL_NAME" VARCHAR(256),
-    "MESSAGE" text,
-    "MESSAGE_FORMAT" VARCHAR(16),
-    PRIMARY KEY ("NL_ID")
-);
-
-CREATE TABLE "NOTIFICATION_SEQUENCE"
-(
-    "NNI_ID" BIGINT NOT NULL,
-    "NEXT_EVENT_ID" BIGINT NOT NULL,
-    PRIMARY KEY ("NNI_ID")
-);
-
-CREATE TABLE "KEY_CONSTRAINTS"
-(
-  "CHILD_CD_ID" BIGINT,
-  "CHILD_INTEGER_IDX" BIGINT,
-  "CHILD_TBL_ID" BIGINT,
-  "PARENT_CD_ID" BIGINT NOT NULL,
-  "PARENT_INTEGER_IDX" BIGINT NOT NULL,
-  "PARENT_TBL_ID" BIGINT NOT NULL,
-  "POSITION" BIGINT NOT NULL,
-  "CONSTRAINT_NAME" VARCHAR(400) NOT NULL,
-  "CONSTRAINT_TYPE" SMALLINT NOT NULL,
-  "UPDATE_RULE" SMALLINT,
-  "DELETE_RULE"	SMALLINT,
-  "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL,
-  PRIMARY KEY ("CONSTRAINT_NAME", "POSITION")
-) ;
-
-CREATE INDEX "CONSTRAINTS_PARENT_TBLID_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("PARENT_TBL_ID");
-
---
--- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "BUCKETING_COLS"
-    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-
---
--- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "CDS"
-    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
-
-
---
--- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "COLUMNS_V2"
-    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
-
-
---
--- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DATABASE_PARAMS"
-    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
-
-
---
--- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DBS"
-    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
-
-
---
--- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
-
-
---
--- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "GLOBAL_PRIVS"
-    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "GLOBAL_PRIVS"
-    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
-
-
---
--- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
-
-
---
--- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "INDEX_PARAMS"
-    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
-
-
---
--- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "NUCLEUS_TABLES"
-    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
-
-
---
--- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
-
-
---
--- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_EVENTS"
-    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
-
-
---
--- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_KEYS"
-    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
-
-
---
--- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_KEY_VALS"
-    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
-
-
---
--- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_PARAMS"
-    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
-
-
---
--- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PART_COL_PRIVS"
-    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
-
-
---
--- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PART_PRIVS"
-    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
-
-
---
--- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLES"
-    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
-
-
---
--- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLES"
-    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
-
-
---
--- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
-
-
---
--- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
-
-
---
--- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SD_PARAMS"
-    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
-
-
---
--- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SEQUENCE_TABLE"
-    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
-
-
---
--- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SERDES"
-    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
-
-
---
--- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SERDE_PARAMS"
-    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
-
-
---
--- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SORT_COLS"
-    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-
---
--- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TABLE_PARAMS"
-    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
-
-
---
--- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
-
-
---
--- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBL_COL_PRIVS"
-    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
-
-
---
--- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBL_PRIVS"
-    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
-
-
---
--- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPES"
-    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
-
-
---
--- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPE_FIELDS"
-    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
-
-
-ALTER TABLE ONLY "SKEWED_COL_NAMES"
-    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
-
---
--- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
-
---
--- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
-
---
--- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
-
-
---
--- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
-
-
---
--- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
-
-
---
--- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DBS"
-    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
-
-
---
--- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPES"
-    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
-
-
---
--- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
-
-
---
--- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
-
-
---
--- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
-
-
---
--- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
-
-
---
--- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
-
-
---
--- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
-
-
---
--- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
-
-
---
--- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
-
-
---
--- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
-
-
---
--- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
-
-
---
--- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
-
-
---
--- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
-
-
---
--- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
-
-
---
--- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
-
-
---
--- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
-
-
---
--- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
-
-
---
--- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
-
-
---
--- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
-
-
---
--- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
-
-
---
--- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
-
-
---
--- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
-
-
---
--- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
-
-
---
--- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
-
-
---
--- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
-
-
---
--- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
-
-
---
--- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
-
-
---
--- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
-
---
--- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
-
---
--- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
-
---
--- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE UNIQUE INDEX "UNIQUEFUNCTION" ON "FUNCS" ("FUNC_NAME", "DB_ID");
-
---
--- Name: FUNCS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "FUNCS_N49" ON "FUNCS" ("DB_ID");
-
---
--- Name: FUNC_RU_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "FUNC_RU_N49" ON "FUNC_RU" ("FUNC_ID");
-
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "SKEWED_COL_NAMES"
-    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "BUCKETING_COLS"
-    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "COLUMNS_V2"
-    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
-
-
---
--- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "DATABASE_PARAMS"
-    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "INDEX_PARAMS"
-    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
-
-
---
--- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_KEYS"
-    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_KEY_VALS"
-    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_PARAMS"
-    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PART_COL_PRIVS"
-    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PART_PRIVS"
-    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
-
-
---
--- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
-
-
---
--- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
-
-
---
--- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SD_PARAMS"
-    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SERDE_PARAMS"
-    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
-
-
---
--- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SORT_COLS"
-    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TABLE_PARAMS"
-    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBL_COL_PRIVS"
-    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBL_PRIVS"
-    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TYPE_FIELDS"
-    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
-
---
--- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
-
--- Name: FUNCS_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
-ALTER TABLE ONLY "FUNCS"
-    ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE;
-
--- Name: FUNC_RU_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
-ALTER TABLE ONLY "FUNC_RU"
-    ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE;
-
---
--- Name: public; Type: ACL; Schema: -; Owner: hiveuser
---
-
-REVOKE ALL ON SCHEMA public FROM PUBLIC;
-GRANT ALL ON SCHEMA public TO PUBLIC;
-
---
--- PostgreSQL database dump complete
---
-
-------------------------------
--- Transaction and lock tables
-------------------------------
-\i hive-txn-schema-3.0.0.postgres.sql;
-
--- -----------------------------------------------------------------
--- Record schema version. Should be the last step in the init script
--- -----------------------------------------------------------------
-INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '3.0.0', 'Hive release version 3.0.0');

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/scripts/upgrade/postgres/hive-txn-schema-2.2.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/hive-txn-schema-2.2.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-txn-schema-2.2.0.postgres.sql
index 8471c32..63ce2ee 100644
--- a/metastore/scripts/upgrade/postgres/hive-txn-schema-2.2.0.postgres.sql
+++ b/metastore/scripts/upgrade/postgres/hive-txn-schema-2.2.0.postgres.sql
@@ -40,7 +40,7 @@ CREATE TABLE TXN_COMPONENTS (
 CREATE TABLE COMPLETED_TXN_COMPONENTS (
   CTC_TXNID bigint,
   CTC_DATABASE varchar(128) NOT NULL,
-  CTC_TABLE varchar(256),
+  CTC_TABLE varchar(128),
   CTC_PARTITION varchar(767)
 );
 


http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java
index 5f3baab..339da07 100644
--- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java
+++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java
@@ -16,15 +16,20 @@
  */
 package org.apache.hadoop.hive.accumulo.predicate;
 
-import com.google.common.collect.Lists;
+import static org.junit.Assert.assertNotNull;
+
+import java.sql.Date;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.accumulo.AccumuloHiveConstants;
-import org.apache.hadoop.hive.accumulo.TestAccumuloDefaultIndexScanner;
 import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding;
 import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloRowIdColumnMapping;
-import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
 import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
 import org.apache.hadoop.hive.ql.lib.Dispatcher;
@@ -37,29 +42,22 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.udf.UDFLike;
 import org.apache.hadoop.hive.ql.udf.UDFToString;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPPlus;
-import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.sql.Date;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-
-import static org.junit.Assert.assertNotNull;
+import com.google.common.collect.Lists;
 
 /**
  *
@@ -68,14 +66,12 @@ public class TestAccumuloRangeGenerator {
 
   private AccumuloPredicateHandler handler;
   private HiveAccumuloRowIdColumnMapping rowIdMapping;
-  private Configuration conf;
 
   @Before
   public void setup() {
     handler = AccumuloPredicateHandler.getInstance();
     rowIdMapping = new HiveAccumuloRowIdColumnMapping(AccumuloHiveConstants.ROWID,
-        ColumnEncoding.STRING,"row", TypeInfoFactory.stringTypeInfo.toString());
-    conf = new Configuration(true);
+        ColumnEncoding.STRING, "row", TypeInfoFactory.stringTypeInfo.toString());
   }
 
   @Test
@@ -112,7 +108,7 @@ public class TestAccumuloRangeGenerator {
     List<Range> expectedRanges = Arrays
         .asList(new Range(new Key("f"), true, new Key("m\0"), false));
 
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid");
     Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
         Collections.<Rule,NodeProcessor> emptyMap(), null);
     GraphWalker ogw = new DefaultGraphWalker(disp);
@@ -167,7 +163,7 @@ public class TestAccumuloRangeGenerator {
     // Should generate (-inf,+inf)
     List<Range> expectedRanges = Arrays.asList(new Range());
 
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid");
     Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
         Collections.<Rule,NodeProcessor> emptyMap(), null);
     GraphWalker ogw = new DefaultGraphWalker(disp);
@@ -240,7 +236,7 @@ public class TestAccumuloRangeGenerator {
     // Should generate ['q', +inf)
     List<Range> expectedRanges = Arrays.asList(new Range(new Key("q"), true, null, false));
 
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid");
     Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
         Collections.<Rule,NodeProcessor> emptyMap(), null);
     GraphWalker ogw = new DefaultGraphWalker(disp);
@@ -295,7 +291,7 @@ public class TestAccumuloRangeGenerator {
     // Should generate [f,+inf)
     List<Range> expectedRanges = Arrays.asList(new Range(new Key("f"), true, null, false));
 
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid");
     Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
         Collections.<Rule,NodeProcessor> emptyMap(), null);
     GraphWalker ogw = new DefaultGraphWalker(disp);
@@ -353,7 +349,7 @@ public class TestAccumuloRangeGenerator {
     List<Range> expectedRanges = Arrays.asList(new Range(new Key("2014-01-01"), true, new Key(
         "2014-07-01"), false));
 
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid");
     Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
         Collections.<Rule,NodeProcessor> emptyMap(), null);
     GraphWalker ogw = new DefaultGraphWalker(disp);
@@ -401,7 +397,7 @@ public class TestAccumuloRangeGenerator {
     ExprNodeGenericFuncDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo,
         new GenericUDFOPEqualOrGreaterThan(), Arrays.asList(key, cast));
 
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "key");
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "key");
     Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
         Collections.<Rule,NodeProcessor> emptyMap(), null);
     GraphWalker ogw = new DefaultGraphWalker(disp);
@@ -450,7 +446,7 @@ public class TestAccumuloRangeGenerator {
     ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo,
         new GenericUDFOPAnd(), bothFilters);
 
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid");
     Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
         Collections.<Rule,NodeProcessor> emptyMap(), null);
     GraphWalker ogw = new DefaultGraphWalker(disp);
@@ -468,161 +464,4 @@ public class TestAccumuloRangeGenerator {
     Object result = nodeOutput.get(both);
     Assert.assertNull(result);
   }
-
-  @Test
-  public void testRangeOverStringIndexedField() throws Exception {
-    // age >= '10'
-    ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "age", null, false);
-    ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "10");
-    List<ExprNodeDesc> children = Lists.newArrayList();
-    children.add(column);
-    children.add(constant);
-    ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo,
-        new GenericUDFOPEqualOrGreaterThan(), children);
-    assertNotNull(node);
-
-    // age <= '50'
-    ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "age", null,
-        false);
-    ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "50");
-    List<ExprNodeDesc> children2 = Lists.newArrayList();
-    children2.add(column2);
-    children2.add(constant2);
-    ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo,
-        new GenericUDFOPEqualOrLessThan(), children2);
-    assertNotNull(node2);
-
-    // And UDF
-    List<ExprNodeDesc> bothFilters = Lists.newArrayList();
-    bothFilters.add(node);
-    bothFilters.add(node2);
-    ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo,
-        new GenericUDFOPAnd(), bothFilters);
-
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
-    rangeGenerator.setIndexScanner(TestAccumuloDefaultIndexScanner.buildMockHandler(10));
-    Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
-        Collections.<Rule,NodeProcessor> emptyMap(), null);
-    GraphWalker ogw = new DefaultGraphWalker(disp);
-    ArrayList<Node> topNodes = new ArrayList<Node>();
-    topNodes.add(both);
-    HashMap<Node,Object> nodeOutput = new HashMap<Node,Object>();
-
-    try {
-      ogw.startWalking(topNodes, nodeOutput);
-    } catch (SemanticException ex) {
-      throw new RuntimeException(ex);
-    }
-
-    // Filters are using an index which should match 3 rows
-    Object result = nodeOutput.get(both);
-    if ( result instanceof  List) {
-      List results = (List) result;
-      Assert.assertEquals(3, results.size());
-      Assert.assertTrue("does not contain row1", results.contains(new Range("row1")));
-      Assert.assertTrue("does not contain row2", results.contains(new Range("row2")));
-      Assert.assertTrue("does not contain row3", results.contains(new Range("row3")));
-    } else {
-      Assert.fail("Results not a list");
-    }
-  }
-
-  @Test
-  public void testRangeOverIntegerIndexedField() throws Exception {
-    // cars >= 2
-    ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "cars", null, false);
-    ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 2);
-    List<ExprNodeDesc> children = Lists.newArrayList();
-    children.add(column);
-    children.add(constant);
-    ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo,
-        new GenericUDFOPEqualOrGreaterThan(), children);
-    assertNotNull(node);
-
-    //  cars <= 9
-    ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "cars", null,
-        false);
-    ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 9);
-    List<ExprNodeDesc> children2 = Lists.newArrayList();
-    children2.add(column2);
-    children2.add(constant2);
-    ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo,
-        new GenericUDFOPEqualOrLessThan(), children2);
-    assertNotNull(node2);
-
-    // And UDF
-    List<ExprNodeDesc> bothFilters = Lists.newArrayList();
-    bothFilters.add(node);
-    bothFilters.add(node2);
-    ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo,
-        new GenericUDFOPAnd(), bothFilters);
-
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
-    rangeGenerator.setIndexScanner(TestAccumuloDefaultIndexScanner.buildMockHandler(10));
-    Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
-        Collections.<Rule,NodeProcessor> emptyMap(), null);
-    GraphWalker ogw = new DefaultGraphWalker(disp);
-    ArrayList<Node> topNodes = new ArrayList<Node>();
-    topNodes.add(both);
-    HashMap<Node,Object> nodeOutput = new HashMap<Node,Object>();
-
-    try {
-      ogw.startWalking(topNodes, nodeOutput);
-    } catch (SemanticException ex) {
-      throw new RuntimeException(ex);
-    }
-
-    // Filters are using an index which should match 3 rows
-    Object result = nodeOutput.get(both);
-    if ( result instanceof  List) {
-      List results = (List) result;
-      Assert.assertEquals(3, results.size());
-      Assert.assertTrue("does not contain row1", results.contains(new Range("row1")));
-      Assert.assertTrue("does not contain row2", results.contains(new Range("row2")));
-      Assert.assertTrue("does not contain row3", results.contains(new Range("row3")));
-    } else {
-      Assert.fail("Results not a list");
-    }
-  }
-
-  @Test
-  public void testRangeOverBooleanIndexedField() throws Exception {
-    // mgr == true
-    ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.booleanTypeInfo, "mgr", null, false);
-    ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.booleanTypeInfo, true);
-    List<ExprNodeDesc> children = Lists.newArrayList();
-    children.add(column);
-    children.add(constant);
-    ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo,
-        new GenericUDFOPEqual(), children);
-    assertNotNull(node);
-
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
-    rangeGenerator.setIndexScanner(TestAccumuloDefaultIndexScanner.buildMockHandler(10));
-    Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
-        Collections.<Rule,NodeProcessor> emptyMap(), null);
-    GraphWalker ogw = new DefaultGraphWalker(disp);
-    ArrayList<Node> topNodes = new ArrayList<Node>();
-    topNodes.add(node);
-    HashMap<Node,Object> nodeOutput = new HashMap<Node,Object>();
-
-    try {
-      ogw.startWalking(topNodes, nodeOutput);
-    } catch (SemanticException ex) {
-      throw new RuntimeException(ex);
-    }
-
-    // Filters are using an index which should match 2 rows
-    Object result = nodeOutput.get(node);
-    if ( result instanceof  List) {
-      List results = (List) result;
-      Assert.assertEquals(2, results.size());
-      Assert.assertTrue("does not contain row1", results.contains( new Range( "row1")));
-      Assert.assertTrue("does not contain row3", results.contains( new Range( "row3")));
-    }
-    else {
-      Assert.fail("Results not a list");
-    }
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/test/queries/positive/accumulo_index.q
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/queries/positive/accumulo_index.q b/accumulo-handler/src/test/queries/positive/accumulo_index.q
deleted file mode 100644
index 52a33af..0000000
--- a/accumulo-handler/src/test/queries/positive/accumulo_index.q
+++ /dev/null
@@ -1,44 +0,0 @@
-DROP TABLE accumulo_index_test;
-
-CREATE TABLE accumulo_index_test (
-   rowid string,
-   active boolean,
-   num_offices tinyint,
-   num_personel smallint,
-   total_manhours int,
-   num_shareholders bigint,
-   eff_rating float,
-   err_rating double,
-   yearly_production decimal,
-   start_date date,
-   address varchar(100),
-   phone char(13),
-   last_update timestamp )
-ROW FORMAT SERDE 'org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe'
-STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
-WITH SERDEPROPERTIES (
-   "accumulo.columns.mapping" = ":rowID,a:act,a:off,a:per,a:mhs,a:shs,a:eff,a:err,a:yp,a:sd,a:addr,a:ph,a:lu",
-   "accumulo.table.name"="accumulo_index_test",
-   "accumulo.indexed.columns"="*",
-   "accumulo.indextable.name"="accumulo_index_idx"
- );
-
-
-insert into accumulo_index_test values( "row1", true, 55, 107, 555555, 1223232332,
-                                 4.5, 0.8, 1232223, "2001-10-10", "123 main street",
-                                 "555-555-5555", "2016-02-22 12:45:07.000000000");
-
-select * from accumulo_index_test where active = 'true';
-select * from accumulo_index_test where num_offices = 55;
-select * from accumulo_index_test where num_personel = 107;
-select * from accumulo_index_test where total_manhours < 555556;
-select * from accumulo_index_test where num_shareholders >= 1223232331;
-select * from accumulo_index_test where eff_rating <= 4.5;
-select * from accumulo_index_test where err_rating >= 0.8;
-select * from accumulo_index_test where yearly_production = 1232223;
-select * from accumulo_index_test where start_date = "2001-10-10";
-select * from accumulo_index_test where address >= "100 main street";
-select * from accumulo_index_test where phone <= "555-555-5555";
-select * from accumulo_index_test where last_update >= "2016-02-22 12:45:07";
-
-DROP TABLE accumulo_index_test;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/accumulo-handler/src/test/results/positive/accumulo_index.q.out
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/results/positive/accumulo_index.q.out b/accumulo-handler/src/test/results/positive/accumulo_index.q.out
deleted file mode 100644
index 5cb3d73..0000000
--- a/accumulo-handler/src/test/results/positive/accumulo_index.q.out
+++ /dev/null
@@ -1,180 +0,0 @@
-PREHOOK: query: DROP TABLE accumulo_index_test
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE accumulo_index_test
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE accumulo_index_test (
-   rowid string,
-   active boolean,
-   num_offices tinyint,
-   num_personel smallint,
-   total_manhours int,
-   num_shareholders bigint,
-   eff_rating float,
-   err_rating double,
-   yearly_production decimal,
-   start_date date,
-   address varchar(100),
-   phone char(13),
-   last_update timestamp )
-ROW FORMAT SERDE 'org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe'
-STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
-WITH SERDEPROPERTIES (
-   "accumulo.columns.mapping" = ":rowID,a:act,a:off,a:per,a:mhs,a:shs,a:eff,a:err,a:yp,a:sd,a:addr,a:ph,a:lu",
-   "accumulo.table.name"="accumulo_index_test",
-   "accumulo.indexed.columns"="*",
-   "accumulo.indextable.name"="accumulo_index_idx"
- )
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@accumulo_index_test
-POSTHOOK: query: CREATE TABLE accumulo_index_test (
-   rowid string,
-   active boolean,
-   num_offices tinyint,
-   num_personel smallint,
-   total_manhours int,
-   num_shareholders bigint,
-   eff_rating float,
-   err_rating double,
-   yearly_production decimal,
-   start_date date,
-   address varchar(100),
-   phone char(13),
-   last_update timestamp )
-ROW FORMAT SERDE 'org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe'
-STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
-WITH SERDEPROPERTIES (
-   "accumulo.columns.mapping" = ":rowID,a:act,a:off,a:per,a:mhs,a:shs,a:eff,a:err,a:yp,a:sd,a:addr,a:ph,a:lu",
-   "accumulo.table.name"="accumulo_index_test",
-   "accumulo.indexed.columns"="*",
-   "accumulo.indextable.name"="accumulo_index_idx"
- )
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@accumulo_index_test
-PREHOOK: query: insert into accumulo_index_test values( "row1", true, 55, 107, 555555, 1223232332,
-                                 4.5, 0.8, 1232223, "2001-10-10", "123 main street",
-                                 "555-555-5555", "2016-02-22 12:45:07.000000000")
-PREHOOK: type: QUERY
-PREHOOK: Output: default@accumulo_index_test
-POSTHOOK: query: insert into accumulo_index_test values( "row1", true, 55, 107, 555555, 1223232332,
-                                 4.5, 0.8, 1232223, "2001-10-10", "123 main street",
-                                 "555-555-5555", "2016-02-22 12:45:07.000000000")
-POSTHOOK: type: QUERY
-POSTHOOK: Output: default@accumulo_index_test
-PREHOOK: query: select * from accumulo_index_test where active = 'true'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-POSTHOOK: query: select * from accumulo_index_test where active = 'true'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
-PREHOOK: query: select * from accumulo_index_test where num_offices = 55
-PREHOOK: type: QUERY
-PREHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-POSTHOOK: query: select * from accumulo_index_test where num_offices = 55
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
-PREHOOK: query: select * from accumulo_index_test where num_personel = 107
-PREHOOK: type: QUERY
-PREHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-POSTHOOK: query: select * from accumulo_index_test where num_personel = 107
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
-PREHOOK: query: select * from accumulo_index_test where total_manhours < 555556
-PREHOOK: type: QUERY
-PREHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-POSTHOOK: query: select * from accumulo_index_test where total_manhours < 555556
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
-PREHOOK: query: select * from accumulo_index_test where num_shareholders >= 1223232331
-PREHOOK: type: QUERY
-PREHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-POSTHOOK: query: select * from accumulo_index_test where num_shareholders >= 1223232331
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
-PREHOOK: query: select * from accumulo_index_test where eff_rating <= 4.5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-POSTHOOK: query: select * from accumulo_index_test where eff_rating <= 4.5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
-PREHOOK: query: select * from accumulo_index_test where err_rating >= 0.8
-PREHOOK: type: QUERY
-PREHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-POSTHOOK: query: select * from accumulo_index_test where err_rating >= 0.8
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
-PREHOOK: query: select * from accumulo_index_test where yearly_production = 1232223
-PREHOOK: type: QUERY
-PREHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-POSTHOOK: query: select * from accumulo_index_test where yearly_production = 1232223
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
-PREHOOK: query: select * from accumulo_index_test where start_date = "2001-10-10"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-POSTHOOK: query: select * from accumulo_index_test where start_date = "2001-10-10"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
-PREHOOK: query: select * from accumulo_index_test where address >= "100 main street"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-POSTHOOK: query: select * from accumulo_index_test where address >= "100 main street"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
-PREHOOK: query: select * from accumulo_index_test where phone <= "555-555-5555"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-POSTHOOK: query: select * from accumulo_index_test where phone <= "555-555-5555"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
-PREHOOK: query: select * from accumulo_index_test where last_update >= "2016-02-22 12:45:07"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-POSTHOOK: query: select * from accumulo_index_test where last_update >= "2016-02-22 12:45:07"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@accumulo_index_test
-#### A masked pattern was here ####
-row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
-PREHOOK: query: DROP TABLE accumulo_index_test
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@accumulo_index_test
-PREHOOK: Output: default@accumulo_index_test
-POSTHOOK: query: DROP TABLE accumulo_index_test
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@accumulo_index_test
-POSTHOOK: Output: default@accumulo_index_test

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/beeline/pom.xml
----------------------------------------------------------------------
diff --git a/beeline/pom.xml b/beeline/pom.xml
index b0a9a0b..58ca92e 100644
--- a/beeline/pom.xml
+++ b/beeline/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/beeline/src/java/org/apache/hive/beeline/BeeLine.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index a589f33..3c8fccc 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -22,7 +22,6 @@
  */
 package org.apache.hive.beeline;
 
-import java.io.BufferedReader;
 import java.io.ByteArrayInputStream;
 import java.io.Closeable;
 import java.io.EOFException;
@@ -30,7 +29,6 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.InputStreamReader;
 import java.io.PrintStream;
 import java.io.SequenceInputStream;
 import java.lang.reflect.InvocationTargetException;
@@ -61,7 +59,6 @@ import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.ListIterator;
 import java.util.Map;
@@ -151,10 +148,6 @@ public class BeeLine implements Closeable {
   // Indicates if this instance of beeline is running in compatibility mode, or beeline mode
   private boolean isBeeLine = true;
 
-  // Indicates that we are in test mode.
-  // Print only the errors, the operation log and the query results.
-  private boolean isTestMode = false;
-
   private static final Options options = new Options();
 
   public static final String BEELINE_DEFAULT_JDBC_DRIVER = "org.apache.hive.jdbc.HiveDriver";
@@ -545,7 +538,6 @@ public class BeeLine implements Closeable {
       public void run() {
         try {
           if (history != null) {
-            history.setMaxSize(getOpts().getMaxHistoryRows());
             history.flush();
           }
         } catch (IOException e) {
@@ -1387,55 +1379,6 @@ public class BeeLine implements Closeable {
     return lineTrimmed.startsWith("#") || lineTrimmed.startsWith("--");
   }
 
-  String[] getCommands(File file) throws IOException {
-    List<String> cmds = new LinkedList<String>();
-    try (BufferedReader reader =
-             new BufferedReader(new InputStreamReader(new FileInputStream(file), "UTF-8"))) {
-      StringBuilder cmd = null;
-      while (true) {
-        String scriptLine = reader.readLine();
-
-        if (scriptLine == null) {
-          break;
-        }
-
-        String trimmedLine = scriptLine.trim();
-        if (getOpts().getTrimScripts()) {
-          scriptLine = trimmedLine;
-        }
-
-        if (cmd != null) {
-          // we're continuing an existing command
-          cmd.append("\n");
-          cmd.append(scriptLine);
-          if (trimmedLine.endsWith(";")) {
-            // this command has terminated
-            cmds.add(cmd.toString());
-            cmd = null;
-          }
-        } else {
-          // we're starting a new command
-          if (needsContinuation(scriptLine)) {
-            // multi-line
-            cmd = new StringBuilder(scriptLine);
-          } else {
-            // single-line
-            cmds.add(scriptLine);
-          }
-        }
-      }
-
-      if (cmd != null) {
-        // ### REVIEW: oops, somebody left the last command
-        // unterminated; should we fix it for them or complain?
-        // For now be nice and fix it.
-        cmd.append(";");
-        cmds.add(cmd.toString());
-      }
-    }
-    return cmds.toArray(new String[0]);
-  }
-
   /**
    * Print the specified message to the console
    *
@@ -2442,19 +2385,4 @@ public class BeeLine implements Closeable {
   public void setCurrentDatabase(String currentDatabase) {
     this.currentDatabase = currentDatabase;
   }
-
-  /**
-   * Setting the BeeLine into test mode.
-   * Print only the errors, the operation log and the query results.
-   * Should be used only by tests.
-   *
-   * @param isTestMode
-   */
-  void setIsTestMode(boolean isTestMode) {
-    this.isTestMode = isTestMode;
-  }
-
-  boolean isTestMode() {
-    return isTestMode;
-  }
 }

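A note on the script handling above: getCommands(File) reads a script file line by line and folds continuation lines into one command until a terminating semicolon appears (the same loop resurfaces inline in Commands.java further down in this revert). A self-contained sketch of that accumulation loop; needsContinuation() here is a simplified stand-in for BeeLine's own heuristic, and trimLines mirrors the trimScripts option:

    import java.io.BufferedReader;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;
    import java.util.ArrayList;
    import java.util.List;

    public class ScriptReaderSketch {
      // Rough stand-in for BeeLine.needsContinuation(): any non-comment line
      // that does not end with ';' starts a multi-line command.
      private static boolean needsContinuation(String line) {
        String trimmed = line.trim();
        return !trimmed.isEmpty() && !trimmed.startsWith("--")
            && !trimmed.startsWith("#") && !trimmed.endsWith(";");
      }

      public static List<String> readCommands(File file, boolean trimLines) throws IOException {
        List<String> cmds = new ArrayList<>();
        try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8))) {
          StringBuilder cmd = null;               // non-null while a command spans lines
          String line;
          while ((line = reader.readLine()) != null) {
            String trimmed = line.trim();
            if (trimLines) {
              line = trimmed;
            }
            if (cmd != null) {                    // continuing an existing command
              cmd.append("\n").append(line);
              if (trimmed.endsWith(";")) {        // this command has terminated
                cmds.add(cmd.toString());
                cmd = null;
              }
            } else if (needsContinuation(line)) { // starting a multi-line command
              cmd = new StringBuilder(line);
            } else {                              // complete single-line command
              cmds.add(line);
            }
          }
          if (cmd != null) {                      // be lenient about a missing final ';'
            cmds.add(cmd.append(";").toString());
          }
        }
        return cmds;
      }
    }

The leniency at the end (appending the missing ';') matches the behaviour described in the original "### REVIEW" comment.
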
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java b/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java
index f85d8a3..7e6846d 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java
@@ -45,7 +45,6 @@ import jline.Terminal;
 import jline.TerminalFactory;
 import jline.console.completer.Completer;
 import jline.console.completer.StringsCompleter;
-import jline.console.history.MemoryHistory;
 import org.apache.hadoop.hive.conf.HiveConf;
 
 class BeeLineOpts implements Completer {
@@ -62,7 +61,7 @@ class BeeLineOpts implements Completer {
   public static final int DEFAULT_MAX_COLUMN_WIDTH = 50;
   public static final int DEFAULT_INCREMENTAL_BUFFER_ROWS = 1000;
 
-  public static final String URL_ENV_PREFIX = "BEELINE_URL_";
+  public static String URL_ENV_PREFIX = "BEELINE_URL_";
 
   private final BeeLine beeLine;
   private boolean autosave = false;
@@ -101,7 +100,6 @@ class BeeLineOpts implements Completer {
 
   private final File rcFile = new File(saveDir(), "beeline.properties");
   private String historyFile = new File(saveDir(), "history").getAbsolutePath();
-  private int maxHistoryRows = MemoryHistory.DEFAULT_MAX_SIZE;
 
   private String scriptFile = null;
   private String[] initFiles = null;
@@ -433,17 +431,6 @@ class BeeLineOpts implements Completer {
     return historyFile;
   }
 
-  /**
-   * @param numRows - the number of rows to store in history file
-   */
-  public void setMaxHistoryRows(int numRows) {
-    this.maxHistoryRows = numRows;
-  }
-
-  public int getMaxHistoryRows() {
-    return maxHistoryRows;
-  }
-
   public void setScriptFile(String scriptFile) {
     this.scriptFile = scriptFile;
   }

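The maxHistoryRows option removed above bounds how many entries the jline history keeps before it is flushed to disk on shutdown (BeeLine calls history.setMaxSize(...) immediately before history.flush()). A minimal sketch of that pattern, assuming jline2's FileHistory, which extends MemoryHistory and so inherits setMaxSize and DEFAULT_MAX_SIZE:

    import java.io.File;
    import java.io.IOException;
    import jline.console.history.FileHistory;
    import jline.console.history.MemoryHistory;

    public class HistorySketch {
      public static void flushWithCap(File historyFile, int maxHistoryRows) throws IOException {
        FileHistory history = new FileHistory(historyFile);   // loads existing entries
        // Cap the in-memory history before persisting; DEFAULT_MAX_SIZE is jline's built-in default.
        history.setMaxSize(maxHistoryRows > 0 ? maxHistoryRows : MemoryHistory.DEFAULT_MAX_SIZE);
        history.flush();                                      // write the (possibly trimmed) entries back
      }
    }
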
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/beeline/src/java/org/apache/hive/beeline/Commands.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/Commands.java b/beeline/src/java/org/apache/hive/beeline/Commands.java
index 08d53ca..99ee82c 100644
--- a/beeline/src/java/org/apache/hive/beeline/Commands.java
+++ b/beeline/src/java/org/apache/hive/beeline/Commands.java
@@ -55,6 +55,7 @@ import java.util.TreeSet;
 
 import org.apache.hadoop.hive.common.cli.ShellCmdExecutor;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.conf.HiveVariableSource;
 import org.apache.hadoop.hive.conf.SystemVariables;
 import org.apache.hadoop.hive.conf.VariableSubstitution;
@@ -977,8 +978,7 @@ public class Commands {
           hasResults = ((CallableStatement) stmnt).execute();
         } else {
           stmnt = beeLine.createStatement();
-          // In test mode we want the operation logs regardless of the settings
-          if (!beeLine.isTestMode() && beeLine.getOpts().isSilent()) {
+          if (beeLine.getOpts().isSilent()) {
             hasResults = stmnt.execute(sql);
           } else {
             InPlaceUpdateStream.EventNotifier eventNotifier =
@@ -1221,61 +1221,46 @@ public class Commands {
     if (entireLineAsCommand) {
       cmdList.add(line);
     } else {
-      StringBuilder command = new StringBuilder();
+      StringBuffer command = new StringBuffer();
 
-      // Marker to track if there is starting double quote without an ending double quote
       boolean hasUnterminatedDoubleQuote = false;
+      boolean hasUntermindatedSingleQuote = false;
 
-      // Marker to track if there is starting single quote without an ending double quote
-      boolean hasUnterminatedSingleQuote = false;
-
-      // Index of the last seen semicolon in the given line
       int lastSemiColonIndex = 0;
       char[] lineChars = line.toCharArray();
 
-      // Marker to track if the previous character was an escape character
       boolean wasPrevEscape = false;
-
       int index = 0;
-
-      // Iterate through the line and invoke the addCmdPart method whenever a semicolon is seen that is not inside a
-      // quoted string
       for (; index < lineChars.length; index++) {
         switch (lineChars[index]) {
           case '\'':
-            // If a single quote is seen and the index is not inside a double quoted string and the previous character
-            // was not an escape, then update the hasUnterminatedSingleQuote flag
             if (!hasUnterminatedDoubleQuote && !wasPrevEscape) {
-              hasUnterminatedSingleQuote = !hasUnterminatedSingleQuote;
+              hasUntermindatedSingleQuote = !hasUntermindatedSingleQuote;
             }
             wasPrevEscape = false;
             break;
           case '\"':
-            // If a double quote is seen and the index is not inside a single quoted string and the previous character
-            // was not an escape, then update the hasUnterminatedDoubleQuote flag
-            if (!hasUnterminatedSingleQuote && !wasPrevEscape) {
+            if (!hasUntermindatedSingleQuote && !wasPrevEscape) {
               hasUnterminatedDoubleQuote = !hasUnterminatedDoubleQuote;
             }
             wasPrevEscape = false;
             break;
           case ';':
-            // If a semicolon is seen, and the line isn't inside a quoted string, then treat
-            // line[lastSemiColonIndex] to line[index] as a single command
-            if (!hasUnterminatedDoubleQuote && !hasUnterminatedSingleQuote) {
+            if (!hasUnterminatedDoubleQuote && !hasUntermindatedSingleQuote) {
               addCmdPart(cmdList, command, line.substring(lastSemiColonIndex, index));
               lastSemiColonIndex = index + 1;
             }
             wasPrevEscape = false;
             break;
           case '\\':
-            wasPrevEscape = !wasPrevEscape;
+            wasPrevEscape = true;
             break;
           default:
             wasPrevEscape = false;
             break;
         }
       }
-      // If the line doesn't end with a ; or if the line is empty, add the cmd part
+      // if the line doesn't end with a ; or if the line is empty, add the cmd part
       if (lastSemiColonIndex != index || lineChars.length == 0) {
         addCmdPart(cmdList, command, line.substring(lastSemiColonIndex, index));
       }
@@ -1287,7 +1272,7 @@ public class Commands {
    * Given a cmdpart (e.g. if a command spans multiple lines), add to the current command, and if
    * applicable add that command to the {@link List} of commands
    */
-  private void addCmdPart(List<String> cmdList, StringBuilder command, String cmdpart) {
+  private void addCmdPart(List<String> cmdList, StringBuffer command, String cmdpart) {
     if (cmdpart.endsWith("\\")) {
       command.append(cmdpart.substring(0, cmdpart.length() - 1)).append(";");
       return;
@@ -1342,12 +1327,7 @@ public class Commands {
       try {
         List<String> queryLogs = hiveStatement.getQueryLog();
         for (String log : queryLogs) {
-          if (!commands.beeLine.isTestMode()) {
-            commands.beeLine.info(log);
-          } else {
-            // In test mode print the logs to the output
-            commands.beeLine.output(log);
-          }
+          commands.beeLine.info(log);
         }
         if (!queryLogs.isEmpty()) {
           notifier.operationLogShowedToUser();
@@ -1391,12 +1371,7 @@ public class Commands {
           return;
         }
         for (String log : logs) {
-          if (!beeLine.isTestMode()) {
-            beeLine.info(log);
-          } else {
-            // In test mode print the logs to the output
-            beeLine.output(log);
-          }
+          beeLine.info(log);
         }
       } while (logs.size() > 0);
     } else {
@@ -1798,10 +1773,60 @@ public class Commands {
       return false;
     }
 
+    List<String> cmds = new LinkedList<String>();
+
     try {
-      String[] cmds = beeLine.getCommands(new File(parts[1]));
+      BufferedReader reader = new BufferedReader(new FileReader(
+          parts[1]));
+      try {
+        // ### NOTE: fix for sf.net bug 879427
+        StringBuilder cmd = null;
+        for (;;) {
+          String scriptLine = reader.readLine();
+
+          if (scriptLine == null) {
+            break;
+          }
+
+          String trimmedLine = scriptLine.trim();
+          if (beeLine.getOpts().getTrimScripts()) {
+            scriptLine = trimmedLine;
+          }
+
+          if (cmd != null) {
+            // we're continuing an existing command
+            cmd.append(" \n");
+            cmd.append(scriptLine);
+            if (trimmedLine.endsWith(";")) {
+              // this command has terminated
+              cmds.add(cmd.toString());
+              cmd = null;
+            }
+          } else {
+            // we're starting a new command
+            if (beeLine.needsContinuation(scriptLine)) {
+              // multi-line
+              cmd = new StringBuilder(scriptLine);
+            } else {
+              // single-line
+              cmds.add(scriptLine);
+            }
+          }
+        }
+
+        if (cmd != null) {
+          // ### REVIEW: oops, somebody left the last command
+          // unterminated; should we fix it for them or complain?
+          // For now be nice and fix it.
+          cmd.append(";");
+          cmds.add(cmd.toString());
+        }
+      } finally {
+        reader.close();
+      }
+
       // success only if all the commands were successful
-      return beeLine.runCommands(cmds) == cmds.length;
+      return beeLine.runCommands(cmds) == cmds.size();
     } catch (Exception e) {
       return beeLine.error(e);
     }

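The splitter above walks the line one character at a time, tracking whether the cursor sits inside a single- or double-quoted string and whether the previous character was an escape, and cuts a command at every semicolon found outside quotes. A standalone sketch of that state machine; it toggles the escape flag on backslash (as the pre-revert version did) so a doubled backslash does not escape the character that follows it:

    import java.util.ArrayList;
    import java.util.List;

    public class SemicolonSplitterSketch {
      // Split a line into ';'-terminated parts, ignoring semicolons inside quotes.
      public static List<String> split(String line) {
        List<String> parts = new ArrayList<>();
        boolean inDouble = false;   // inside an unterminated "..." string
        boolean inSingle = false;   // inside an unterminated '...' string
        boolean prevEscape = false; // previous character was an unconsumed backslash
        int last = 0;
        for (int i = 0; i < line.length(); i++) {
          switch (line.charAt(i)) {
            case '\'':
              if (!inDouble && !prevEscape) {
                inSingle = !inSingle;
              }
              prevEscape = false;
              break;
            case '"':
              if (!inSingle && !prevEscape) {
                inDouble = !inDouble;
              }
              prevEscape = false;
              break;
            case ';':
              if (!inDouble && !inSingle) {          // command boundary outside quotes
                parts.add(line.substring(last, i));
                last = i + 1;
              }
              prevEscape = false;
              break;
            case '\\':
              prevEscape = !prevEscape;              // "\\\\" cancels itself out
              break;
            default:
              prevEscape = false;
          }
        }
        if (last != line.length() || line.isEmpty()) { // trailing fragment with no ';'
          parts.add(line.substring(last));
        }
        return parts;
      }
    }

For example, split("select 'a;b'; select 2") yields two parts, because the first semicolon sits inside single quotes.
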
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java b/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
index 711f6a8..181f0d2 100644
--- a/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
+++ b/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
@@ -292,7 +292,7 @@ public class HiveSchemaHelper {
 
   // Derby commandline parser
   public static class DerbyCommandParser extends AbstractCommandParser {
-    private static final String DERBY_NESTING_TOKEN = "RUN";
+    private static String DERBY_NESTING_TOKEN = "RUN";
 
     public DerbyCommandParser(String dbOpts, String msUsername, String msPassword,
         HiveConf hiveConf) {
@@ -380,11 +380,11 @@ public class HiveSchemaHelper {
 
   // Postgres specific parser
   public static class PostgresCommandParser extends AbstractCommandParser {
-    private static final String POSTGRES_NESTING_TOKEN = "\\i";
+    private static String POSTGRES_NESTING_TOKEN = "\\i";
     @VisibleForTesting
-    public static final String POSTGRES_STANDARD_STRINGS_OPT = "SET standard_conforming_strings";
+    public static String POSTGRES_STANDARD_STRINGS_OPT = "SET standard_conforming_strings";
     @VisibleForTesting
-    public static final String POSTGRES_SKIP_STANDARD_STRINGS_DBOPT = "postgres.filter.81";
+    public static String POSTGRES_SKIP_STANDARD_STRINGS_DBOPT = "postgres.filter.81";
 
     public PostgresCommandParser(String dbOpts, String msUsername, String msPassword,
         HiveConf hiveConf) {
@@ -427,7 +427,7 @@ public class HiveSchemaHelper {
 
   //Oracle specific parser
   public static class OracleCommandParser extends AbstractCommandParser {
-    private static final String ORACLE_NESTING_TOKEN = "@";
+    private static String ORACLE_NESTING_TOKEN = "@";
 
     public OracleCommandParser(String dbOpts, String msUsername, String msPassword,
         HiveConf hiveConf) {
@@ -451,7 +451,7 @@ public class HiveSchemaHelper {
 
   //MSSQL specific parser
   public static class MSSQLCommandParser extends AbstractCommandParser {
-    private static final String MSSQL_NESTING_TOKEN = ":r";
+    private static String MSSQL_NESTING_TOKEN = ":r";
 
     public MSSQLCommandParser(String dbOpts, String msUsername, String msPassword,
         HiveConf hiveConf) {

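Each parser above keys nested-script handling off a database-specific token: RUN for Derby, \i for Postgres, @ for Oracle and :r for MSSQL. A rough sketch of how such a token can drive include handling; isNestedScript and extractScriptName are illustrative names rather than the parser's actual API, and the quote/semicolon stripping is only a guess at typical script syntax:

    public class NestedScriptSketch {
      private final String nestingToken;   // e.g. "RUN", "\\i", "@", ":r"

      public NestedScriptSketch(String nestingToken) {
        this.nestingToken = nestingToken;
      }

      // True if the line hands control to another SQL script.
      public boolean isNestedScript(String line) {
        return line.trim().startsWith(nestingToken);
      }

      // Strip the token (and any surrounding quotes) to get the referenced script name.
      public String extractScriptName(String line) {
        String rest = line.trim().substring(nestingToken.length()).trim();
        if (rest.endsWith(";")) {
          rest = rest.substring(0, rest.length() - 1);
        }
        return rest.replace("'", "").replace("\"", "");
      }
    }
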
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
index 7dd4d5f..2c088c9 100644
--- a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
+++ b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
@@ -89,7 +89,14 @@ public class HiveSchemaTool {
     }
     this.hiveConf = hiveConf;
     this.dbType = dbType;
-    this.metaStoreSchemaInfo = new MetaStoreSchemaInfo(hiveHome, dbType);
+    this.metaStoreSchemaInfo = new MetaStoreSchemaInfo(hiveHome, hiveConf, dbType);
+    userName = hiveConf.get(ConfVars.METASTORE_CONNECTION_USER_NAME.varname);
+    try {
+      passWord = ShimLoader.getHadoopShims().getPassword(hiveConf,
+          HiveConf.ConfVars.METASTOREPWD.varname);
+    } catch (IOException err) {
+      throw new HiveMetaException("Error getting metastore password", err);
+    }
   }
 
   public HiveConf getHiveConf() {
@@ -586,40 +593,29 @@ public class HiveSchemaTool {
   }
 
   public void doValidate() throws HiveMetaException {
-    System.out.println("Starting metastore validation\n");
+    System.out.println("Starting metastore validation");
     Connection conn = getConnectionToMetastore(false);
-    boolean success = true;
     try {
-      if (validateSchemaVersions(conn)) {
+      if (validateSchemaVersions(conn))
         System.out.println("[SUCCESS]\n");
-      } else {
-        success = false;
+      else
         System.out.println("[FAIL]\n");
-      }
-      if (validateSequences(conn)) {
+      if (validateSequences(conn))
         System.out.println("[SUCCESS]\n");
-      } else {
-        success = false;
+      else
         System.out.println("[FAIL]\n");
-      }
-      if (validateSchemaTables(conn)) {
+      if (validateSchemaTables(conn))
         System.out.println("[SUCCESS]\n");
-      } else {
-        success = false;
+      else
         System.out.println("[FAIL]\n");
-      }
-      if (validateLocations(conn, this.validationServers)) {
+      if (validateLocations(conn, this.validationServers))
         System.out.println("[SUCCESS]\n");
-      } else {
-        success = false;
+      else
         System.out.println("[FAIL]\n");
-      }
-      if (validateColumnNullValues(conn)) {
+      if (validateColumnNullValues(conn))
         System.out.println("[SUCCESS]\n");
-      } else {
-        success = false;
+      else
         System.out.println("[FAIL]\n");
-      }
     } finally {
       if (conn != null) {
         try {
@@ -630,13 +626,7 @@ public class HiveSchemaTool {
       }
     }
 
-    System.out.print("Done with metastore validation: ");
-    if (!success) {
-      System.out.println("[FAIL]");
-      System.exit(1);
-    } else {
-      System.out.println("[SUCCESS]");
-    }
+    System.out.println("Done with metastore validation");
   }
 
   boolean validateSequences(Connection conn) throws HiveMetaException {
@@ -729,14 +719,14 @@ public class HiveSchemaTool {
       version = getMetaStoreSchemaVersion(hmsConn);
     } catch (HiveMetaException he) {
       System.err.println("Failed to determine schema version from Hive Metastore DB," + he.getMessage());
-      LOG.debug("Failed to determine schema version from Hive Metastore DB," + he.getMessage());
+      LOG.error("Failed to determine schema version from Hive Metastore DB," + he.getMessage());
       return false;
     }
 
     // re-open the hms connection
     hmsConn = getConnectionToMetastore(false);
 
-    LOG.debug("Validating tables in the schema for version " + version);
+    LOG.info("Validating tables in the schema for version " + version);
     try {
       metadata       = conn.getMetaData();
       String[] types = {"TABLE"};
@@ -770,7 +760,7 @@ public class HiveSchemaTool {
       subScripts.addAll(findCreateTable(schemaFile, schemaTables));
       while (subScripts.size() > 0) {
         schemaFile = baseDir + "/" + dbType + "/" + subScripts.remove(0);
-        LOG.debug("Parsing subscript " + schemaFile);
+        LOG.info("Parsing subscript " + schemaFile);
         subScripts.addAll(findCreateTable(schemaFile, schemaTables));
       }
     } catch (Exception e) {
@@ -785,12 +775,13 @@ public class HiveSchemaTool {
     int schemaSize = schemaTables.size();
     schemaTables.removeAll(dbTables);
     if (schemaTables.size() > 0) {
-      System.out.println("Table(s) [ " + Arrays.toString(schemaTables.toArray())
+      System.out.println("Found " + schemaSize + " tables in schema definition, " +
+          schemaTables.size() + " tables [ " + Arrays.toString(schemaTables.toArray())
           + " ] are missing from the metastore database schema.");
       System.out.println("Schema table validation failed!!!");
       return false;
     } else {
-      System.out.println("Succeeded in schema table validation.");
+      System.out.println("Succeeded in schema table validation. " + schemaSize + " tables matched");
       return true;
     }
   }
@@ -1111,19 +1102,9 @@ public class HiveSchemaTool {
 
       if (line.hasOption("userName")) {
         schemaTool.setUserName(line.getOptionValue("userName"));
-      } else {
-        schemaTool.setUserName(
-            schemaTool.getHiveConf().get(ConfVars.METASTORE_CONNECTION_USER_NAME.varname));
       }
       if (line.hasOption("passWord")) {
         schemaTool.setPassWord(line.getOptionValue("passWord"));
-      } else {
-        try {
-          schemaTool.setPassWord(ShimLoader.getHadoopShims().getPassword(schemaTool.getHiveConf(),
-              HiveConf.ConfVars.METASTOREPWD.varname));
-        } catch (IOException err) {
-          throw new HiveMetaException("Error getting metastore password", err);
-        }
       }
       if (line.hasOption("dryRun")) {
         schemaTool.setDryRun(true);

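doValidate above runs each check independently and prints [SUCCESS]/[FAIL] per check; the pre-revert version also folded the results into a single exit status so callers could script against the process exit code. A compact sketch of that aggregation pattern with placeholder checks standing in for the real schema, sequence, table, location and null-value validations:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.function.Supplier;

    public class ValidationRunnerSketch {
      public static void main(String[] args) {
        // Placeholder checks; the real tool queries the metastore database for each one.
        Map<String, Supplier<Boolean>> checks = new LinkedHashMap<>();
        checks.put("schema versions", () -> true);
        checks.put("sequences", () -> true);
        checks.put("schema tables", () -> false);

        boolean success = true;
        for (Map.Entry<String, Supplier<Boolean>> check : checks.entrySet()) {
          boolean ok = check.getValue().get();
          System.out.println("Validating " + check.getKey() + ": " + (ok ? "[SUCCESS]" : "[FAIL]"));
          success &= ok;                       // one failure fails the whole run
        }
        System.out.print("Done with metastore validation: ");
        System.out.println(success ? "[SUCCESS]" : "[FAIL]");
        if (!success) {
          System.exit(1);                      // non-zero exit so scripts can react
        }
      }
    }
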
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/beeline/src/java/org/apache/hive/beeline/hs2connection/UserHS2ConnectionFileParser.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/hs2connection/UserHS2ConnectionFileParser.java b/beeline/src/java/org/apache/hive/beeline/hs2connection/UserHS2ConnectionFileParser.java
index 7d7d9ae..93a6231 100644
--- a/beeline/src/java/org/apache/hive/beeline/hs2connection/UserHS2ConnectionFileParser.java
+++ b/beeline/src/java/org/apache/hive/beeline/hs2connection/UserHS2ConnectionFileParser.java
@@ -44,7 +44,7 @@ public class UserHS2ConnectionFileParser implements HS2ConnectionFileParser {
           + (System.getProperty("os.name").toLowerCase().indexOf("windows") != -1 ? "" : ".")
           + "beeline" + File.separator;
   public static final String ETC_HIVE_CONF_LOCATION =
-      File.separator + "etc" + File.separator + "hive" + File.separator + "conf";
+      File.separator + "etc" + File.separator + "conf" + File.separator + "hive";
 
   private final List<String> locations = new ArrayList<>();
   private static final Logger log = LoggerFactory.getLogger(UserHS2ConnectionFileParser.class);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/beeline/src/java/org/apache/hive/beeline/logs/BeelineInPlaceUpdateStream.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/logs/BeelineInPlaceUpdateStream.java b/beeline/src/java/org/apache/hive/beeline/logs/BeelineInPlaceUpdateStream.java
index 40cde0c..51344e3 100644
--- a/beeline/src/java/org/apache/hive/beeline/logs/BeelineInPlaceUpdateStream.java
+++ b/beeline/src/java/org/apache/hive/beeline/logs/BeelineInPlaceUpdateStream.java
@@ -1,20 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 package org.apache.hive.beeline.logs;
 
 import org.apache.hadoop.hive.common.log.InPlaceUpdate;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/beeline/src/main/resources/BeeLine.properties
----------------------------------------------------------------------
diff --git a/beeline/src/main/resources/BeeLine.properties b/beeline/src/main/resources/BeeLine.properties
index 7011221..e33b812 100644
--- a/beeline/src/main/resources/BeeLine.properties
+++ b/beeline/src/main/resources/BeeLine.properties
@@ -202,7 +202,6 @@ cmd-usage: Usage: java org.apache.hive.cli.beeline.BeeLine \n \
 \  --delimiterForDSV=DELIMITER     specify the delimiter for delimiter-separated values output format (default: |)\n \
 \  --isolation=LEVEL               set the transaction isolation level\n \
 \  --nullemptystring=[true/false]  set to true to get historic behavior of printing null as empty string\n \
-\  --maxHistoryRows=MAXHISTORYROWS The maximum number of rows to store beeline history.\n \
 \  --help                          display this message\n \
 \n \
 \  Example:\n \

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/beeline/src/test/org/apache/hive/beeline/TestBeelineArgParsing.java
----------------------------------------------------------------------
diff --git a/beeline/src/test/org/apache/hive/beeline/TestBeelineArgParsing.java b/beeline/src/test/org/apache/hive/beeline/TestBeelineArgParsing.java
index 2884cc8..d73d374 100644
--- a/beeline/src/test/org/apache/hive/beeline/TestBeelineArgParsing.java
+++ b/beeline/src/test/org/apache/hive/beeline/TestBeelineArgParsing.java
@@ -319,16 +319,4 @@ public class TestBeelineArgParsing {
     Assert.assertTrue(bl.properties.get(0).equals("props"));
     bl.close();
   }
-
-  /**
-   * Test maxHistoryRows parameter option.
-   */
-  @Test
-  public void testMaxHistoryRows() throws Exception {
-    TestBeeline bl = new TestBeeline();
-    String args[] = new String[] {"--maxHistoryRows=100"};
-    Assert.assertEquals(0, bl.initArgs(args));
-    Assert.assertTrue(bl.getOpts().getMaxHistoryRows() == 100);
-    bl.close();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/beeline/src/test/org/apache/hive/beeline/TestHiveSchemaTool.java
----------------------------------------------------------------------
diff --git a/beeline/src/test/org/apache/hive/beeline/TestHiveSchemaTool.java b/beeline/src/test/org/apache/hive/beeline/TestHiveSchemaTool.java
index 4cd5124..8d386da 100644
--- a/beeline/src/test/org/apache/hive/beeline/TestHiveSchemaTool.java
+++ b/beeline/src/test/org/apache/hive/beeline/TestHiveSchemaTool.java
@@ -1,20 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 package org.apache.hive.beeline;
 
 import org.apache.hadoop.hive.conf.HiveConf;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/cli/pom.xml
----------------------------------------------------------------------
diff --git a/cli/pom.xml b/cli/pom.xml
index 71d214b..10fb1b9 100644
--- a/cli/pom.xml
+++ b/cli/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/cli/src/java/org/apache/hadoop/hive/cli/RCFileCat.java
----------------------------------------------------------------------
diff --git a/cli/src/java/org/apache/hadoop/hive/cli/RCFileCat.java b/cli/src/java/org/apache/hadoop/hive/cli/RCFileCat.java
index 24550fa..f1806a0 100644
--- a/cli/src/java/org/apache/hadoop/hive/cli/RCFileCat.java
+++ b/cli/src/java/org/apache/hadoop/hive/cli/RCFileCat.java
@@ -54,7 +54,7 @@ public class RCFileCat implements Tool{
   // In verbose mode, print an update per RECORD_PRINT_INTERVAL records
   private static final int RECORD_PRINT_INTERVAL = (1024*1024);
 
-  protected boolean test = false;
+  protected static boolean test=false;
 
   public RCFileCat() {
     super();
@@ -63,12 +63,12 @@ public class RCFileCat implements Tool{
       onUnmappableCharacter(CodingErrorAction.REPLACE);
   }
 
-  private CharsetDecoder decoder;
+  private static CharsetDecoder decoder;
 
   Configuration conf = null;
 
-  private static final String TAB ="\t";
-  private static final String NEWLINE ="\r\n";
+  private static String TAB ="\t";
+  private static String NEWLINE ="\r\n";
 
   @Override
   public int run(String[] args) throws Exception {
@@ -243,7 +243,7 @@ public class RCFileCat implements Tool{
     this.conf = conf;
   }
 
-  private static final String Usage = "RCFileCat [--start=start_offet] [--length=len] [--verbose] " +
+  private static String Usage = "RCFileCat [--start=start_offet] [--length=len] [--verbose] " +
       "[--column-sizes | --column-sizes-pretty] [--file-sizes] fileName";
 
   public static void main(String[] args) {
@@ -262,7 +262,7 @@ public class RCFileCat implements Tool{
     }
   }
 
-  private void setupBufferedOutput() {
+  private static void setupBufferedOutput() {
     OutputStream pdataOut;
     if (test) {
       pdataOut = System.out;
@@ -275,7 +275,6 @@ public class RCFileCat implements Tool{
         new PrintStream(bos, false);
     System.setOut(ps);
   }
-
   private static void printUsage(String errorMsg) {
     System.err.println(Usage);
     if(errorMsg != null) {

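setupBufferedOutput above replaces System.out with a buffered PrintStream so that dumping large RCFiles is not throttled by unbuffered console writes, while the test flag keeps the original stream so tests can capture output. A minimal sketch of that redirection; the 128 KB buffer size is an illustrative choice, not necessarily the tool's:

    import java.io.BufferedOutputStream;
    import java.io.FileDescriptor;
    import java.io.FileOutputStream;
    import java.io.OutputStream;
    import java.io.PrintStream;

    public class BufferedStdoutSketch {
      public static void setupBufferedOutput(boolean test) {
        OutputStream raw = test
            ? System.out                                   // tests capture System.out directly
            : new FileOutputStream(FileDescriptor.out);    // bypass the existing PrintStream
        BufferedOutputStream bos = new BufferedOutputStream(raw, 128 * 1024);
        // autoFlush=false: data is flushed on close/explicit flush, not on every newline
        System.setOut(new PrintStream(bos, false));
      }
    }
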
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/cli/src/test/org/apache/hadoop/hive/cli/TestRCFileCat.java
----------------------------------------------------------------------
diff --git a/cli/src/test/org/apache/hadoop/hive/cli/TestRCFileCat.java b/cli/src/test/org/apache/hadoop/hive/cli/TestRCFileCat.java
index 4cb4a19..11ceb31 100644
--- a/cli/src/test/org/apache/hadoop/hive/cli/TestRCFileCat.java
+++ b/cli/src/test/org/apache/hadoop/hive/cli/TestRCFileCat.java
@@ -25,6 +25,8 @@ import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
+import java.net.URI;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -75,7 +77,7 @@ public class TestRCFileCat {
     writer.close();
 
     RCFileCat fileCat = new RCFileCat();
-    fileCat.test=true;
+    RCFileCat.test=true;
     fileCat.setConf(new Configuration());
 
     // set fake input and output streams

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/pom.xml
----------------------------------------------------------------------
diff --git a/common/pom.xml b/common/pom.xml
index e6722ba..8474a87 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 
@@ -69,24 +69,20 @@
       <version>${jline.version}</version>
     </dependency>
     <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jetty</groupId>
-      <artifactId>jetty-rewrite</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jetty</groupId>
-      <artifactId>jetty-server</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jetty</groupId>
-      <artifactId>jetty-servlet</artifactId>
+      <groupId>org.eclipse.jetty.aggregate</groupId>
+      <artifactId>jetty-all</artifactId>
+      <version>${jetty.version}</version>
+      <exclusions>
+	<exclusion>
+	  <groupId>javax.servlet</groupId>
+	  <artifactId>servlet-api</artifactId>
+	</exclusion>
+      </exclusions>
     </dependency>
     <dependency>
-      <groupId>org.eclipse.jetty</groupId>
-      <artifactId>jetty-webapp</artifactId>
+      <groupId>org.eclipse.jetty.orbit</groupId>
+      <artifactId>javax.servlet</artifactId>
+      <version>${javax-servlet.version}</version>
     </dependency>
     <dependency>
       <groupId>joda-time</groupId>
@@ -133,18 +129,6 @@
           <artifactId>servlet-api</artifactId>
         </exclusion>
         <exclusion>
-          <groupId>javax.servlet.jsp</groupId>
-          <artifactId>jsp-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty-util</artifactId>
-        </exclusion>
-        <exclusion>
           <groupId>org.slf4j</groupId>
           <artifactId>slf4j-log4j12</artifactId>
         </exclusion>
@@ -165,10 +149,6 @@
           <artifactId>servlet-api</artifactId>
         </exclusion>
         <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty-util</artifactId>
-        </exclusion>
-        <exclusion>
           <groupId>org.slf4j</groupId>
           <artifactId>slf4j-log4j12</artifactId>
         </exclusion>

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/CopyOnFirstWriteProperties.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/CopyOnFirstWriteProperties.java b/common/src/java/org/apache/hadoop/hive/common/CopyOnFirstWriteProperties.java
deleted file mode 100644
index d4d078b..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/CopyOnFirstWriteProperties.java
+++ /dev/null
@@ -1,344 +0,0 @@
-package org.apache.hadoop.hive.common;
-
-import com.google.common.collect.Interner;
-import com.google.common.collect.Interners;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.PrintStream;
-import java.io.PrintWriter;
-import java.io.Reader;
-import java.lang.reflect.Field;
-import java.util.Collection;
-import java.util.Enumeration;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.function.BiConsumer;
-import java.util.function.BiFunction;
-import java.util.function.Function;
-
-/**
- * A special subclass of Properties, designed to save memory when many identical
- * copies of Properties would otherwise be created. To achieve that, we use the
- * 'interned' field, which points to the same Properties object for all instances
- * of CopyOnFirstWriteProperties that were created with identical contents.
- * However, as soon as any mutating method is called, contents are copied from
- * the 'interned' properties into this instance.
- */
-public class CopyOnFirstWriteProperties extends Properties {
-
-  private Properties interned;
-
-  private static Interner<Properties> INTERNER = Interners.newWeakInterner();
-  private static Field defaultsField;
-  static {
-    try {
-      defaultsField = Properties.class.getDeclaredField("defaults");
-      defaultsField.setAccessible(true);
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  public CopyOnFirstWriteProperties(Properties p) {
-    setInterned(p);
-  }
-
-  /*************   Public API of java.util.Properties   ************/
-
-  @Override
-  public String getProperty(String key) {
-    if (interned != null) return interned.getProperty(key);
-    else return super.getProperty(key);
-  }
-
-  @Override
-  public String getProperty(String key, String defaultValue) {
-    if (interned != null) return interned.getProperty(key, defaultValue);
-    else return super.getProperty(key, defaultValue);
-  }
-
-  @Override
-  public void list(PrintStream out) {
-    if (interned != null) interned.list(out);
-    else super.list(out);
-  }
-
-  @Override
-  public void list(PrintWriter out) {
-    if (interned != null) interned.list(out);
-    else super.list(out);
-  }
-
-  @Override
-  public synchronized void load(InputStream inStream) throws IOException {
-    if (interned != null) copyFromInternedToThis();
-    super.load(inStream);
-  }
-
-  @Override
-  public synchronized void load(Reader reader) throws IOException {
-    if (interned != null) copyFromInternedToThis();
-    super.load(reader);
-  }
-
-  @Override
-  public synchronized void loadFromXML(InputStream inStream) throws IOException {
-    if (interned != null) copyFromInternedToThis();
-    super.loadFromXML(inStream);
-  }
-
-  @Override
-  public Enumeration<?> propertyNames() {
-    if (interned != null) return interned.propertyNames();
-    else return super.propertyNames();
-  }
-
-  @Override
-  public synchronized Object setProperty(String key, String value) {
-    if (interned != null) copyFromInternedToThis();
-    return super.setProperty(key, value);
-  }
-
-  @Override
-  public void store(OutputStream out, String comments) throws IOException {
-    if (interned != null) interned.store(out, comments);
-    else super.store(out, comments);
-  }
-
-  @Override
-  public void storeToXML(OutputStream os, String comment) throws IOException {
-    if (interned != null) interned.storeToXML(os, comment);
-    else super.storeToXML(os, comment);
-  }
-
-  @Override
-  public void storeToXML(OutputStream os, String comment, String encoding)
-      throws IOException {
-    if (interned != null) interned.storeToXML(os, comment, encoding);
-    else super.storeToXML(os, comment, encoding);
-  }
-
-  @Override
-  public Set<String> stringPropertyNames() {
-    if (interned != null) return interned.stringPropertyNames();
-    else return super.stringPropertyNames();
-  }
-
-  /*************   Public API of java.util.Hashtable   ************/
-
-  @Override
-  public synchronized void clear() {
-    if (interned != null) copyFromInternedToThis();
-    super.clear();
-  }
-
-  @Override
-  public synchronized Object clone() {
-    if (interned != null) return new CopyOnFirstWriteProperties(interned);
-    else return super.clone();
-  }
-
-  @Override
-  public synchronized Object compute(Object key, BiFunction remappingFunction) {
-    if (interned != null) copyFromInternedToThis();  // We do this because if function returns null,
-                                       // the mapping for key is removed, i.e. the table is mutated.
-    return super.compute(key, remappingFunction);
-  }
-
-  @Override
-  public synchronized Object computeIfAbsent(Object key, Function mappingFunction) {
-    if (interned != null) copyFromInternedToThis();
-    return super.computeIfAbsent(key, mappingFunction);
-  }
-
-  @Override
-  public synchronized Object computeIfPresent(Object key, BiFunction remappingFunction) {
-    if (interned != null) copyFromInternedToThis();
-    return super.computeIfPresent(key, remappingFunction);
-  }
-
-  @Override
-  public synchronized boolean contains(Object value) {
-    if (interned != null) return interned.contains(value);
-    else return super.contains(value);
-  }
-
-  @Override
-  public synchronized boolean containsKey(Object key) {
-    if (interned != null) return interned.containsKey(key);
-    else return super.containsKey(key);
-  }
-
-  @Override
-  public synchronized boolean containsValue(Object value) {
-    if (interned != null) return interned.containsValue(value);
-    else return super.containsValue(value);
-  }
-
-  @Override
-  public synchronized Enumeration<Object> elements() {
-    if (interned != null) return interned.elements();
-    else return super.elements();
-  }
-
-  @Override
-  public Set<Map.Entry<Object, Object>> entrySet() {
-    if (interned != null) return interned.entrySet();
-    else return super.entrySet();
-  }
-
-  @Override
-  public synchronized boolean equals(Object o) {
-    if (interned != null) return interned.equals(o);
-    else return super.equals(o);
-  }
-
-  @Override
-  public synchronized void forEach(BiConsumer action) {
-    if (interned != null) interned.forEach(action);
-    else super.forEach(action);
-  }
-
-  @Override
-  public synchronized Object get(Object key) {
-    if (interned != null) return interned.get(key);
-    else return super.get(key);
-  }
-
-  @Override
-  public synchronized Object getOrDefault(Object key, Object defaultValue) {
-    if (interned != null) return interned.getOrDefault(key, defaultValue);
-    else return super.getOrDefault(key, defaultValue);
-  }
-
-  @Override
-  public synchronized int hashCode() {
-    if (interned != null) return interned.hashCode();
-    else return super.hashCode();
-  }
-
-  @Override
-  public synchronized boolean isEmpty() {
-    if (interned != null) return interned.isEmpty();
-    else return super.isEmpty();
-  }
-
-  @Override
-  public synchronized Enumeration<Object> keys() {
-    if (interned != null) return interned.keys();
-    else return super.keys();
-  }
-
-  @Override
-  public Set<Object> keySet() {
-    if (interned != null) return interned.keySet();
-    else return super.keySet();
-  }
-
-  @Override
-  public synchronized Object merge(Object key, Object value, BiFunction remappingFunction) {
-    if (interned != null) copyFromInternedToThis();
-    return super.merge(key, value, remappingFunction);
-  }
-
-  @Override
-  public synchronized Object put(Object key, Object value) {
-    if (interned != null) copyFromInternedToThis();
-    return super.put(key, value);
-  }
-
-  @Override
-  public synchronized void putAll(Map<? extends Object, ? extends Object> t) {
-    if (interned != null) copyFromInternedToThis();
-    super.putAll(t);
-  }
-
-  @Override
-  public synchronized Object putIfAbsent(Object key, Object value) {
-    if (interned != null) copyFromInternedToThis();
-    return super.putIfAbsent(key, value);
-  }
-
-  @Override
-  public synchronized Object remove(Object key) {
-    if (interned != null) copyFromInternedToThis();
-    return super.remove(key);
-  }
-
-  @Override
-  public synchronized boolean remove(Object key, Object value) {
-    if (interned != null) copyFromInternedToThis();
-    return super.remove(key, value);
-  }
-
-  @Override
-  public synchronized Object replace(Object key, Object value) {
-    if (interned != null) copyFromInternedToThis();
-    return super.replace(key, value);
-  }
-
-  @Override
-  public synchronized boolean replace(Object key, Object oldValue, Object newValue) {
-    if (interned != null) copyFromInternedToThis();
-    return super.replace(key, oldValue, newValue);
-  }
-
-  @Override
-  public synchronized void replaceAll(BiFunction function) {
-    if (interned != null) copyFromInternedToThis();
-    super.replaceAll(function);
-  }
-
-  @Override
-  public synchronized int size() {
-    if (interned != null) return interned.size();
-    else return super.size();
-  }
-
-  @Override
-  public synchronized String toString() {
-    if (interned != null) return interned.toString();
-    else return super.toString();
-  }
-
-  @Override
-  public Collection<Object> values() {
-    if (interned != null) return interned.values();
-    else return super.values();
-  }
-
-  /*************   Private implementation ************/
-
-  private void copyFromInternedToThis() {
-    for (Map.Entry<?,?> e : interned.entrySet()) {
-      super.put(e.getKey(), e.getValue());
-    }
-    try {
-      // Unfortunately, we cannot directly read a protected field of non-this object
-      this.defaults = (Properties) defaultsField.get(interned);
-    } catch (IllegalAccessException e) {   // Shouldn't happen
-      throw new RuntimeException(e);
-    }
-    setInterned(null);
-  }
-
-  public void setInterned(Properties p) {
-    if (p != null) {
-      this.interned = INTERNER.intern(p);
-    } else {
-      this.interned = p;
-    }
-  }
-
-  // These methods are required by serialization
-
-  public CopyOnFirstWriteProperties() {
-  }
-
-  public Properties getInterned() {
-    return interned;
-  }
-}

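The deleted class above implements copy-on-first-write: read-only calls delegate to a Properties instance shared through Guava's weak interner, and the first mutating call copies the shared contents into the subclass so the shared copy is never touched. A trimmed-down sketch of the same idea over a plain Map, to make the delegate-then-copy flow explicit:

    import com.google.common.collect.ImmutableMap;
    import com.google.common.collect.Interner;
    import com.google.common.collect.Interners;
    import java.util.HashMap;
    import java.util.Map;

    // Shares identical read-only maps; copies on the first write.
    public class CopyOnFirstWriteMapSketch {
      private static final Interner<ImmutableMap<String, String>> INTERNER =
          Interners.newWeakInterner();

      private ImmutableMap<String, String> interned;   // shared, non-null until first write
      private Map<String, String> own;                 // private copy after first write

      public CopyOnFirstWriteMapSketch(Map<String, String> source) {
        this.interned = INTERNER.intern(ImmutableMap.copyOf(source));
      }

      public String get(String key) {
        return interned != null ? interned.get(key) : own.get(key);
      }

      public void put(String key, String value) {
        if (interned != null) {          // first write: detach from the shared copy
          own = new HashMap<>(interned);
          interned = null;
        }
        own.put(key, value);
      }
    }

The pay-off comes when many objects carry byte-identical property sets; the weak interner also lets a shared copy be garbage-collected once no wrapper references it.
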
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index 0f7401c..c6bc9b9 100644
--- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -25,8 +25,6 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.security.AccessControlException;
 import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.BitSet;
 import java.util.Collection;
 import java.util.HashSet;
@@ -359,12 +357,6 @@ public final class FileUtils {
     return getPathOrParentThatExists(fs, parentPath);
   }
 
-  public static void checkFileAccessWithImpersonation(final FileSystem fs, final FileStatus stat,
-      final FsAction action, final String user)
-      throws IOException, AccessControlException, InterruptedException, Exception {
-    checkFileAccessWithImpersonation(fs, stat, action, user, null);
-  }
-
   /**
    * Perform a check to determine if the user is able to access the file passed in.
    * If the user name passed in is different from the current user, this method will
@@ -379,15 +371,13 @@ public final class FileUtils {
    *             check will be performed within a doAs() block to use the access privileges
    *             of this user. In this case the user must be configured to impersonate other
    *             users, otherwise this check will fail with error.
-   * @param children List of children to be collected. If this is null, no children are collected.
-   *        To be set only if this is a directory
    * @throws IOException
    * @throws AccessControlException
    * @throws InterruptedException
    * @throws Exception
    */
   public static void checkFileAccessWithImpersonation(final FileSystem fs,
-      final FileStatus stat, final FsAction action, final String user, final List<FileStatus> children)
+      final FileStatus stat, final FsAction action, final String user)
           throws IOException, AccessControlException, InterruptedException, Exception {
     UserGroupInformation ugi = Utils.getUGI();
     String currentUser = ugi.getShortUserName();
@@ -395,7 +385,6 @@ public final class FileUtils {
     if (user == null || currentUser.equals(user)) {
       // No need to impersonate user, do the checks as the currently configured user.
       ShimLoader.getHadoopShims().checkFileAccess(fs, stat, action);
-      addChildren(fs, stat.getPath(), children);
       return;
     }
 
@@ -408,7 +397,6 @@ public final class FileUtils {
         public Object run() throws Exception {
           FileSystem fsAsUser = FileSystem.get(fs.getUri(), fs.getConf());
           ShimLoader.getHadoopShims().checkFileAccess(fsAsUser, stat, action);
-          addChildren(fsAsUser, stat.getPath(), children);
           return null;
         }
       });
@@ -417,20 +405,6 @@ public final class FileUtils {
     }
   }
 
-  private static void addChildren(FileSystem fsAsUser, Path path, List<FileStatus> children)
-      throws IOException {
-    if (children != null) {
-      FileStatus[] listStatus;
-      try {
-        listStatus = fsAsUser.listStatus(path);
-      } catch (IOException e) {
-        LOG.warn("Unable to list files under " + path + " : " + e);
-        throw e;
-      }
-      children.addAll(Arrays.asList(listStatus));
-    }
-  }
-
   /**
    * Check if user userName has permissions to perform the given FsAction action
    * on all files under the file whose FileStatus fileStatus is provided
@@ -457,26 +431,20 @@ public final class FileUtils {
       dirActionNeeded.and(FsAction.EXECUTE);
     }
 
-    List<FileStatus> subDirsToCheck = null;
-    if (isDir && recurse) {
-      subDirsToCheck = new ArrayList<FileStatus>();
-    }
-
     try {
-      checkFileAccessWithImpersonation(fs, fileStatus, action, userName, subDirsToCheck);
+      checkFileAccessWithImpersonation(fs, fileStatus, action, userName);
     } catch (AccessControlException err) {
       // Action not permitted for user
-      LOG.warn("Action " + action + " denied on " + fileStatus.getPath() + " for user " + userName);
       return false;
     }
 
-    if (subDirsToCheck == null || subDirsToCheck.isEmpty()) {
+    if ((!isDir) || (!recurse)) {
       // no sub dirs to be checked
       return true;
     }
-
     // check all children
-    for (FileStatus childStatus : subDirsToCheck) {
+    FileStatus[] childStatuses = fs.listStatus(fileStatus.getPath());
+    for (FileStatus childStatus : childStatuses) {
       // check children recursively - recurse is true if we're here.
       if (!isActionPermittedForFileHierarchy(fs, childStatus, userName, action, true)) {
         return false;
@@ -518,30 +486,11 @@ public final class FileUtils {
     return false;
   }
   public static boolean isOwnerOfFileHierarchy(FileSystem fs, FileStatus fileStatus, String userName)
-      throws IOException, InterruptedException {
+      throws IOException {
     return isOwnerOfFileHierarchy(fs, fileStatus, userName, true);
   }
 
-  public static boolean isOwnerOfFileHierarchy(final FileSystem fs,
-      final FileStatus fileStatus, final String userName, final boolean recurse)
-      throws IOException, InterruptedException {
-    UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(userName,
-        UserGroupInformation.getLoginUser());
-    try {
-      boolean isOwner = proxyUser.doAs(new PrivilegedExceptionAction<Boolean>() {
-        @Override
-        public Boolean run() throws Exception {
-          FileSystem fsAsUser = FileSystem.get(fs.getUri(), fs.getConf());
-          return checkIsOwnerOfFileHierarchy(fsAsUser, fileStatus, userName, recurse);
-        }
-      });
-      return isOwner;
-    } finally {
-      FileSystem.closeAllForUGI(proxyUser);
-    }
-  }
-
-  public static boolean checkIsOwnerOfFileHierarchy(FileSystem fs, FileStatus fileStatus,
+  public static boolean isOwnerOfFileHierarchy(FileSystem fs, FileStatus fileStatus,
       String userName, boolean recurse)
       throws IOException {
     if (!fileStatus.getOwner().equals(userName)) {
@@ -556,24 +505,59 @@ public final class FileUtils {
     FileStatus[] childStatuses = fs.listStatus(fileStatus.getPath());
     for (FileStatus childStatus : childStatuses) {
       // check children recursively - recurse is true if we're here.
-      if (!checkIsOwnerOfFileHierarchy(fs, childStatus, userName, true)) {
+      if (!isOwnerOfFileHierarchy(fs, childStatus, userName, true)) {
         return false;
       }
     }
     return true;
   }
 
+  public static boolean mkdir(FileSystem fs, Path f, Configuration conf) throws IOException {
+    boolean inheritPerms = HiveConf.getBoolVar(conf, ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
+    return mkdir(fs, f, inheritPerms, conf);
+  }
+
   /**
    * Creates the directory and all necessary parent directories.
    * @param fs FileSystem to use
    * @param f path to create.
+   * @param inheritPerms whether directory inherits the permission of the last-existing parent path
    * @param conf Hive configuration
    * @return true if directory created successfully.  False otherwise, including if it exists.
    * @throws IOException exception in creating the directory
    */
-  public static boolean mkdir(FileSystem fs, Path f, Configuration conf) throws IOException {
+  public static boolean mkdir(FileSystem fs, Path f, boolean inheritPerms, Configuration conf) throws IOException {
     LOG.info("Creating directory if it doesn't exist: " + f);
-    return fs.mkdirs(f);
+    if (!inheritPerms) {
+      //just create the directory
+      return fs.mkdirs(f);
+    } else {
+      //Check if the directory already exists. We want to change the permission
+      //to that of the parent directory only for newly created directories.
+      try {
+        return fs.getFileStatus(f).isDir();
+      } catch (FileNotFoundException ignore) {
+      }
+      //inherit perms: need to find last existing parent path, and apply its permission on entire subtree.
+      Path lastExistingParent = f;
+      Path firstNonExistentParent = null;
+      while (!fs.exists(lastExistingParent)) {
+        firstNonExistentParent = lastExistingParent;
+        lastExistingParent = lastExistingParent.getParent();
+      }
+      boolean success = fs.mkdirs(f);
+      if (!success) {
+        return false;
+      } else {
+        //set on the entire subtree
+        if (inheritPerms) {
+          HdfsUtils.setFullFileStatus(conf,
+                  new HdfsUtils.HadoopFileStatus(conf, fs, lastExistingParent), fs,
+                  firstNonExistentParent, true);
+        }
+        return true;
+      }
+    }
   }
 
   public static Path makeAbsolute(FileSystem fileSystem, Path path) throws IOException {
@@ -626,6 +610,11 @@ public final class FileUtils {
     if (!triedDistcp) {
       copied = FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf);
     }
+
+    boolean inheritPerms = conf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
+    if (copied && inheritPerms) {
+      HdfsUtils.setFullFileStatus(conf, new HdfsUtils.HadoopFileStatus(conf, dstFS, dst.getParent()), dstFS, dst, true);
+    }
     return copied;
   }
 
@@ -637,19 +626,15 @@ public final class FileUtils {
    * @return true if move successful
    * @throws IOException
    */
-  public static boolean moveToTrash(FileSystem fs, Path f, Configuration conf, boolean purge)
+  public static boolean moveToTrash(FileSystem fs, Path f, Configuration conf)
       throws IOException {
     LOG.debug("deleting  " + f);
     boolean result = false;
     try {
-      if(purge) {
-        LOG.debug("purge is set to true. Not moving to Trash " + f);
-      } else {
-        result = Trash.moveToAppropriateTrash(fs, f, conf);
-        if (result) {
-          LOG.trace("Moved to trash: " + f);
-          return true;
-        }
+      result = Trash.moveToAppropriateTrash(fs, f, conf);
+      if (result) {
+        LOG.trace("Moved to trash: " + f);
+        return true;
       }
     } catch (IOException ioe) {
       // for whatever failure reason including that trash has lower encryption zone
@@ -661,11 +646,13 @@ public final class FileUtils {
     if (!result) {
       LOG.error("Failed to delete " + f);
     }
+
     return result;
   }
 
-  public static boolean rename(FileSystem fs, Path sourcePath,
-                               Path destPath, Configuration conf) throws IOException {
+  public static boolean renameWithPerms(FileSystem fs, Path sourcePath,
+                               Path destPath, boolean inheritPerms,
+                               Configuration conf) throws IOException {
     LOG.info("Renaming " + sourcePath + " to " + destPath);
 
     // If destPath directory exists, rename call will move the sourcePath
@@ -674,7 +661,20 @@ public final class FileUtils {
       throw new IOException("Cannot rename the source path. The destination "
           + "path already exists.");
     }
-    return fs.rename(sourcePath, destPath);
+
+    if (!inheritPerms) {
+      //just rename the directory
+      return fs.rename(sourcePath, destPath);
+    } else {
+      //rename the directory
+      if (fs.rename(sourcePath, destPath)) {
+        HdfsUtils.setFullFileStatus(conf, new HdfsUtils.HadoopFileStatus(conf, fs, destPath.getParent()), fs, destPath,
+                true);
+        return true;
+      }
+
+      return false;
+    }
   }
 
   /**


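checkFileAccessWithImpersonation and the pre-revert isOwnerOfFileHierarchy above share one pattern: build a proxy UGI for the target user, run the filesystem check inside doAs() so it executes with that user's privileges, and close the per-UGI filesystem handles afterwards. A condensed sketch of that pattern; the calling process must be configured as a Hadoop proxy user for doAs() to succeed, and the ownership check here is just a stand-in for the real access checks:

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.UserGroupInformation;

    public class ImpersonatedCheckSketch {
      // Returns true if 'user' owns the given path, evaluated as that user.
      public static boolean isOwnerAsUser(Configuration conf, Path path, String user)
          throws Exception {
        UserGroupInformation proxyUser =
            UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser());
        try {
          return proxyUser.doAs(new PrivilegedExceptionAction<Boolean>() {
            @Override
            public Boolean run() throws Exception {
              // FileSystem.get inside doAs() binds the handle to the proxy UGI
              FileSystem fs = path.getFileSystem(conf);
              FileStatus stat = fs.getFileStatus(path);
              return stat.getOwner().equals(user);
            }
          });
        } finally {
          FileSystem.closeAllForUGI(proxyUser);   // drop cached filesystems for the proxy UGI
        }
      }
    }
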
[14/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 64bc1a2..915bce3 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -58,13 +58,9 @@ import java.util.regex.Pattern;
 
 import javax.jdo.JDOException;
 
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableListMultimap;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Multimaps;
 import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.collections.CollectionUtils;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
@@ -82,7 +78,6 @@ import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
 import org.apache.hadoop.hive.common.metrics.common.MetricsVariable;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.io.HdfsUtils;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
@@ -119,7 +114,6 @@ import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
 import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
 import org.apache.hadoop.hive.metastore.filemeta.OrcFileMetadataHandler;
-import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
 import org.apache.hadoop.hive.metastore.model.MTableWrite;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
@@ -165,6 +159,10 @@ import com.facebook.fb303.fb_status;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Splitter;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableListMultimap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Multimaps;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
@@ -172,8 +170,6 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  */
 public class HiveMetaStore extends ThriftHiveMetastore {
   public static final Logger LOG = LoggerFactory.getLogger(HiveMetaStore.class);
-  public static final String PARTITION_NUMBER_EXCEED_LIMIT_MSG =
-      "Number of partitions scanned (=%d) on table '%s' exceeds limit (=%d). This is controlled on the metastore server by %s.";
 
   // boolean that tells if the HiveMetaStore (remote) server is being used.
   // Can be used to determine if the calls to metastore api (HMSHandler) are being made with
@@ -238,6 +234,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
   public static class HMSHandler extends FacebookBase implements IHMSHandler, ThreadLocalRawStore {
     public static final Logger LOG = HiveMetaStore.LOG;
+    private String rawStoreClassName;
     private final HiveConf hiveConf; // stores datastore (jpox) properties,
                                      // right now they come from jpox.properties
 
@@ -419,6 +416,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
     @Override
     public void init() throws MetaException {
+      rawStoreClassName = hiveConf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL);
       initListeners = MetaStoreUtils.getMetaStoreListeners(
           MetaStoreInitListener.class, hiveConf,
           hiveConf.getVar(HiveConf.ConfVars.METASTORE_INIT_HOOKS));
@@ -516,7 +514,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       fileMetadataManager = new FileMetadataManager((ThreadLocalRawStore)this, hiveConf);
     }
 
-    private static String addPrefix(String s) {
+    private String addPrefix(String s) {
       return threadLocalId.get() + ": " + s;
     }
 
@@ -591,14 +589,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     @InterfaceStability.Evolving
     @Override
     public RawStore getMS() throws MetaException {
-      Configuration conf = getConf();
-      return getMSForConf(conf);
-    }
-
-    public static RawStore getMSForConf(Configuration conf) throws MetaException {
       RawStore ms = threadLocalMS.get();
       if (ms == null) {
-        ms = newRawStoreForConf(conf);
+        ms = newRawStore();
         ms.verifySchema();
         threadLocalMS.set(ms);
         ms = threadLocalMS.get();
@@ -615,23 +608,24 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       return txn;
     }
 
-    private static RawStore newRawStoreForConf(Configuration conf) throws MetaException {
-      HiveConf hiveConf = new HiveConf(conf, HiveConf.class);
-      String rawStoreClassName = hiveConf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL);
-      LOG.info(addPrefix("Opening raw store with implementation class:" + rawStoreClassName));
+    private RawStore newRawStore() throws MetaException {
+      LOG.info(addPrefix("Opening raw store with implementation class:"
+          + rawStoreClassName));
+      Configuration conf = getConf();
+
       if (hiveConf.getBoolVar(ConfVars.METASTORE_FASTPATH)) {
         LOG.info("Fastpath, skipping raw store proxy");
         try {
-          RawStore rs =
-              ((Class<? extends RawStore>) MetaStoreUtils.getClass(rawStoreClassName))
-                  .newInstance();
-          rs.setConf(hiveConf);
+          RawStore rs = ((Class<? extends RawStore>) MetaStoreUtils.getClass(
+              rawStoreClassName)).newInstance();
+          rs.setConf(conf);
           return rs;
         } catch (Exception e) {
           LOG.error("Unable to instantiate raw store directly in fastpath mode", e);
           throw new RuntimeException(e);
         }
       }
+
       return RawStoreProxy.getProxy(hiveConf, conf, rawStoreClassName, threadLocalId.get());
     }
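For reference, the hunk above restores the pattern of caching one RawStore per handler thread and, when the fastpath flag is set, instantiating the configured implementation reflectively instead of going through RawStoreProxy. A minimal standalone sketch of that shape follows; the RawStore interface, the ObjectStoreStub class, and the property keys used here are stand-ins for illustration only, not the real metastore classes or config names.

import java.util.Properties;

public class RawStoreCacheSketch {

  /** Stand-in for the metastore's persistence layer (assumption, not the real API). */
  interface RawStore {
    void verifySchema();
  }

  /** Example implementation resolved reflectively, as the fastpath branch does. */
  public static class ObjectStoreStub implements RawStore {
    @Override public void verifySchema() { /* no-op for the sketch */ }
  }

  // One RawStore per handler thread, created lazily on first use.
  private static final ThreadLocal<RawStore> THREAD_LOCAL_MS = new ThreadLocal<>();

  private final String rawStoreClassName;
  private final boolean fastPath;

  public RawStoreCacheSketch(Properties conf) {
    // Mirrors reading the implementation class and fastpath flag once at init time.
    this.rawStoreClassName =
        conf.getProperty("metastore.rawstore.impl", ObjectStoreStub.class.getName());
    this.fastPath = Boolean.parseBoolean(conf.getProperty("metastore.fastpath", "true"));
  }

  public RawStore getMS() {
    RawStore ms = THREAD_LOCAL_MS.get();
    if (ms == null) {
      ms = newRawStore();
      ms.verifySchema();
      THREAD_LOCAL_MS.set(ms);
    }
    return ms;
  }

  private RawStore newRawStore() {
    if (fastPath) {
      try {
        // Fastpath: instantiate the implementation directly instead of wrapping
        // it in a retrying proxy.
        return (RawStore) Class.forName(rawStoreClassName)
            .getDeclaredConstructor().newInstance();
      } catch (Exception e) {
        throw new RuntimeException("Unable to instantiate raw store directly", e);
      }
    }
    // Non-fastpath branch would return a retrying proxy; omitted in this sketch.
    return new ObjectStoreStub();
  }

  public static void main(String[] args) {
    RawStoreCacheSketch handler = new RawStoreCacheSketch(new Properties());
    // The same thread gets the same cached instance back.
    System.out.println(handler.getMS() == handler.getMS()); // true
  }
}

The thread-local is what lets each Thrift worker thread reuse its own store instance instead of re-opening one per call.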
 
@@ -880,11 +874,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Path dbPath = new Path(db.getLocationUri());
       boolean success = false;
       boolean madeDir = false;
-      Map<String, String> transactionalListenersResponses = Collections.emptyMap();
       try {
         firePreEvent(new PreCreateDatabaseEvent(db, this));
         if (!wh.isDir(dbPath)) {
-          if (!wh.mkdirs(dbPath)) {
+          if (!wh.mkdirs(dbPath, true)) {
             throw new MetaException("Unable to create database path " + dbPath +
                 ", failed to create database " + db.getName());
           }
@@ -893,12 +886,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
         ms.openTransaction();
         ms.createDatabase(db);
-
-        if (!transactionalListeners.isEmpty()) {
-          transactionalListenersResponses =
-              MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                    EventType.CREATE_DATABASE,
-                                                    new CreateDatabaseEvent(db, true, this));
+        if (transactionalListeners.size() > 0) {
+          CreateDatabaseEvent cde = new CreateDatabaseEvent(db, true, this);
+          for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+            transactionalListener.onCreateDatabase(cde);
+          }
         }
 
         success = ms.commitTransaction();
@@ -909,13 +901,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
             wh.deleteDir(dbPath, true);
           }
         }
-
-        if (!listeners.isEmpty()) {
-          MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                EventType.CREATE_DATABASE,
-                                                new CreateDatabaseEvent(db, success, this),
-                                                null,
-                                                transactionalListenersResponses);
+        for (MetaStoreEventListener listener : listeners) {
+          listener.onCreateDatabase(new CreateDatabaseEvent(db, success, this));
         }
       }
     }
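The create_database path restored above follows a create-directory-then-transaction shape: the warehouse directory is made first, the metadata write runs inside a transaction, and on failure the transaction is rolled back and only a directory created by this call is removed. A compressed sketch of that control flow, with stand-in store/warehouse interfaces rather than the real RawStore/Warehouse APIs:

public class CreateDatabaseFlowSketch {

  interface MetaStore {                 // stand-in for RawStore
    void openTransaction();
    void createDatabase(String name);
    boolean commitTransaction();
    void rollbackTransaction();
  }

  interface Warehouse {                 // stand-in for Warehouse
    boolean isDir(String path);
    boolean mkdirs(String path);
    void deleteDir(String path, boolean recursive);
  }

  static void createDatabase(MetaStore ms, Warehouse wh, String name, String dbPath) {
    boolean success = false;
    boolean madeDir = false;
    try {
      if (!wh.isDir(dbPath)) {
        if (!wh.mkdirs(dbPath)) {
          throw new RuntimeException("Unable to create database path " + dbPath);
        }
        madeDir = true;
      }
      ms.openTransaction();
      ms.createDatabase(name);
      // (transactional listeners would be notified here, before the commit)
      success = ms.commitTransaction();
    } finally {
      if (!success) {
        ms.rollbackTransaction();
        if (madeDir) {
          // Only remove what this call created; a pre-existing directory is left alone.
          wh.deleteDir(dbPath, true);
        }
      }
      // (regular listeners are notified here with the final success flag)
    }
  }

  public static void main(String[] args) {
    MetaStore ms = new MetaStore() {
      public void openTransaction() { }
      public void createDatabase(String name) { System.out.println("created " + name); }
      public boolean commitTransaction() { return true; }
      public void rollbackTransaction() { }
    };
    Warehouse wh = new Warehouse() {
      public boolean isDir(String path) { return false; }
      public boolean mkdirs(String path) { return true; }
      public void deleteDir(String path, boolean recursive) { System.out.println("deleted " + path); }
    };
    createDatabase(ms, wh, "sales", "/warehouse/sales.db");
  }
}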
@@ -1030,7 +1017,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Database db = null;
       List<Path> tablePaths = new ArrayList<Path>();
       List<Path> partitionPaths = new ArrayList<Path>();
-      Map<String, String> transactionalListenerResponses = Collections.emptyMap();
       try {
         ms.openTransaction();
         db = ms.getDatabase(name);
@@ -1113,13 +1099,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
 
         if (ms.dropDatabase(name)) {
-          if (!transactionalListeners.isEmpty()) {
-            transactionalListenerResponses =
-                MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                      EventType.DROP_DATABASE,
-                                                      new DropDatabaseEvent(db, true, this));
+          if (transactionalListeners.size() > 0) {
+            DropDatabaseEvent dde = new DropDatabaseEvent(db, true, this);
+            for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+              transactionalListener.onDropDatabase(dde);
+            }
           }
-
           success = ms.commitTransaction();
         }
       } finally {
@@ -1141,13 +1126,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           }
           // it is not a terrible thing even if the data is not deleted
         }
-
-        if (!listeners.isEmpty()) {
-          MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                EventType.DROP_DATABASE,
-                                                new DropDatabaseEvent(db, success, this),
-                                                null,
-                                                transactionalListenerResponses);
+        for (MetaStoreEventListener listener : listeners) {
+          listener.onDropDatabase(new DropDatabaseEvent(db, success, this));
         }
       }
     }
@@ -1373,7 +1353,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
       }
 
-      Map<String, String> transactionalListenerResponses = Collections.emptyMap();
       Path tblPath = null;
       boolean success = false, madeDir = false;
       try {
@@ -1395,7 +1374,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         if (!TableType.VIRTUAL_VIEW.toString().equals(tbl.getTableType())) {
           if (tbl.getSd().getLocation() == null
               || tbl.getSd().getLocation().isEmpty()) {
-            tblPath = wh.getDefaultTablePath(
+            tblPath = wh.getTablePath(
                 ms.getDatabase(tbl.getDbName()), tbl.getTableName());
           } else {
             if (!isExternal(tbl) && !MetaStoreUtils.isNonNativeTable(tbl)) {
@@ -1409,7 +1388,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
         if (tblPath != null) {
           if (!wh.isDir(tblPath)) {
-            if (!wh.mkdirs(tblPath)) {
+            if (!wh.mkdirs(tblPath, true)) {
               throw new MetaException(tblPath
                   + " is not a directory or unable to create one");
             }
@@ -1434,12 +1413,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           ms.createTableWithConstraints(tbl, primaryKeys, foreignKeys);
         }
 
-        if (!transactionalListeners.isEmpty()) {
-          transactionalListenerResponses =
-              MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                    EventType.CREATE_TABLE,
-                                                    new CreateTableEvent(tbl, true, this),
-                                                    envContext);
+        if (transactionalListeners.size() > 0) {
+          CreateTableEvent createTableEvent = new CreateTableEvent(tbl, true, this);
+          createTableEvent.setEnvironmentContext(envContext);
+          for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+            transactionalListener.onCreateTable(createTableEvent);
+          }
         }
 
         success = ms.commitTransaction();
@@ -1450,13 +1429,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
             wh.deleteDir(tblPath, true);
           }
         }
-
-        if (!listeners.isEmpty()) {
-          MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                EventType.CREATE_TABLE,
-                                                new CreateTableEvent(tbl, success, this),
-                                                envContext,
-                                                transactionalListenerResponses);
+        for (MetaStoreEventListener listener : listeners) {
+          CreateTableEvent createTableEvent =
+              new CreateTableEvent(tbl, success, this);
+          createTableEvent.setEnvironmentContext(envContext);
+          listener.onCreateTable(createTableEvent);
         }
       }
     }
@@ -1621,7 +1598,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       List<Path> partPaths = null;
       Table tbl = null;
       boolean ifPurge = false;
-      Map<String, String> transactionalListenerResponses = Collections.emptyMap();
       try {
         ms.openTransaction();
         // drop any partitions
@@ -1666,6 +1642,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           }
         }
 
+        checkTrashPurgeCombination(tblPath, dbname + "." + name, ifPurge, deleteData && !isExternal);
         // Drop the partitions and get a list of locations which need to be deleted
         partPaths = dropPartitionsAndGetLocations(ms, dbname, name, tblPath,
             tbl.getPartitionKeys(), deleteData && !isExternal);
@@ -1674,12 +1651,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           throw new MetaException(indexName == null ? "Unable to drop table " + tableName:
               "Unable to drop index table " + tableName + " for index " + indexName);
         } else {
-          if (!transactionalListeners.isEmpty()) {
-            transactionalListenerResponses =
-                MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                      EventType.DROP_TABLE,
-                                                      new DropTableEvent(tbl, deleteData, true, this),
-                                                      envContext);
+          if (transactionalListeners.size() > 0) {
+            DropTableEvent dropTableEvent = new DropTableEvent(tbl, true, deleteData, this);
+            dropTableEvent.setEnvironmentContext(envContext);
+            for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+              transactionalListener.onDropTable(dropTableEvent);
+            }
           }
           success = ms.commitTransaction();
         }
@@ -1694,19 +1671,56 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           deleteTableData(tblPath, ifPurge);
           // ok even if the data is not deleted
         }
-
-        if (!listeners.isEmpty()) {
-          MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                EventType.DROP_TABLE,
-                                                new DropTableEvent(tbl, deleteData, success, this),
-                                                envContext,
-                                                transactionalListenerResponses);
+        for (MetaStoreEventListener listener : listeners) {
+          DropTableEvent dropTableEvent = new DropTableEvent(tbl, success, deleteData, this);
+          dropTableEvent.setEnvironmentContext(envContext);
+          listener.onDropTable(dropTableEvent);
         }
       }
       return success;
     }
 
     /**
+     * Will throw MetaException if the combination of trash policy and purge cannot be satisfied
+     * @param pathToData path to data which may potentially be moved to trash
+     * @param objectName db.table, or db.table.part
+     * @param ifPurge true if the PURGE option is specified
+     * @param deleteData true if the underlying data is to be deleted
+     */
+    private void checkTrashPurgeCombination(Path pathToData, String objectName, boolean ifPurge,
+        boolean deleteData) throws MetaException {
+      // There is no need to check TrashPurgeCombination in following cases since Purge/Trash
+      // is not applicable:
+      // a) deleteData is false -- drop an external table
+      // b) pathToData is null -- a view
+      // c) ifPurge is true -- force delete without Trash
+      if (!deleteData || pathToData == null || ifPurge) {
+        return;
+      }
+
+      boolean trashEnabled = false;
+      try {
+        trashEnabled = 0 < hiveConf.getFloat("fs.trash.interval", -1);
+      } catch (NumberFormatException ex) {
+        // nothing to do
+      }
+
+      if (trashEnabled) {
+        try {
+          HadoopShims.HdfsEncryptionShim shim =
+            ShimLoader.getHadoopShims().createHdfsEncryptionShim(FileSystem.get(hiveConf), hiveConf);
+          if (shim.isPathEncrypted(pathToData)) {
+            throw new MetaException("Unable to drop " + objectName + " because it is in an encryption zone" +
+              " and trash is enabled.  Use PURGE option to skip trash.");
+          }
+        } catch (IOException ex) {
+          MetaException e = new MetaException(ex.getMessage());
+          e.initCause(ex);
+          throw e;
+        }
+      }
+    }
+
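A condensed, standalone sketch of the trash/purge guard added above; the encryption-zone probe is a plain predicate standing in for HadoopShims.HdfsEncryptionShim.isPathEncrypted(), and the exception type is a placeholder for MetaException:

import java.util.function.Predicate;

public class TrashPurgeGuardSketch {

  /** Thrown when the drop cannot be satisfied; stands in for MetaException. */
  public static class DropNotAllowedException extends Exception {
    public DropNotAllowedException(String msg) { super(msg); }
  }

  private final float trashIntervalMinutes;          // value of fs.trash.interval
  private final Predicate<String> isPathEncrypted;   // assumption: encryption-zone probe

  public TrashPurgeGuardSketch(float trashIntervalMinutes, Predicate<String> isPathEncrypted) {
    this.trashIntervalMinutes = trashIntervalMinutes;
    this.isPathEncrypted = isPathEncrypted;
  }

  public void checkTrashPurgeCombination(String pathToData, String objectName,
      boolean ifPurge, boolean deleteData) throws DropNotAllowedException {
    // Nothing to check when no data is deleted (external table), there is no
    // location (view), or PURGE bypasses the trash entirely.
    if (!deleteData || pathToData == null || ifPurge) {
      return;
    }
    boolean trashEnabled = trashIntervalMinutes > 0;
    if (trashEnabled && isPathEncrypted.test(pathToData)) {
      throw new DropNotAllowedException("Unable to drop " + objectName
          + " because it is in an encryption zone and trash is enabled."
          + " Use PURGE option to skip trash.");
    }
  }

  public static void main(String[] args) throws Exception {
    TrashPurgeGuardSketch guard =
        new TrashPurgeGuardSketch(1440f, path -> path.startsWith("/secure/"));
    guard.checkTrashPurgeCombination("/warehouse/t", "db.t", false, true); // passes
    try {
      guard.checkTrashPurgeCombination("/secure/t", "db.t", false, true);  // throws
    } catch (DropNotAllowedException expected) {
      System.out.println(expected.getMessage());
    }
  }
}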
+    /**
      * Deletes the data in a table's location, if it fails logs an error
      *
      * @param tablePath
@@ -1869,151 +1883,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
     }
 
-    private void updateStatsForTruncate(Map<String,String> props, EnvironmentContext environmentContext) {
-      if (null == props) {
-        return;
-      }
-      for (String stat : StatsSetupConst.supportedStats) {
-        String statVal = props.get(stat);
-        if (statVal != null) {
-          //In the case of truncate table, we set the stats to be 0.
-          props.put(stat, "0");
-        }
-      }
-      //first set basic stats to true
-      StatsSetupConst.setBasicStatsState(props, StatsSetupConst.TRUE);
-      environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
-      //then invalidate column stats
-      StatsSetupConst.clearColumnStatsState(props);
-      return;
-    }
-
-    private void alterPartitionForTruncate(final RawStore ms,
-                                           final String dbName,
-                                           final String tableName,
-                                           final Table table,
-                                           final Partition partition) throws Exception {
-      EnvironmentContext environmentContext = new EnvironmentContext();
-      updateStatsForTruncate(partition.getParameters(), environmentContext);
-
-      if (!transactionalListeners.isEmpty()) {
-        MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                EventType.ALTER_PARTITION,
-                new AlterPartitionEvent(partition, partition, table, true, true, this));
-      }
-
-      if (!listeners.isEmpty()) {
-        MetaStoreListenerNotifier.notifyEvent(listeners,
-                EventType.ALTER_PARTITION,
-                new AlterPartitionEvent(partition, partition, table, true, true, this));
-      }
-
-      alterHandler.alterPartition(ms, wh, dbName, tableName, null, partition, environmentContext, this);
-    }
-
-    private void alterTableStatsForTruncate(final RawStore ms,
-                                            final String dbName,
-                                            final String tableName,
-                                            final Table table,
-                                            final List<String> partNames) throws Exception {
-      if (partNames == null) {
-        if (0 != table.getPartitionKeysSize()) {
-          for (Partition partition : ms.getPartitions(dbName, tableName, Integer.MAX_VALUE)) {
-            alterPartitionForTruncate(ms, dbName, tableName, table, partition);
-          }
-        } else {
-          EnvironmentContext environmentContext = new EnvironmentContext();
-          updateStatsForTruncate(table.getParameters(), environmentContext);
-
-          if (!transactionalListeners.isEmpty()) {
-            MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                    EventType.ALTER_TABLE,
-                    new AlterTableEvent(table, table, true, true, this));
-          }
-
-          if (!listeners.isEmpty()) {
-            MetaStoreListenerNotifier.notifyEvent(listeners,
-                    EventType.ALTER_TABLE,
-                    new AlterTableEvent(table, table, true, true, this));
-          }
-
-          alterHandler.alterTable(ms, wh, dbName, tableName, table, environmentContext, this);
-        }
-      } else {
-        for (Partition partition : ms.getPartitionsByNames(dbName, tableName, partNames)) {
-          alterPartitionForTruncate(ms, dbName, tableName, table, partition);
-        }
-      }
-      return;
-    }
-
-    private List<Path> getLocationsForTruncate(final RawStore ms,
-                                               final String dbName,
-                                               final String tableName,
-                                               final Table table,
-                                               final List<String> partNames) throws Exception {
-      List<Path> locations = new ArrayList<Path>();
-      if (partNames == null) {
-        if (0 != table.getPartitionKeysSize()) {
-          for (Partition partition : ms.getPartitions(dbName, tableName, Integer.MAX_VALUE)) {
-            locations.add(new Path(partition.getSd().getLocation()));
-          }
-        } else {
-          locations.add(new Path(table.getSd().getLocation()));
-        }
-      } else {
-        for (Partition partition : ms.getPartitionsByNames(dbName, tableName, partNames)) {
-          locations.add(new Path(partition.getSd().getLocation()));
-        }
-      }
-      return locations;
-    }
-
-    @Override
-    public void truncate_table(final String dbName, final String tableName, List<String> partNames)
-      throws NoSuchObjectException, MetaException {
-      try {
-        Table tbl = get_table_core(dbName, tableName);
-        boolean isAutopurge = (tbl.isSetParameters() && "true".equalsIgnoreCase(tbl.getParameters().get("auto.purge")));
-
-        // This is not transactional
-        for (Path location : getLocationsForTruncate(getMS(), dbName, tableName, tbl, partNames)) {
-          FileSystem fs = location.getFileSystem(getHiveConf());
-          HadoopShims.HdfsEncryptionShim shim
-                  = ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, getHiveConf());
-          if (!shim.isPathEncrypted(location)) {
-            HdfsUtils.HadoopFileStatus status = new HdfsUtils.HadoopFileStatus(getHiveConf(), fs, location);
-            FileStatus targetStatus = fs.getFileStatus(location);
-            String targetGroup = targetStatus == null ? null : targetStatus.getGroup();
-            wh.deleteDir(location, true, isAutopurge);
-            fs.mkdirs(location);
-            HdfsUtils.setFullFileStatus(getHiveConf(), status, targetGroup, fs, location, false);
-          } else {
-            FileStatus[] statuses = fs.listStatus(location, FileUtils.HIDDEN_FILES_PATH_FILTER);
-            if (statuses == null || statuses.length == 0) {
-              continue;
-            }
-            for (final FileStatus status : statuses) {
-              wh.deleteDir(status.getPath(), true, isAutopurge);
-            }
-          }
-        }
-
-        // Alter the table/partition stats and also notify truncate table event
-        alterTableStatsForTruncate(getMS(), dbName, tableName, tbl, partNames);
-      } catch (IOException e) {
-        throw new MetaException(e.getMessage());
-      } catch (Exception e) {
-        if (e instanceof MetaException) {
-          throw (MetaException) e;
-        } else if (e instanceof NoSuchObjectException) {
-          throw (NoSuchObjectException) e;
-        } else {
-          throw newMetaException(e);
-        }
-      }
-    }
-
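For comparison, the truncate flow being removed by this revert (the master-side truncate_table) boils down to: outside an encryption zone, drop and recreate the location; inside one, delete only the visible children so the zone itself stays intact. A rough sketch under those assumptions follows; the encryption probe is a stand-in predicate, and the owner/permission restoration done via HdfsUtils.setFullFileStatus in the real code is omitted.

import java.io.IOException;
import java.util.function.Predicate;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TruncateLocationSketch {

  public static void truncateLocation(Configuration conf, Path location,
      Predicate<Path> isEncryptionZone) throws IOException {
    FileSystem fs = location.getFileSystem(conf);
    if (!isEncryptionZone.test(location)) {
      // Plain case: drop the whole directory and recreate it empty.
      fs.delete(location, true);
      fs.mkdirs(location);
      return;
    }
    // Encrypted case: deleting the directory would remove the encryption zone,
    // so delete its children instead (the real code skips hidden files via
    // FileUtils.HIDDEN_FILES_PATH_FILTER; a simple name check is used here).
    FileStatus[] children = fs.listStatus(location);
    for (FileStatus child : children) {
      String name = child.getPath().getName();
      if (name.startsWith("_") || name.startsWith(".")) {
        continue;
      }
      fs.delete(child.getPath(), true);
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration(); // defaults to the local filesystem
    Path dir = new Path("/tmp/truncate-sketch");
    FileSystem fs = dir.getFileSystem(conf);
    fs.mkdirs(dir);
    truncateLocation(conf, dir, p -> false);
    System.out.println("exists after truncate: " + fs.exists(dir));
  }
}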
     /**
      * Is this an external table?
      *
@@ -2266,7 +2135,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       boolean success = false, madeDir = false;
       Path partLocation = null;
       Table tbl = null;
-      Map<String, String> transactionalListenerResponses = Collections.emptyMap();
       try {
         ms.openTransaction();
         part.setDbName(dbName);
@@ -2305,7 +2173,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
 
         if (!wh.isDir(partLocation)) {
-          if (!wh.mkdirs(partLocation)) {
+          if (!wh.mkdirs(partLocation, true)) {
             throw new MetaException(partLocation
                 + " is not a directory or unable to create one");
           }
@@ -2323,12 +2191,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
 
         if (ms.addPartition(part)) {
-          if (!transactionalListeners.isEmpty()) {
-            transactionalListenerResponses =
-                MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                      EventType.ADD_PARTITION,
-                                                      new AddPartitionEvent(tbl, part, true, this),
-                                                      envContext);
+          if (transactionalListeners.size() > 0) {
+            AddPartitionEvent addPartitionEvent = new AddPartitionEvent(tbl, part, true, this);
+            addPartitionEvent.setEnvironmentContext(envContext);
+            for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+              transactionalListener.onAddPartition(addPartitionEvent);
+            }
           }
 
           success = ms.commitTransaction();
@@ -2341,12 +2209,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           }
         }
 
-        if (!listeners.isEmpty()) {
-          MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                EventType.ADD_PARTITION,
-                                                new AddPartitionEvent(tbl, part, success, this),
-                                                envContext,
-                                                transactionalListenerResponses);
+        for (MetaStoreEventListener listener : listeners) {
+          AddPartitionEvent addPartitionEvent =
+              new AddPartitionEvent(tbl, part, success, this);
+          addPartitionEvent.setEnvironmentContext(envContext);
+          listener.onAddPartition(addPartitionEvent);
         }
       }
       return part;
@@ -2491,10 +2358,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       final Map<PartValEqWrapper, Boolean> addedPartitions =
           Collections.synchronizedMap(new HashMap<PartValEqWrapper, Boolean>());
       final List<Partition> newParts = new ArrayList<Partition>();
-      final List<Partition> existingParts = new ArrayList<Partition>();
+      final List<Partition> existingParts = new ArrayList<Partition>();
       Table tbl = null;
-      Map<String, String> transactionalListenerResponses = Collections.emptyMap();
-
       try {
         ms.openTransaction();
         tbl = ms.getTable(dbName, tblName);
@@ -2580,13 +2445,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         success = false;
         // Notification is generated for newly created partitions only. The subset of partitions
         // that already exist (existingParts), will not generate notifications.
-        if (!transactionalListeners.isEmpty()) {
-          transactionalListenerResponses =
-              MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                    EventType.ADD_PARTITION,
-                                                    new AddPartitionEvent(tbl, newParts, true, this));
-        }
-
+        fireMetaStoreAddPartitionEventTransactional(tbl, newParts, null, true);
         success = ms.commitTransaction();
       } finally {
         if (!success) {
@@ -2597,26 +2456,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
               wh.deleteDir(new Path(e.getKey().partition.getSd().getLocation()), true);
             }
           }
-
-          if (!listeners.isEmpty()) {
-            MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                  EventType.ADD_PARTITION,
-                                                  new AddPartitionEvent(tbl, parts, false, this));
-          }
+          fireMetaStoreAddPartitionEvent(tbl, parts, null, false);
         } else {
-          if (!listeners.isEmpty()) {
-            MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                  EventType.ADD_PARTITION,
-                                                  new AddPartitionEvent(tbl, newParts, true, this),
-                                                  null,
-                                                  transactionalListenerResponses);
-
-            if (!existingParts.isEmpty()) {
-              // The request has succeeded but we failed to add these partitions.
-              MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                    EventType.ADD_PARTITION,
-                                                    new AddPartitionEvent(tbl, existingParts, false, this));
-            }
+          fireMetaStoreAddPartitionEvent(tbl, newParts, null, true);
+          if (existingParts != null) {
+            // The request has succeeded but we failed to add these partitions.
+            fireMetaStoreAddPartitionEvent(tbl, existingParts, null, false);
           }
         }
       }
@@ -2703,7 +2548,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       final PartitionSpecProxy.PartitionIterator partitionIterator = partitionSpecProxy
           .getPartitionIterator();
       Table tbl = null;
-      Map<String, String> transactionalListenerResponses = Collections.emptyMap();
       try {
         ms.openTransaction();
         tbl = ms.getTable(dbName, tblName);
@@ -2777,14 +2621,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         success = ms.addPartitions(dbName, tblName, partitionSpecProxy, ifNotExists);
         //setting success to false to make sure that if the listener fails, rollback happens.
         success = false;
-
-        if (!transactionalListeners.isEmpty()) {
-          transactionalListenerResponses =
-              MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                    EventType.ADD_PARTITION,
-                                                    new AddPartitionEvent(tbl, partitionSpecProxy, true, this));
-        }
-
+        fireMetaStoreAddPartitionEventTransactional(tbl, partitionSpecProxy, null, true);
         success = ms.commitTransaction();
         return addedPartitions.size();
       } finally {
@@ -2797,14 +2634,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
             }
           }
         }
-
-        if (!listeners.isEmpty()) {
-          MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                EventType.ADD_PARTITION,
-                                                new AddPartitionEvent(tbl, partitionSpecProxy, true, this),
-                                                null,
-                                                transactionalListenerResponses);
-        }
+        fireMetaStoreAddPartitionEvent(tbl, partitionSpecProxy, null, true);
       }
     }
 
@@ -2856,7 +2686,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         // mkdirs() because if the file system is read-only, mkdirs will
         // throw an exception even if the directory already exists.
         if (!wh.isDir(partLocation)) {
-          if (!wh.mkdirs(partLocation)) {
+          if (!wh.mkdirs(partLocation, true)) {
             throw new MetaException(partLocation
                 + " is not a directory or unable to create one");
           }
@@ -2909,7 +2739,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
       boolean success = false;
       Table tbl = null;
-      Map<String, String> transactionalListenerResponses = Collections.emptyMap();
       try {
         ms.openTransaction();
         tbl = ms.getTable(part.getDbName(), part.getTableName());
@@ -2934,16 +2763,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
         // Setting success to false to make sure that if the listener fails, rollback happens.
         success = false;
-
-        if (!transactionalListeners.isEmpty()) {
-          transactionalListenerResponses =
-              MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                    EventType.ADD_PARTITION,
-                                                    new AddPartitionEvent(tbl, Arrays.asList(part), true, this),
-                                                    envContext);
-
-        }
-
+        fireMetaStoreAddPartitionEventTransactional(tbl, Arrays.asList(part), envContext, true);
         // we proceed only if we'd actually succeeded anyway, otherwise,
         // we'd have thrown an exception
         success = ms.commitTransaction();
@@ -2951,19 +2771,64 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         if (!success) {
           ms.rollbackTransaction();
         }
+        fireMetaStoreAddPartitionEvent(tbl, Arrays.asList(part), envContext, success);
+      }
+      return part;
+    }
 
-        if (!listeners.isEmpty()) {
-          MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                EventType.ADD_PARTITION,
-                                                new AddPartitionEvent(tbl, Arrays.asList(part), success, this),
-                                                envContext,
-                                                transactionalListenerResponses);
+    private void fireMetaStoreAddPartitionEvent(final Table tbl,
+        final List<Partition> parts, final EnvironmentContext envContext, boolean success)
+          throws MetaException {
+      if (tbl != null && parts != null && !parts.isEmpty()) {
+        AddPartitionEvent addPartitionEvent =
+            new AddPartitionEvent(tbl, parts, success, this);
+        addPartitionEvent.setEnvironmentContext(envContext);
+        for (MetaStoreEventListener listener : listeners) {
+          listener.onAddPartition(addPartitionEvent);
+        }
+      }
+    }
 
+    private void fireMetaStoreAddPartitionEvent(final Table tbl,
+        final PartitionSpecProxy partitionSpec, final EnvironmentContext envContext, boolean success)
+          throws MetaException {
+      if (tbl != null && partitionSpec != null) {
+        AddPartitionEvent addPartitionEvent =
+            new AddPartitionEvent(tbl, partitionSpec, success, this);
+        addPartitionEvent.setEnvironmentContext(envContext);
+        for (MetaStoreEventListener listener : listeners) {
+          listener.onAddPartition(addPartitionEvent);
         }
       }
-      return part;
     }
 
+    private void fireMetaStoreAddPartitionEventTransactional(final Table tbl,
+          final List<Partition> parts, final EnvironmentContext envContext, boolean success)
+            throws MetaException {
+      if (tbl != null && parts != null && !parts.isEmpty()) {
+        AddPartitionEvent addPartitionEvent =
+                new AddPartitionEvent(tbl, parts, success, this);
+        addPartitionEvent.setEnvironmentContext(envContext);
+        for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+          transactionalListener.onAddPartition(addPartitionEvent);
+        }
+      }
+    }
+
+    private void fireMetaStoreAddPartitionEventTransactional(final Table tbl,
+          final PartitionSpecProxy partitionSpec, final EnvironmentContext envContext, boolean success)
+            throws MetaException {
+      if (tbl != null && partitionSpec != null) {
+        AddPartitionEvent addPartitionEvent =
+                new AddPartitionEvent(tbl, partitionSpec, success, this);
+        addPartitionEvent.setEnvironmentContext(envContext);
+        for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+          transactionalListener.onAddPartition(addPartitionEvent);
+        }
+      }
+    }
+
+
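The fireMetaStore*Event helpers restored above all share one shape: build a single event object, attach the outcome, and hand it to every listener in the relevant list (transactional listeners inside the still-open transaction, regular listeners afterwards with the final success flag). A small sketch of that fan-out with stand-in listener and event types, not the real MetaStoreEventListener/AddPartitionEvent classes:

import java.util.ArrayList;
import java.util.List;

public class ListenerFanOutSketch {

  /** Stand-in event carrying only the partition name and outcome flag. */
  public static class AddPartitionEvent {
    final String partitionName;
    final boolean success;
    AddPartitionEvent(String partitionName, boolean success) {
      this.partitionName = partitionName;
      this.success = success;
    }
  }

  /** Stand-in listener interface. */
  public interface MetaStoreEventListener {
    void onAddPartition(AddPartitionEvent event);
  }

  // Transactional listeners run before commit; regular listeners run afterwards.
  private final List<MetaStoreEventListener> transactionalListeners = new ArrayList<>();
  private final List<MetaStoreEventListener> listeners = new ArrayList<>();

  void fireAddPartitionEvent(List<MetaStoreEventListener> targets,
      String partitionName, boolean success) {
    if (targets.isEmpty()) {
      return; // nothing registered, skip building the event
    }
    AddPartitionEvent event = new AddPartitionEvent(partitionName, success);
    for (MetaStoreEventListener listener : targets) {
      listener.onAddPartition(event);
    }
  }

  public static void main(String[] args) {
    ListenerFanOutSketch sketch = new ListenerFanOutSketch();
    sketch.listeners.add(e -> System.out.println(e.partitionName + " success=" + e.success));
    sketch.fireAddPartitionEvent(sketch.transactionalListeners, "ds=2017-05-08", true);
    sketch.fireAddPartitionEvent(sketch.listeners, "ds=2017-05-08", true);
  }
}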
     @Override
     public Partition add_partition(final Partition part)
         throws InvalidObjectException, AlreadyExistsException, MetaException {
@@ -3046,11 +2911,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Path destPath = new Path(destinationTable.getSd().getLocation(),
           Warehouse.makePartName(partitionKeysPresent, partValsPresent));
       List<Partition> destPartitions = new ArrayList<Partition>();
-
-      Map<String, String> transactionalListenerResponsesForAddPartition = Collections.emptyMap();
-      List<Map<String, String>> transactionalListenerResponsesForDropPartition =
-          Lists.newArrayListWithCapacity(partitionsToExchange.size());
-
       try {
         for (Partition partition: partitionsToExchange) {
           Partition destPartition = new Partition(partition);
@@ -3066,7 +2926,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
         Path destParentPath = destPath.getParent();
         if (!wh.isDir(destParentPath)) {
-          if (!wh.mkdirs(destParentPath)) {
+          if (!wh.mkdirs(destParentPath, true)) {
               throw new MetaException("Unable to create path " + destParentPath);
           }
         }
@@ -3078,22 +2938,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
         // Setting success to false to make sure that if the listener fails, rollback happens.
         success = false;
-
-        if (!transactionalListeners.isEmpty()) {
-          transactionalListenerResponsesForAddPartition =
-              MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                    EventType.ADD_PARTITION,
-                                                    new AddPartitionEvent(destinationTable, destPartitions, true, this));
-
-          for (Partition partition : partitionsToExchange) {
-            DropPartitionEvent dropPartitionEvent =
-                new DropPartitionEvent(sourceTable, partition, true, true, this);
-            transactionalListenerResponsesForDropPartition.add(
-                MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                      EventType.DROP_PARTITION,
-                                                      dropPartitionEvent));
-          }
-        }
+        fireMetaStoreExchangePartitionEvent(sourceTable, partitionsToExchange,
+            destinationTable, destPartitions, transactionalListeners, true);
 
         success = ms.commitTransaction();
         return destPartitions;
@@ -3103,31 +2949,34 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           if (pathCreated) {
             wh.renameDir(destPath, sourcePath);
           }
+
+          fireMetaStoreExchangePartitionEvent(sourceTable, partitionsToExchange,
+              destinationTable, destPartitions, listeners, success);
         }
+      }
+    }
 
-        if (!listeners.isEmpty()) {
-          AddPartitionEvent addPartitionEvent = new AddPartitionEvent(destinationTable, destPartitions, success, this);
-          MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                EventType.ADD_PARTITION,
-                                                addPartitionEvent,
-                                                null,
-                                                transactionalListenerResponsesForAddPartition);
+    private void fireMetaStoreExchangePartitionEvent(Table sourceTable,
+        List<Partition> partitionsToExchange, Table destinationTable,
+        List<Partition> destPartitions,
+        List<MetaStoreEventListener> eventListeners,
+        boolean status) throws MetaException {
+      if (sourceTable != null && destinationTable != null
+          && !CollectionUtils.isEmpty(partitionsToExchange)
+          && !CollectionUtils.isEmpty(destPartitions)) {
+        if (eventListeners.size() > 0) {
+          AddPartitionEvent addPartitionEvent =
+              new AddPartitionEvent(destinationTable, destPartitions, status, this);
+          for (MetaStoreEventListener eventListener : eventListeners) {
+            eventListener.onAddPartition(addPartitionEvent);
+          }
 
-          i = 0;
           for (Partition partition : partitionsToExchange) {
             DropPartitionEvent dropPartitionEvent =
-                new DropPartitionEvent(sourceTable, partition, success, true, this);
-            Map<String, String> parameters =
-                (transactionalListenerResponsesForDropPartition.size() > i)
-                    ? transactionalListenerResponsesForDropPartition.get(i)
-                    : null;
-
-            MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                  EventType.DROP_PARTITION,
-                                                  dropPartitionEvent,
-                                                  null,
-                                                  parameters);
-            i++;
+                new DropPartitionEvent(sourceTable, partition, true, status, this);
+            for (MetaStoreEventListener eventListener : eventListeners) {
+              eventListener.onDropPartition(dropPartitionEvent);
+            }
           }
         }
       }
@@ -3145,7 +2994,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Path archiveParentDir = null;
       boolean mustPurge = false;
       boolean isExternalTbl = false;
-      Map<String, String> transactionalListenerResponses = Collections.emptyMap();
 
       try {
         ms.openTransaction();
@@ -3164,23 +3012,27 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         if (isArchived) {
           archiveParentDir = MetaStoreUtils.getOriginalLocation(part);
           verifyIsWritablePath(archiveParentDir);
+          checkTrashPurgeCombination(archiveParentDir, db_name + "." + tbl_name + "." + part_vals,
+              mustPurge, deleteData && !isExternalTbl);
         }
 
         if ((part.getSd() != null) && (part.getSd().getLocation() != null)) {
           partPath = new Path(part.getSd().getLocation());
           verifyIsWritablePath(partPath);
+          checkTrashPurgeCombination(partPath, db_name + "." + tbl_name + "." + part_vals,
+              mustPurge, deleteData && !isExternalTbl);
         }
 
         if (!ms.dropPartition(db_name, tbl_name, part_vals)) {
           throw new MetaException("Unable to drop partition");
         } else {
-          if (!transactionalListeners.isEmpty()) {
-
-            transactionalListenerResponses =
-                MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                      EventType.DROP_PARTITION,
-                                                      new DropPartitionEvent(tbl, part, true, deleteData, this),
-                                                      envContext);
+          if (transactionalListeners.size() > 0) {
+            DropPartitionEvent dropPartitionEvent =
+                new DropPartitionEvent(tbl, part, true, deleteData, this);
+            dropPartitionEvent.setEnvironmentContext(envContext);
+            for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+              transactionalListener.onDropPartition(dropPartitionEvent);
+            }
           }
           success = ms.commitTransaction();
         }
@@ -3208,12 +3060,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
             // ok even if the data is not deleted
           }
         }
-        if (!listeners.isEmpty()) {
-          MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                EventType.DROP_PARTITION,
-                                                new DropPartitionEvent(tbl, part, success, deleteData, this),
-                                                envContext,
-                                                transactionalListenerResponses);
+        for (MetaStoreEventListener listener : listeners) {
+          DropPartitionEvent dropPartitionEvent =
+            new DropPartitionEvent(tbl, part, success, deleteData, this);
+          dropPartitionEvent.setEnvironmentContext(envContext);
+          listener.onDropPartition(dropPartitionEvent);
         }
       }
       return true;
@@ -3275,8 +3126,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       List<Partition> parts = null;
       boolean mustPurge = false;
       boolean isExternalTbl = false;
-      List<Map<String, String>> transactionalListenerResponses = Lists.newArrayList();
-
       try {
         // We need Partition-s for firing events and for result; DN needs MPartition-s to drop.
         // Great... Maybe we could bypass fetching MPartitions by issuing direct SQL deletes.
@@ -3346,23 +3195,28 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           if (MetaStoreUtils.isArchived(part)) {
             Path archiveParentDir = MetaStoreUtils.getOriginalLocation(part);
             verifyIsWritablePath(archiveParentDir);
+            checkTrashPurgeCombination(archiveParentDir, dbName + "." + tblName + "." +
+                part.getValues(), mustPurge, deleteData && !isExternalTbl);
             archToDelete.add(archiveParentDir);
           }
           if ((part.getSd() != null) && (part.getSd().getLocation() != null)) {
             Path partPath = new Path(part.getSd().getLocation());
             verifyIsWritablePath(partPath);
+            checkTrashPurgeCombination(partPath, dbName + "." + tblName + "." + part.getValues(),
+                mustPurge, deleteData && !isExternalTbl);
             dirsToDelete.add(new PathAndPartValSize(partPath, part.getValues().size()));
           }
         }
 
         ms.dropPartitions(dbName, tblName, partNames);
-        if (parts != null && !transactionalListeners.isEmpty()) {
+        if (parts != null && transactionalListeners.size() > 0) {
           for (Partition part : parts) {
-            transactionalListenerResponses.add(
-                MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                      EventType.DROP_PARTITION,
-                                                      new DropPartitionEvent(tbl, part, true, deleteData, this),
-                                                      envContext));
+            DropPartitionEvent dropPartitionEvent =
+                new DropPartitionEvent(tbl, part, true, deleteData, this);
+            dropPartitionEvent.setEnvironmentContext(envContext);
+            for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+              transactionalListener.onDropPartition(dropPartitionEvent);
+            }
           }
         }
 
@@ -3396,19 +3250,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           }
         }
         if (parts != null) {
-          int i = 0;
-          if (parts != null && !listeners.isEmpty()) {
-            for (Partition part : parts) {
-              Map<String, String> parameters =
-                  (!transactionalListenerResponses.isEmpty()) ? transactionalListenerResponses.get(i) : null;
-
-              MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                    EventType.DROP_PARTITION,
-                                                    new DropPartitionEvent(tbl, part, success, deleteData, this),
-                                                    envContext,
-                                                    parameters);
-
-              i++;
+          for (Partition part : parts) {
+            for (MetaStoreEventListener listener : listeners) {
+              DropPartitionEvent dropPartitionEvent =
+                new DropPartitionEvent(tbl, part, success, deleteData, this);
+              dropPartitionEvent.setEnvironmentContext(envContext);
+              listener.onDropPartition(dropPartitionEvent);
             }
           }
         }
@@ -3588,8 +3435,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         int partitionRequest = (maxToFetch < 0) ? numPartitions : maxToFetch;
         if (partitionRequest > partitionLimit) {
           String configName = ConfVars.METASTORE_LIMIT_PARTITION_REQUEST.varname;
-          throw new MetaException(String.format(PARTITION_NUMBER_EXCEED_LIMIT_MSG, partitionRequest,
-              tblName, partitionLimit, configName));
+          throw new MetaException(String.format("Number of partitions scanned (=%d) on table '%s' exceeds limit" +
+              " (=%d). This is controlled on the metastore server by %s.", partitionRequest, tblName, partitionLimit, configName));
         }
       }
     }
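The limit check in the hunk above can be read in isolation: a negative max means "fetch all", and the effective request size is compared against the configured limit before any partitions are scanned. A toy version follows, with IllegalStateException standing in for MetaException and the limit passed in directly rather than read from METASTORE_LIMIT_PARTITION_REQUEST:

public class PartitionLimitSketch {

  static void checkLimitNumberOfPartitions(String tblName, int numPartitions,
      int maxToFetch, int partitionLimit) {
    if (partitionLimit < 0) {
      return; // limit disabled in this sketch
    }
    int partitionRequest = (maxToFetch < 0) ? numPartitions : maxToFetch;
    if (partitionRequest > partitionLimit) {
      throw new IllegalStateException(String.format(
          "Number of partitions scanned (=%d) on table '%s' exceeds limit (=%d).",
          partitionRequest, tblName, partitionLimit));
    }
  }

  public static void main(String[] args) {
    checkLimitNumberOfPartitions("web_logs", 500, -1, 1000);    // ok
    try {
      checkLimitNumberOfPartitions("web_logs", 5000, -1, 1000); // throws
    } catch (IllegalStateException expected) {
      System.out.println(expected.getMessage());
    }
  }
}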
@@ -3831,15 +3678,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
         // Only fetch the table if we actually have a listener
         Table table = null;
-        if (!listeners.isEmpty()) {
+        for (MetaStoreEventListener listener : listeners) {
           if (table == null) {
             table = getMS().getTable(db_name, tbl_name);
           }
-
-          MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                EventType.ALTER_PARTITION,
-                                                new AlterPartitionEvent(oldPart, new_part, table, false, true, this),
-                                                envContext);
+          AlterPartitionEvent alterPartitionEvent =
+              new AlterPartitionEvent(oldPart, new_part, table, true, this);
+          alterPartitionEvent.setEnvironmentContext(envContext);
+          listener.onAlterPartition(alterPartitionEvent);
         }
       } catch (InvalidObjectException e) {
         ex = e;
@@ -3903,15 +3749,13 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           else {
             throw new InvalidOperationException("failed to alterpartitions");
           }
-
-          if (table == null) {
-            table = getMS().getTable(db_name, tbl_name);
-          }
-
-          if (!listeners.isEmpty()) {
-            MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                  EventType.ALTER_PARTITION,
-                                                  new AlterPartitionEvent(oldTmpPart, tmpPart, table, false, true, this));
+          for (MetaStoreEventListener listener : listeners) {
+            if (table == null) {
+              table = getMS().getTable(db_name, tbl_name);
+            }
+            AlterPartitionEvent alterPartitionEvent =
+                new AlterPartitionEvent(oldTmpPart, tmpPart, table, true, this);
+            listener.onAlterPartition(alterPartitionEvent);
           }
         }
       } catch (InvalidObjectException e) {
@@ -3948,17 +3792,16 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Exception ex = null;
       Index oldIndex = null;
       RawStore ms  = getMS();
-      Map<String, String> transactionalListenerResponses = Collections.emptyMap();
       try {
         ms.openTransaction();
         oldIndex = get_index_by_name(dbname, base_table_name, index_name);
         firePreEvent(new PreAlterIndexEvent(oldIndex, newIndex, this));
         ms.alterIndex(dbname, base_table_name, index_name, newIndex);
-        if (!transactionalListeners.isEmpty()) {
-          transactionalListenerResponses =
-              MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                    EventType.ALTER_INDEX,
-                                                    new AlterIndexEvent(oldIndex, newIndex, true, this));
+        if (transactionalListeners.size() > 0) {
+          AlterIndexEvent alterIndexEvent = new AlterIndexEvent(oldIndex, newIndex, true, this);
+          for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+            transactionalListener.onAlterIndex(alterIndexEvent);
+          }
         }
 
         success = ms.commitTransaction();
@@ -3980,13 +3823,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
 
         endFunction("alter_index", success, ex, base_table_name);
-
-        if (!listeners.isEmpty()) {
-          MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                EventType.ALTER_INDEX,
-                                                new AlterIndexEvent(oldIndex, newIndex, success, this),
-                                                null,
-                                                transactionalListenerResponses);
+        for (MetaStoreEventListener listener : listeners) {
+          AlterIndexEvent alterIndexEvent = new AlterIndexEvent(oldIndex, newIndex, success, this);
+          listener.onAlterIndex(alterIndexEvent);
         }
       }
     }
@@ -4054,11 +3893,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         alterHandler.alterTable(getMS(), wh, dbname, name, newTable,
                 envContext, this);
         success = true;
-        if (!listeners.isEmpty()) {
-          MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                EventType.ALTER_TABLE,
-                                                new AlterTableEvent(oldt, newTable, false, true, this),
-                                                envContext);
+        for (MetaStoreEventListener listener : listeners) {
+          AlterTableEvent alterTableEvent =
+              new AlterTableEvent(oldt, newTable, success, this);
+          alterTableEvent.setEnvironmentContext(envContext);
+          listener.onAlterTable(alterTableEvent);
         }
       } catch (NoSuchObjectException e) {
         // thrown when the table to be altered does not exist
@@ -4626,7 +4465,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       boolean success = false, indexTableCreated = false;
       String[] qualified =
           MetaStoreUtils.getQualifiedName(index.getDbName(), index.getIndexTableName());
-      Map<String, String> transactionalListenerResponses = Collections.emptyMap();
       try {
         ms.openTransaction();
         firePreEvent(new PreAddIndexEvent(index, this));
@@ -4664,11 +4502,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         index.setCreateTime((int) time);
         index.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));
         if (ms.addIndex(index)) {
-          if (!transactionalListeners.isEmpty()) {
-            transactionalListenerResponses =
-                MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                      EventType.CREATE_INDEX,
-                                                      new AddIndexEvent(index, true, this));
+          if (transactionalListeners.size() > 0) {
+            AddIndexEvent addIndexEvent = new AddIndexEvent(index, true, this);
+            for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+              transactionalListener.onAddIndex(addIndexEvent);
+            }
           }
         }
 
@@ -4685,12 +4523,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           ms.rollbackTransaction();
         }
 
-        if (!listeners.isEmpty()) {
-          MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                EventType.CREATE_INDEX,
-                                                new AddIndexEvent(index, success, this),
-                                                null,
-                                                transactionalListenerResponses);
+        for (MetaStoreEventListener listener : listeners) {
+          AddIndexEvent addIndexEvent = new AddIndexEvent(index, success, this);
+          listener.onAddIndex(addIndexEvent);
         }
       }
     }
@@ -4728,7 +4563,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Index index = null;
       Path tblPath = null;
       List<Path> partPaths = null;
-      Map<String, String> transactionalListenerResponses = Collections.emptyMap();
       try {
         ms.openTransaction();
         // drop the underlying index table
@@ -4761,11 +4595,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           }
         }
 
-        if (!transactionalListeners.isEmpty()) {
-          transactionalListenerResponses =
-              MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                    EventType.DROP_INDEX,
-                                                    new DropIndexEvent(index, true, this));
+        if (transactionalListeners.size() > 0) {
+          DropIndexEvent dropIndexEvent = new DropIndexEvent(index, true, this);
+          for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+            transactionalListener.onDropIndex(dropIndexEvent);
+          }
         }
 
         success = ms.commitTransaction();
@@ -4778,12 +4612,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           // ok even if the data is not deleted
         }
         // Skip the event listeners if the index is NULL
-        if (index != null && !listeners.isEmpty()) {
-          MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                EventType.DROP_INDEX,
-                                                new DropIndexEvent(index, success, this),
-                                                null,
-                                                transactionalListenerResponses);
+        if (index != null) {
+          for (MetaStoreEventListener listener : listeners) {
+            DropIndexEvent dropIndexEvent = new DropIndexEvent(index, success, this);
+            listener.onDropIndex(dropIndexEvent);
+          }
         }
       }
       return success;
@@ -6219,7 +6052,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       validateFunctionInfo(func);
       boolean success = false;
       RawStore ms = getMS();
-      Map<String, String> transactionalListenerResponses = Collections.emptyMap();
       try {
         ms.openTransaction();
         Database db = ms.getDatabase(func.getDbName());
@@ -6236,11 +6068,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         long time = System.currentTimeMillis() / 1000;
         func.setCreateTime((int) time);
         ms.createFunction(func);
-        if (!transactionalListeners.isEmpty()) {
-          transactionalListenerResponses =
-              MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                    EventType.CREATE_FUNCTION,
-                                                    new CreateFunctionEvent(func, true, this));
+        if (transactionalListeners.size() > 0) {
+          CreateFunctionEvent createFunctionEvent = new CreateFunctionEvent(func, true, this);
+          for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+            transactionalListener.onCreateFunction(createFunctionEvent);
+          }
         }
 
         success = ms.commitTransaction();
@@ -6249,12 +6081,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           ms.rollbackTransaction();
         }
 
-        if (!listeners.isEmpty()) {
-          MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                EventType.CREATE_FUNCTION,
-                                                new CreateFunctionEvent(func, success, this),
-                                                null,
-                                                transactionalListenerResponses);
+        if (listeners.size() > 0) {
+          CreateFunctionEvent createFunctionEvent = new CreateFunctionEvent(func, success, this);
+          for (MetaStoreEventListener listener : listeners) {
+            listener.onCreateFunction(createFunctionEvent);
+          }
         }
       }
     }
@@ -6266,7 +6097,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       boolean success = false;
       Function func = null;
       RawStore ms = getMS();
-      Map<String, String> transactionalListenerResponses = Collections.emptyMap();
       try {
         ms.openTransaction();
         func = ms.getFunction(dbName, funcName);
@@ -6276,10 +6106,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
         ms.dropFunction(dbName, funcName);
         if (transactionalListeners.size() > 0) {
-          transactionalListenerResponses =
-              MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                                                    EventType.DROP_FUNCTION,
-                                                    new DropFunctionEvent(func, true, this));
+          DropFunctionEvent dropFunctionEvent = new DropFunctionEvent(func, true, this);
+          for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+            transactionalListener.onDropFunction(dropFunctionEvent);
+          }
         }
 
         success = ms.commitTransaction();
@@ -6289,11 +6119,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
 
         if (listeners.size() > 0) {
-          MetaStoreListenerNotifier.notifyEvent(listeners,
-                                                EventType.DROP_FUNCTION,
-                                                new DropFunctionEvent(func, success, this),
-                                                null,
-                                                transactionalListenerResponses);
+          DropFunctionEvent dropFunctionEvent = new DropFunctionEvent(func, success, this);
+          for (MetaStoreEventListener listener : listeners) {
+            listener.onDropFunction(dropFunctionEvent);
+          }
         }
       }
     }
@@ -6645,13 +6474,13 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         InsertEvent event =
             new InsertEvent(rqst.getDbName(), rqst.getTableName(), rqst.getPartitionVals(), rqst
                 .getData().getInsertData(), rqst.isSuccessful(), this);
+        for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+          transactionalListener.onInsert(event);
+        }
 
-        /*
-         * The transactional listener response will be set already on the event, so there is not need
-         * to pass the response to the non-transactional listener.
-         */
-        MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.INSERT, event);
-        MetaStoreListenerNotifier.notifyEvent(listeners, EventType.INSERT, event);
+        for (MetaStoreEventListener listener : listeners) {
+          listener.onInsert(event);
+        }
 
         return new FireEventResponse();
 
@@ -7405,9 +7234,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
             ServerMode.METASTORE);
         saslServer.setSecretManager(delegationTokenManager.getSecretManager());
         transFactory = saslServer.createTransportFactory(
-                MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
+                MetaStoreUtils.getMetaStoreSaslProperties(conf));
         processor = saslServer.wrapProcessor(
           new ThriftHiveMetastore.Processor<IHMSHandler>(handler));
+        serverSocket = HiveAuthUtils.getServerSocket(null, port);
 
         LOG.info("Starting DB backed MetaStore Server in Secure Mode");
       } else {
@@ -7426,27 +7256,25 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           processor = new TSetIpAddressProcessor<IHMSHandler>(handler);
           LOG.info("Starting DB backed MetaStore Server");
         }
-      }
-
-      if (!useSSL) {
-        serverSocket = HiveAuthUtils.getServerSocket(null, port);
-      } else {
-        String keyStorePath = conf.getVar(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH).trim();
-        if (keyStorePath.isEmpty()) {
-          throw new IllegalArgumentException(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH.varname
-              + " Not configured for SSL connection");
-        }
-        String keyStorePassword = ShimLoader.getHadoopShims().getPassword(conf,
-            HiveConf.ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PASSWORD.varname);
 
         // enable SSL support for HMS
         List<String> sslVersionBlacklist = new ArrayList<String>();
         for (String sslVersion : conf.getVar(ConfVars.HIVE_SSL_PROTOCOL_BLACKLIST).split(",")) {
           sslVersionBlacklist.add(sslVersion);
         }
-
-        serverSocket = HiveAuthUtils.getServerSSLSocket(null, port, keyStorePath,
-            keyStorePassword, sslVersionBlacklist);
+        if (!useSSL) {
+          serverSocket = HiveAuthUtils.getServerSocket(null, port);
+        } else {
+          String keyStorePath = conf.getVar(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH).trim();
+          if (keyStorePath.isEmpty()) {
+            throw new IllegalArgumentException(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH.varname
+                + " Not configured for SSL connection");
+          }
+          String keyStorePassword = ShimLoader.getHadoopShims().getPassword(conf,
+              HiveConf.ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PASSWORD.varname);
+          serverSocket = HiveAuthUtils.getServerSSLSocket(null, port, keyStorePath,
+              keyStorePassword, sslVersionBlacklist);
+        }
       }
 
       if (tcpKeepAlive) {
@@ -7508,7 +7336,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       HMSHandler.LOG.info("Options.maxWorkerThreads = "
           + maxWorkerThreads);
       HMSHandler.LOG.info("TCP keepalive = " + tcpKeepAlive);
-      HMSHandler.LOG.info("Enable SSL = " + useSSL);
 
       if (startLock != null) {
         signalOtherThreadsToStart(tServer, startLock, startCondition, startedServing);

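A note on the notification pattern restored in the hunks above: each call site now builds the event object once and hands it to every MetaStoreEventListener in a plain loop, instead of routing through MetaStoreListenerNotifier and threading the transactional listener responses into the later notification. A minimal sketch of that shape, using the drop-function case; the surrounding HMSHandler context (this, listeners, func, success) is assumed from the hunks above and the snippet itself is not part of the patch:

    // Build the event once, then notify each registered listener in turn.
    // DropFunctionEvent and MetaStoreEventListener.onDropFunction are the same
    // metastore APIs used in the diff; "this" is the enclosing HMSHandler.
    if (!listeners.isEmpty()) {
      DropFunctionEvent event = new DropFunctionEvent(func, success, this);
      for (MetaStoreEventListener listener : listeners) {
        listener.onDropFunction(event);
      }
    }

The same shape repeats for the add/drop index, create/drop function, and insert events: transactional listeners are notified inside the open metastore transaction, regular listeners after the commit or rollback.
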
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 4912a31..b0b009a 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -395,29 +395,6 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
         LOG.info("Trying to connect to metastore with URI " + store);
 
         try {
-          if (useSSL) {
-            try {
-              String trustStorePath = conf.getVar(ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PATH).trim();
-              if (trustStorePath.isEmpty()) {
-                throw new IllegalArgumentException(ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PATH.varname
-                    + " Not configured for SSL connection");
-              }
-              String trustStorePassword = ShimLoader.getHadoopShims().getPassword(conf,
-                  HiveConf.ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD.varname);
-
-              // Create an SSL socket and connect
-              transport = HiveAuthUtils.getSSLSocket(store.getHost(), store.getPort(), clientSocketTimeout, trustStorePath, trustStorePassword );
-              LOG.info("Opened an SSL connection to metastore, current connections: " + connCount.incrementAndGet());
-            } catch(IOException e) {
-              throw new IllegalArgumentException(e);
-            } catch(TTransportException e) {
-              tte = e;
-              throw new MetaException(e.toString());
-            }
-          } else {
-            transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout);
-          }
-
           if (useSasl) {
             // Wrap thrift connection with SASL for secure connection.
             try {
@@ -432,24 +409,48 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
               String tokenSig = conf.getVar(ConfVars.METASTORE_TOKEN_SIGNATURE);
               // tokenSig could be null
               tokenStrForm = Utils.getTokenStrForm(tokenSig);
+              transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout);
 
               if(tokenStrForm != null) {
                 // authenticate using delegation tokens via the "DIGEST" mechanism
                 transport = authBridge.createClientTransport(null, store.getHost(),
                     "DIGEST", tokenStrForm, transport,
-                        MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
+                        MetaStoreUtils.getMetaStoreSaslProperties(conf));
               } else {
                 String principalConfig =
                     conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL);
                 transport = authBridge.createClientTransport(
                     principalConfig, store.getHost(), "KERBEROS", null,
-                    transport, MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
+                    transport, MetaStoreUtils.getMetaStoreSaslProperties(conf));
               }
             } catch (IOException ioe) {
               LOG.error("Couldn't create client transport", ioe);
               throw new MetaException(ioe.toString());
             }
           } else {
+            if (useSSL) {
+              try {
+                String trustStorePath = conf.getVar(ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PATH).trim();
+                if (trustStorePath.isEmpty()) {
+                  throw new IllegalArgumentException(ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PATH.varname
+                      + " Not configured for SSL connection");
+                }
+                String trustStorePassword = ShimLoader.getHadoopShims().getPassword(conf,
+                    HiveConf.ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD.varname);
+
+                // Create an SSL socket and connect
+                transport = HiveAuthUtils.getSSLSocket(store.getHost(), store.getPort(), clientSocketTimeout, trustStorePath, trustStorePassword );
+                LOG.info("Opened an SSL connection to metastore, current connections: " + connCount.incrementAndGet());
+              } catch(IOException e) {
+                throw new IllegalArgumentException(e);
+              } catch(TTransportException e) {
+                tte = e;
+                throw new MetaException(e.toString());
+              }
+            } else {
+              transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout);
+            }
+
             if (useFramedTransport) {
               transport = new TFramedTransport(transport);
             }
@@ -1096,23 +1097,6 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
   }
 
   /**
-   * Truncate the table/partitions in the DEFAULT database.
-   * @param dbName
-   *          The db to which the table to be truncate belongs to
-   * @param tableName
-   *          The table to truncate
-   * @param partNames
-   *          List of partitions to truncate. NULL will truncate the whole table/all partitions
-   * @throws MetaException
-   * @throws TException
-   *           Could not truncate table properly.
-   */
-  @Override
-  public void truncateTable(String dbName, String tableName, List<String> partNames) throws MetaException, TException {
-    client.truncate_table(dbName, tableName, partNames);
-  }
-
-  /**
    * @param type
    * @return true if the type is dropped
    * @throws MetaException

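On the client side, the reverted connection logic only builds an SSL transport on the non-SASL path; when SASL is in use it starts from a plain TSocket and lets the auth bridge wrap it. A rough sketch of the restored non-SASL branch, where host, port, and clientSocketTimeout stand in for the client's connection fields (illustrative only, not part of the patch):

    // Plain or SSL base transport, then optional framing, mirroring the hunk above.
    TTransport transport;
    if (useSSL) {
      String trustStorePath = conf.getVar(ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PATH).trim();
      String trustStorePassword = ShimLoader.getHadoopShims().getPassword(conf,
          ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD.varname);
      transport = HiveAuthUtils.getSSLSocket(host, port, clientSocketTimeout,
          trustStorePath, trustStorePassword);
    } else {
      transport = new TSocket(host, port, clientSocketTimeout);
    }
    if (useFramedTransport) {
      transport = new TFramedTransport(transport);
    }
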
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java
index b7d7b50..df698c8 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java
@@ -25,23 +25,35 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 
 public class HiveMetaStoreFsImpl implements MetaStoreFS {
 
   public static final Logger LOG = LoggerFactory
-      .getLogger("hive.metastore.hivemetastoreFsimpl");
+      .getLogger("hive.metastore.hivemetastoressimpl");
 
   @Override
   public boolean deleteDir(FileSystem fs, Path f, boolean recursive,
       boolean ifPurge, Configuration conf) throws MetaException {
+    LOG.debug("deleting  " + f);
+
     try {
-      FileUtils.moveToTrash(fs, f, conf, ifPurge);
+      if (ifPurge) {
+        LOG.info("Not moving "+ f +" to trash");
+      } else if (Trash.moveToAppropriateTrash(fs, f, conf)) {
+        LOG.info("Moved to trash: " + f);
+        return true;
+      }
+
+      if (fs.delete(f, true)) {
+        LOG.debug("Deleted the directory " + f);
+        return true;
+      }
+
       if (fs.exists(f)) {
         throw new MetaException("Unable to delete directory: " + f);
       }
-      return true;
     } catch (FileNotFoundException e) {
       return true; // ok even if there is not data
     } catch (Exception e) {
@@ -49,4 +61,5 @@ public class HiveMetaStoreFsImpl implements MetaStoreFS {
     }
     return false;
   }
+
 }

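The deleteDir change above restores a trash-first removal: skip the trash when a purge is requested, otherwise try Hadoop's Trash, and fall back to a recursive delete. A self-contained sketch of that flow using the same Hadoop APIs (the helper class and method names are illustrative, not taken from the patch):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.Trash;

    final class TrashOrDelete {
      // Returns true once the path is gone: moved to the user's trash or deleted outright.
      static boolean remove(FileSystem fs, Path f, boolean ifPurge, Configuration conf)
          throws IOException {
        if (!ifPurge && Trash.moveToAppropriateTrash(fs, f, conf)) {
          return true; // trash enabled and the move succeeded
        }
        return fs.delete(f, true); // purge requested, or trash disabled/unavailable
      }
    }

Note that, as in the restored method, the delete is always recursive regardless of the recursive flag passed to deleteDir.
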
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 82db281..d567258 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -305,20 +305,6 @@ public interface IMetaStoreClient {
   void dropTable(String dbname, String tableName)
       throws MetaException, TException, NoSuchObjectException;
 
-  /**
-   * Truncate the table/partitions in the DEFAULT database.
-   * @param dbName
-   *          The db to which the table to be truncate belongs to
-   * @param tableName
-   *          The table to truncate
-   * @param partNames
-   *          List of partitions to truncate. NULL will truncate the whole table/all partitions
-   * @throws MetaException
-   * @throws TException
-   *           Could not truncate table properly.
-   */
-  void truncateTable(String dbName, String tableName, List<String> partNames) throws MetaException, TException;
-
   boolean tableExists(String databaseName, String tableName) throws MetaException,
       TException, UnknownDBException;
 


[19/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index cb1bd59..6b80461 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -90,8 +90,6 @@ public class ThriftHiveMetastore {
 
     public void drop_table_with_environment_context(String dbname, String name, boolean deleteData, EnvironmentContext environment_context) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
 
-    public void truncate_table(String dbName, String tableName, List<String> partNames) throws MetaException, org.apache.thrift.TException;
-
     public List<String> get_tables(String db_name, String pattern) throws MetaException, org.apache.thrift.TException;
 
     public List<String> get_tables_by_type(String db_name, String pattern, String tableType) throws MetaException, org.apache.thrift.TException;
@@ -410,8 +408,6 @@ public class ThriftHiveMetastore {
 
     public void drop_table_with_environment_context(String dbname, String name, boolean deleteData, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
-    public void truncate_table(String dbName, String tableName, List<String> partNames, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
-
     public void get_tables(String db_name, String pattern, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
     public void get_tables_by_type(String db_name, String pattern, String tableType, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -1401,31 +1397,6 @@ public class ThriftHiveMetastore {
       return;
     }
 
-    public void truncate_table(String dbName, String tableName, List<String> partNames) throws MetaException, org.apache.thrift.TException
-    {
-      send_truncate_table(dbName, tableName, partNames);
-      recv_truncate_table();
-    }
-
-    public void send_truncate_table(String dbName, String tableName, List<String> partNames) throws org.apache.thrift.TException
-    {
-      truncate_table_args args = new truncate_table_args();
-      args.setDbName(dbName);
-      args.setTableName(tableName);
-      args.setPartNames(partNames);
-      sendBase("truncate_table", args);
-    }
-
-    public void recv_truncate_table() throws MetaException, org.apache.thrift.TException
-    {
-      truncate_table_result result = new truncate_table_result();
-      receiveBase(result, "truncate_table");
-      if (result.o1 != null) {
-        throw result.o1;
-      }
-      return;
-    }
-
     public List<String> get_tables(String db_name, String pattern) throws MetaException, org.apache.thrift.TException
     {
       send_get_tables(db_name, pattern);
@@ -6056,44 +6027,6 @@ public class ThriftHiveMetastore {
       }
     }
 
-    public void truncate_table(String dbName, String tableName, List<String> partNames, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
-      checkReady();
-      truncate_table_call method_call = new truncate_table_call(dbName, tableName, partNames, resultHandler, this, ___protocolFactory, ___transport);
-      this.___currentMethod = method_call;
-      ___manager.call(method_call);
-    }
-
-    public static class truncate_table_call extends org.apache.thrift.async.TAsyncMethodCall {
-      private String dbName;
-      private String tableName;
-      private List<String> partNames;
-      public truncate_table_call(String dbName, String tableName, List<String> partNames, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
-        super(client, protocolFactory, transport, resultHandler, false);
-        this.dbName = dbName;
-        this.tableName = tableName;
-        this.partNames = partNames;
-      }
-
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
-        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("truncate_table", org.apache.thrift.protocol.TMessageType.CALL, 0));
-        truncate_table_args args = new truncate_table_args();
-        args.setDbName(dbName);
-        args.setTableName(tableName);
-        args.setPartNames(partNames);
-        args.write(prot);
-        prot.writeMessageEnd();
-      }
-
-      public void getResult() throws MetaException, org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
-          throw new IllegalStateException("Method call not finished!");
-        }
-        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
-        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
-        (new Client(prot)).recv_truncate_table();
-      }
-    }
-
     public void get_tables(String db_name, String pattern, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
       get_tables_call method_call = new get_tables_call(db_name, pattern, resultHandler, this, ___protocolFactory, ___transport);
@@ -10825,7 +10758,6 @@ public class ThriftHiveMetastore {
       processMap.put("add_foreign_key", new add_foreign_key());
       processMap.put("drop_table", new drop_table());
       processMap.put("drop_table_with_environment_context", new drop_table_with_environment_context());
-      processMap.put("truncate_table", new truncate_table());
       processMap.put("get_tables", new get_tables());
       processMap.put("get_tables_by_type", new get_tables_by_type());
       processMap.put("get_table_meta", new get_table_meta());
@@ -11604,30 +11536,6 @@ public class ThriftHiveMetastore {
       }
     }
 
-    public static class truncate_table<I extends Iface> extends org.apache.thrift.ProcessFunction<I, truncate_table_args> {
-      public truncate_table() {
-        super("truncate_table");
-      }
-
-      public truncate_table_args getEmptyArgsInstance() {
-        return new truncate_table_args();
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public truncate_table_result getResult(I iface, truncate_table_args args) throws org.apache.thrift.TException {
-        truncate_table_result result = new truncate_table_result();
-        try {
-          iface.truncate_table(args.dbName, args.tableName, args.partNames);
-        } catch (MetaException o1) {
-          result.o1 = o1;
-        }
-        return result;
-      }
-    }
-
     public static class get_tables<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_tables_args> {
       public get_tables() {
         super("get_tables");
@@ -14999,7 +14907,6 @@ public class ThriftHiveMetastore {
       processMap.put("add_foreign_key", new add_foreign_key());
       processMap.put("drop_table", new drop_table());
       processMap.put("drop_table_with_environment_context", new drop_table_with_environment_context());
-      processMap.put("truncate_table", new truncate_table());
       processMap.put("get_tables", new get_tables());
       processMap.put("get_tables_by_type", new get_tables_by_type());
       processMap.put("get_table_meta", new get_table_meta());
@@ -16654,62 +16561,6 @@ public class ThriftHiveMetastore {
       }
     }
 
-    public static class truncate_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, truncate_table_args, Void> {
-      public truncate_table() {
-        super("truncate_table");
-      }
-
-      public truncate_table_args getEmptyArgsInstance() {
-        return new truncate_table_args();
-      }
-
-      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Void>() { 
-          public void onComplete(Void o) {
-            truncate_table_result result = new truncate_table_result();
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            truncate_table_result result = new truncate_table_result();
-            if (e instanceof MetaException) {
-                        result.o1 = (MetaException) e;
-                        result.setO1IsSet(true);
-                        msg = result;
-            }
-             else 
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, truncate_table_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
-        iface.truncate_table(args.dbName, args.tableName, args.partNames,resultHandler);
-      }
-    }
-
     public static class get_tables<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_tables_args, List<String>> {
       public get_tables() {
         super("get_tables");
@@ -48726,1015 +48577,7 @@ public class ThriftHiveMetastore {
 
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("drop_table_with_environment_context_result(");
-      boolean first = true;
-
-      sb.append("o1:");
-      if (this.o1 == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.o1);
-      }
-      first = false;
-      if (!first) sb.append(", ");
-      sb.append("o3:");
-      if (this.o3 == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.o3);
-      }
-      first = false;
-      sb.append(")");
-      return sb.toString();
-    }
-
-    public void validate() throws org.apache.thrift.TException {
-      // check for required fields
-      // check for sub-struct validity
-    }
-
-    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-      try {
-        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-      } catch (org.apache.thrift.TException te) {
-        throw new java.io.IOException(te);
-      }
-    }
-
-    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-      try {
-        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-      } catch (org.apache.thrift.TException te) {
-        throw new java.io.IOException(te);
-      }
-    }
-
-    private static class drop_table_with_environment_context_resultStandardSchemeFactory implements SchemeFactory {
-      public drop_table_with_environment_context_resultStandardScheme getScheme() {
-        return new drop_table_with_environment_context_resultStandardScheme();
-      }
-    }
-
-    private static class drop_table_with_environment_context_resultStandardScheme extends StandardScheme<drop_table_with_environment_context_result> {
-
-      public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
-        org.apache.thrift.protocol.TField schemeField;
-        iprot.readStructBegin();
-        while (true)
-        {
-          schemeField = iprot.readFieldBegin();
-          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-            break;
-          }
-          switch (schemeField.id) {
-            case 1: // O1
-              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.o1 = new NoSuchObjectException();
-                struct.o1.read(iprot);
-                struct.setO1IsSet(true);
-              } else { 
-                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-              }
-              break;
-            case 2: // O3
-              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.o3 = new MetaException();
-                struct.o3.read(iprot);
-                struct.setO3IsSet(true);
-              } else { 
-                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-              }
-              break;
-            default:
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-          }
-          iprot.readFieldEnd();
-        }
-        iprot.readStructEnd();
-        struct.validate();
-      }
-
-      public void write(org.apache.thrift.protocol.TProtocol oprot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
-        struct.validate();
-
-        oprot.writeStructBegin(STRUCT_DESC);
-        if (struct.o1 != null) {
-          oprot.writeFieldBegin(O1_FIELD_DESC);
-          struct.o1.write(oprot);
-          oprot.writeFieldEnd();
-        }
-        if (struct.o3 != null) {
-          oprot.writeFieldBegin(O3_FIELD_DESC);
-          struct.o3.write(oprot);
-          oprot.writeFieldEnd();
-        }
-        oprot.writeFieldStop();
-        oprot.writeStructEnd();
-      }
-
-    }
-
-    private static class drop_table_with_environment_context_resultTupleSchemeFactory implements SchemeFactory {
-      public drop_table_with_environment_context_resultTupleScheme getScheme() {
-        return new drop_table_with_environment_context_resultTupleScheme();
-      }
-    }
-
-    private static class drop_table_with_environment_context_resultTupleScheme extends TupleScheme<drop_table_with_environment_context_result> {
-
-      @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
-        TTupleProtocol oprot = (TTupleProtocol) prot;
-        BitSet optionals = new BitSet();
-        if (struct.isSetO1()) {
-          optionals.set(0);
-        }
-        if (struct.isSetO3()) {
-          optionals.set(1);
-        }
-        oprot.writeBitSet(optionals, 2);
-        if (struct.isSetO1()) {
-          struct.o1.write(oprot);
-        }
-        if (struct.isSetO3()) {
-          struct.o3.write(oprot);
-        }
-      }
-
-      @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
-        TTupleProtocol iprot = (TTupleProtocol) prot;
-        BitSet incoming = iprot.readBitSet(2);
-        if (incoming.get(0)) {
-          struct.o1 = new NoSuchObjectException();
-          struct.o1.read(iprot);
-          struct.setO1IsSet(true);
-        }
-        if (incoming.get(1)) {
-          struct.o3 = new MetaException();
-          struct.o3.read(iprot);
-          struct.setO3IsSet(true);
-        }
-      }
-    }
-
-  }
-
-  public static class truncate_table_args implements org.apache.thrift.TBase<truncate_table_args, truncate_table_args._Fields>, java.io.Serializable, Cloneable, Comparable<truncate_table_args>   {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("truncate_table_args");
-
-    private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
-    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2);
-    private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)3);
-
-    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-    static {
-      schemes.put(StandardScheme.class, new truncate_table_argsStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new truncate_table_argsTupleSchemeFactory());
-    }
-
-    private String dbName; // required
-    private String tableName; // required
-    private List<String> partNames; // required
-
-    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-      DB_NAME((short)1, "dbName"),
-      TABLE_NAME((short)2, "tableName"),
-      PART_NAMES((short)3, "partNames");
-
-      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-      static {
-        for (_Fields field : EnumSet.allOf(_Fields.class)) {
-          byName.put(field.getFieldName(), field);
-        }
-      }
-
-      /**
-       * Find the _Fields constant that matches fieldId, or null if its not found.
-       */
-      public static _Fields findByThriftId(int fieldId) {
-        switch(fieldId) {
-          case 1: // DB_NAME
-            return DB_NAME;
-          case 2: // TABLE_NAME
-            return TABLE_NAME;
-          case 3: // PART_NAMES
-            return PART_NAMES;
-          default:
-            return null;
-        }
-      }
-
-      /**
-       * Find the _Fields constant that matches fieldId, throwing an exception
-       * if it is not found.
-       */
-      public static _Fields findByThriftIdOrThrow(int fieldId) {
-        _Fields fields = findByThriftId(fieldId);
-        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-        return fields;
-      }
-
-      /**
-       * Find the _Fields constant that matches name, or null if its not found.
-       */
-      public static _Fields findByName(String name) {
-        return byName.get(name);
-      }
-
-      private final short _thriftId;
-      private final String _fieldName;
-
-      _Fields(short thriftId, String fieldName) {
-        _thriftId = thriftId;
-        _fieldName = fieldName;
-      }
-
-      public short getThriftFieldId() {
-        return _thriftId;
-      }
-
-      public String getFieldName() {
-        return _fieldName;
-      }
-    }
-
-    // isset id assignments
-    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-    static {
-      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-      tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-      tmpMap.put(_Fields.PART_NAMES, new org.apache.thrift.meta_data.FieldMetaData("partNames", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
-              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
-      metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(truncate_table_args.class, metaDataMap);
-    }
-
-    public truncate_table_args() {
-    }
-
-    public truncate_table_args(
-      String dbName,
-      String tableName,
-      List<String> partNames)
-    {
-      this();
-      this.dbName = dbName;
-      this.tableName = tableName;
-      this.partNames = partNames;
-    }
-
-    /**
-     * Performs a deep copy on <i>other</i>.
-     */
-    public truncate_table_args(truncate_table_args other) {
-      if (other.isSetDbName()) {
-        this.dbName = other.dbName;
-      }
-      if (other.isSetTableName()) {
-        this.tableName = other.tableName;
-      }
-      if (other.isSetPartNames()) {
-        List<String> __this__partNames = new ArrayList<String>(other.partNames);
-        this.partNames = __this__partNames;
-      }
-    }
-
-    public truncate_table_args deepCopy() {
-      return new truncate_table_args(this);
-    }
-
-    @Override
-    public void clear() {
-      this.dbName = null;
-      this.tableName = null;
-      this.partNames = null;
-    }
-
-    public String getDbName() {
-      return this.dbName;
-    }
-
-    public void setDbName(String dbName) {
-      this.dbName = dbName;
-    }
-
-    public void unsetDbName() {
-      this.dbName = null;
-    }
-
-    /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
-    public boolean isSetDbName() {
-      return this.dbName != null;
-    }
-
-    public void setDbNameIsSet(boolean value) {
-      if (!value) {
-        this.dbName = null;
-      }
-    }
-
-    public String getTableName() {
-      return this.tableName;
-    }
-
-    public void setTableName(String tableName) {
-      this.tableName = tableName;
-    }
-
-    public void unsetTableName() {
-      this.tableName = null;
-    }
-
-    /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
-    public boolean isSetTableName() {
-      return this.tableName != null;
-    }
-
-    public void setTableNameIsSet(boolean value) {
-      if (!value) {
-        this.tableName = null;
-      }
-    }
-
-    public int getPartNamesSize() {
-      return (this.partNames == null) ? 0 : this.partNames.size();
-    }
-
-    public java.util.Iterator<String> getPartNamesIterator() {
-      return (this.partNames == null) ? null : this.partNames.iterator();
-    }
-
-    public void addToPartNames(String elem) {
-      if (this.partNames == null) {
-        this.partNames = new ArrayList<String>();
-      }
-      this.partNames.add(elem);
-    }
-
-    public List<String> getPartNames() {
-      return this.partNames;
-    }
-
-    public void setPartNames(List<String> partNames) {
-      this.partNames = partNames;
-    }
-
-    public void unsetPartNames() {
-      this.partNames = null;
-    }
-
-    /** Returns true if field partNames is set (has been assigned a value) and false otherwise */
-    public boolean isSetPartNames() {
-      return this.partNames != null;
-    }
-
-    public void setPartNamesIsSet(boolean value) {
-      if (!value) {
-        this.partNames = null;
-      }
-    }
-
-    public void setFieldValue(_Fields field, Object value) {
-      switch (field) {
-      case DB_NAME:
-        if (value == null) {
-          unsetDbName();
-        } else {
-          setDbName((String)value);
-        }
-        break;
-
-      case TABLE_NAME:
-        if (value == null) {
-          unsetTableName();
-        } else {
-          setTableName((String)value);
-        }
-        break;
-
-      case PART_NAMES:
-        if (value == null) {
-          unsetPartNames();
-        } else {
-          setPartNames((List<String>)value);
-        }
-        break;
-
-      }
-    }
-
-    public Object getFieldValue(_Fields field) {
-      switch (field) {
-      case DB_NAME:
-        return getDbName();
-
-      case TABLE_NAME:
-        return getTableName();
-
-      case PART_NAMES:
-        return getPartNames();
-
-      }
-      throw new IllegalStateException();
-    }
-
-    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-    public boolean isSet(_Fields field) {
-      if (field == null) {
-        throw new IllegalArgumentException();
-      }
-
-      switch (field) {
-      case DB_NAME:
-        return isSetDbName();
-      case TABLE_NAME:
-        return isSetTableName();
-      case PART_NAMES:
-        return isSetPartNames();
-      }
-      throw new IllegalStateException();
-    }
-
-    @Override
-    public boolean equals(Object that) {
-      if (that == null)
-        return false;
-      if (that instanceof truncate_table_args)
-        return this.equals((truncate_table_args)that);
-      return false;
-    }
-
-    public boolean equals(truncate_table_args that) {
-      if (that == null)
-        return false;
-
-      boolean this_present_dbName = true && this.isSetDbName();
-      boolean that_present_dbName = true && that.isSetDbName();
-      if (this_present_dbName || that_present_dbName) {
-        if (!(this_present_dbName && that_present_dbName))
-          return false;
-        if (!this.dbName.equals(that.dbName))
-          return false;
-      }
-
-      boolean this_present_tableName = true && this.isSetTableName();
-      boolean that_present_tableName = true && that.isSetTableName();
-      if (this_present_tableName || that_present_tableName) {
-        if (!(this_present_tableName && that_present_tableName))
-          return false;
-        if (!this.tableName.equals(that.tableName))
-          return false;
-      }
-
-      boolean this_present_partNames = true && this.isSetPartNames();
-      boolean that_present_partNames = true && that.isSetPartNames();
-      if (this_present_partNames || that_present_partNames) {
-        if (!(this_present_partNames && that_present_partNames))
-          return false;
-        if (!this.partNames.equals(that.partNames))
-          return false;
-      }
-
-      return true;
-    }
-
-    @Override
-    public int hashCode() {
-      List<Object> list = new ArrayList<Object>();
-
-      boolean present_dbName = true && (isSetDbName());
-      list.add(present_dbName);
-      if (present_dbName)
-        list.add(dbName);
-
-      boolean present_tableName = true && (isSetTableName());
-      list.add(present_tableName);
-      if (present_tableName)
-        list.add(tableName);
-
-      boolean present_partNames = true && (isSetPartNames());
-      list.add(present_partNames);
-      if (present_partNames)
-        list.add(partNames);
-
-      return list.hashCode();
-    }
-
-    @Override
-    public int compareTo(truncate_table_args other) {
-      if (!getClass().equals(other.getClass())) {
-        return getClass().getName().compareTo(other.getClass().getName());
-      }
-
-      int lastComparison = 0;
-
-      lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-      if (isSetDbName()) {
-        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
-        if (lastComparison != 0) {
-          return lastComparison;
-        }
-      }
-      lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName());
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-      if (isSetTableName()) {
-        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName);
-        if (lastComparison != 0) {
-          return lastComparison;
-        }
-      }
-      lastComparison = Boolean.valueOf(isSetPartNames()).compareTo(other.isSetPartNames());
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-      if (isSetPartNames()) {
-        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partNames, other.partNames);
-        if (lastComparison != 0) {
-          return lastComparison;
-        }
-      }
-      return 0;
-    }
-
-    public _Fields fieldForId(int fieldId) {
-      return _Fields.findByThriftId(fieldId);
-    }
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-    }
-
-    @Override
-    public String toString() {
-      StringBuilder sb = new StringBuilder("truncate_table_args(");
-      boolean first = true;
-
-      sb.append("dbName:");
-      if (this.dbName == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.dbName);
-      }
-      first = false;
-      if (!first) sb.append(", ");
-      sb.append("tableName:");
-      if (this.tableName == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.tableName);
-      }
-      first = false;
-      if (!first) sb.append(", ");
-      sb.append("partNames:");
-      if (this.partNames == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.partNames);
-      }
-      first = false;
-      sb.append(")");
-      return sb.toString();
-    }
-
-    public void validate() throws org.apache.thrift.TException {
-      // check for required fields
-      // check for sub-struct validity
-    }
-
-    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-      try {
-        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-      } catch (org.apache.thrift.TException te) {
-        throw new java.io.IOException(te);
-      }
-    }
-
-    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-      try {
-        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-      } catch (org.apache.thrift.TException te) {
-        throw new java.io.IOException(te);
-      }
-    }
-
-    private static class truncate_table_argsStandardSchemeFactory implements SchemeFactory {
-      public truncate_table_argsStandardScheme getScheme() {
-        return new truncate_table_argsStandardScheme();
-      }
-    }
-
-    private static class truncate_table_argsStandardScheme extends StandardScheme<truncate_table_args> {
-
-      public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args struct) throws org.apache.thrift.TException {
-        org.apache.thrift.protocol.TField schemeField;
-        iprot.readStructBegin();
-        while (true)
-        {
-          schemeField = iprot.readFieldBegin();
-          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-            break;
-          }
-          switch (schemeField.id) {
-            case 1: // DB_NAME
-              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-                struct.dbName = iprot.readString();
-                struct.setDbNameIsSet(true);
-              } else { 
-                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-              }
-              break;
-            case 2: // TABLE_NAME
-              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-                struct.tableName = iprot.readString();
-                struct.setTableNameIsSet(true);
-              } else { 
-                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-              }
-              break;
-            case 3: // PART_NAMES
-              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
-                {
-                  org.apache.thrift.protocol.TList _list756 = iprot.readListBegin();
-                  struct.partNames = new ArrayList<String>(_list756.size);
-                  String _elem757;
-                  for (int _i758 = 0; _i758 < _list756.size; ++_i758)
-                  {
-                    _elem757 = iprot.readString();
-                    struct.partNames.add(_elem757);
-                  }
-                  iprot.readListEnd();
-                }
-                struct.setPartNamesIsSet(true);
-              } else { 
-                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-              }
-              break;
-            default:
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-          }
-          iprot.readFieldEnd();
-        }
-        iprot.readStructEnd();
-        struct.validate();
-      }
-
-      public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_args struct) throws org.apache.thrift.TException {
-        struct.validate();
-
-        oprot.writeStructBegin(STRUCT_DESC);
-        if (struct.dbName != null) {
-          oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
-          oprot.writeString(struct.dbName);
-          oprot.writeFieldEnd();
-        }
-        if (struct.tableName != null) {
-          oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
-          oprot.writeString(struct.tableName);
-          oprot.writeFieldEnd();
-        }
-        if (struct.partNames != null) {
-          oprot.writeFieldBegin(PART_NAMES_FIELD_DESC);
-          {
-            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size()));
-            for (String _iter759 : struct.partNames)
-            {
-              oprot.writeString(_iter759);
-            }
-            oprot.writeListEnd();
-          }
-          oprot.writeFieldEnd();
-        }
-        oprot.writeFieldStop();
-        oprot.writeStructEnd();
-      }
-
-    }
-
-    private static class truncate_table_argsTupleSchemeFactory implements SchemeFactory {
-      public truncate_table_argsTupleScheme getScheme() {
-        return new truncate_table_argsTupleScheme();
-      }
-    }
-
-    private static class truncate_table_argsTupleScheme extends TupleScheme<truncate_table_args> {
-
-      @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_args struct) throws org.apache.thrift.TException {
-        TTupleProtocol oprot = (TTupleProtocol) prot;
-        BitSet optionals = new BitSet();
-        if (struct.isSetDbName()) {
-          optionals.set(0);
-        }
-        if (struct.isSetTableName()) {
-          optionals.set(1);
-        }
-        if (struct.isSetPartNames()) {
-          optionals.set(2);
-        }
-        oprot.writeBitSet(optionals, 3);
-        if (struct.isSetDbName()) {
-          oprot.writeString(struct.dbName);
-        }
-        if (struct.isSetTableName()) {
-          oprot.writeString(struct.tableName);
-        }
-        if (struct.isSetPartNames()) {
-          {
-            oprot.writeI32(struct.partNames.size());
-            for (String _iter760 : struct.partNames)
-            {
-              oprot.writeString(_iter760);
-            }
-          }
-        }
-      }
-
-      @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_args struct) throws org.apache.thrift.TException {
-        TTupleProtocol iprot = (TTupleProtocol) prot;
-        BitSet incoming = iprot.readBitSet(3);
-        if (incoming.get(0)) {
-          struct.dbName = iprot.readString();
-          struct.setDbNameIsSet(true);
-        }
-        if (incoming.get(1)) {
-          struct.tableName = iprot.readString();
-          struct.setTableNameIsSet(true);
-        }
-        if (incoming.get(2)) {
-          {
-            org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.partNames = new ArrayList<String>(_list761.size);
-            String _elem762;
-            for (int _i763 = 0; _i763 < _list761.size; ++_i763)
-            {
-              _elem762 = iprot.readString();
-              struct.partNames.add(_elem762);
-            }
-          }
-          struct.setPartNamesIsSet(true);
-        }
-      }
-    }
-
-  }
-
-  public static class truncate_table_result implements org.apache.thrift.TBase<truncate_table_result, truncate_table_result._Fields>, java.io.Serializable, Cloneable, Comparable<truncate_table_result>   {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("truncate_table_result");
-
-    private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
-
-    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-    static {
-      schemes.put(StandardScheme.class, new truncate_table_resultStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new truncate_table_resultTupleSchemeFactory());
-    }
-
-    private MetaException o1; // required
-
-    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-      O1((short)1, "o1");
-
-      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-      static {
-        for (_Fields field : EnumSet.allOf(_Fields.class)) {
-          byName.put(field.getFieldName(), field);
-        }
-      }
-
-      /**
-       * Find the _Fields constant that matches fieldId, or null if its not found.
-       */
-      public static _Fields findByThriftId(int fieldId) {
-        switch(fieldId) {
-          case 1: // O1
-            return O1;
-          default:
-            return null;
-        }
-      }
-
-      /**
-       * Find the _Fields constant that matches fieldId, throwing an exception
-       * if it is not found.
-       */
-      public static _Fields findByThriftIdOrThrow(int fieldId) {
-        _Fields fields = findByThriftId(fieldId);
-        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-        return fields;
-      }
-
-      /**
-       * Find the _Fields constant that matches name, or null if its not found.
-       */
-      public static _Fields findByName(String name) {
-        return byName.get(name);
-      }
-
-      private final short _thriftId;
-      private final String _fieldName;
-
-      _Fields(short thriftId, String fieldName) {
-        _thriftId = thriftId;
-        _fieldName = fieldName;
-      }
-
-      public short getThriftFieldId() {
-        return _thriftId;
-      }
-
-      public String getFieldName() {
-        return _fieldName;
-      }
-    }
-
-    // isset id assignments
-    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-    static {
-      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-      tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
-      metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(truncate_table_result.class, metaDataMap);
-    }
-
-    public truncate_table_result() {
-    }
-
-    public truncate_table_result(
-      MetaException o1)
-    {
-      this();
-      this.o1 = o1;
-    }
-
-    /**
-     * Performs a deep copy on <i>other</i>.
-     */
-    public truncate_table_result(truncate_table_result other) {
-      if (other.isSetO1()) {
-        this.o1 = new MetaException(other.o1);
-      }
-    }
-
-    public truncate_table_result deepCopy() {
-      return new truncate_table_result(this);
-    }
-
-    @Override
-    public void clear() {
-      this.o1 = null;
-    }
-
-    public MetaException getO1() {
-      return this.o1;
-    }
-
-    public void setO1(MetaException o1) {
-      this.o1 = o1;
-    }
-
-    public void unsetO1() {
-      this.o1 = null;
-    }
-
-    /** Returns true if field o1 is set (has been assigned a value) and false otherwise */
-    public boolean isSetO1() {
-      return this.o1 != null;
-    }
-
-    public void setO1IsSet(boolean value) {
-      if (!value) {
-        this.o1 = null;
-      }
-    }
-
-    public void setFieldValue(_Fields field, Object value) {
-      switch (field) {
-      case O1:
-        if (value == null) {
-          unsetO1();
-        } else {
-          setO1((MetaException)value);
-        }
-        break;
-
-      }
-    }
-
-    public Object getFieldValue(_Fields field) {
-      switch (field) {
-      case O1:
-        return getO1();
-
-      }
-      throw new IllegalStateException();
-    }
-
-    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-    public boolean isSet(_Fields field) {
-      if (field == null) {
-        throw new IllegalArgumentException();
-      }
-
-      switch (field) {
-      case O1:
-        return isSetO1();
-      }
-      throw new IllegalStateException();
-    }
-
-    @Override
-    public boolean equals(Object that) {
-      if (that == null)
-        return false;
-      if (that instanceof truncate_table_result)
-        return this.equals((truncate_table_result)that);
-      return false;
-    }
-
-    public boolean equals(truncate_table_result that) {
-      if (that == null)
-        return false;
-
-      boolean this_present_o1 = true && this.isSetO1();
-      boolean that_present_o1 = true && that.isSetO1();
-      if (this_present_o1 || that_present_o1) {
-        if (!(this_present_o1 && that_present_o1))
-          return false;
-        if (!this.o1.equals(that.o1))
-          return false;
-      }
-
-      return true;
-    }
-
-    @Override
-    public int hashCode() {
-      List<Object> list = new ArrayList<Object>();
-
-      boolean present_o1 = true && (isSetO1());
-      list.add(present_o1);
-      if (present_o1)
-        list.add(o1);
-
-      return list.hashCode();
-    }
-
-    @Override
-    public int compareTo(truncate_table_result other) {
-      if (!getClass().equals(other.getClass())) {
-        return getClass().getName().compareTo(other.getClass().getName());
-      }
-
-      int lastComparison = 0;
-
-      lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1());
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-      if (isSetO1()) {
-        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1);
-        if (lastComparison != 0) {
-          return lastComparison;
-        }
-      }
-      return 0;
-    }
-
-    public _Fields fieldForId(int fieldId) {
-      return _Fields.findByThriftId(fieldId);
-    }
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-      }
-
-    @Override
-    public String toString() {
-      StringBuilder sb = new StringBuilder("truncate_table_result(");
+      StringBuilder sb = new StringBuilder("drop_table_with_environment_context_result(");
       boolean first = true;
 
       sb.append("o1:");
@@ -49744,6 +48587,14 @@ public class ThriftHiveMetastore {
         sb.append(this.o1);
       }
       first = false;
+      if (!first) sb.append(", ");
+      sb.append("o3:");
+      if (this.o3 == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.o3);
+      }
+      first = false;
       sb.append(")");
       return sb.toString();
     }
@@ -49769,15 +48620,15 @@ public class ThriftHiveMetastore {
       }
     }
 
-    private static class truncate_table_resultStandardSchemeFactory implements SchemeFactory {
-      public truncate_table_resultStandardScheme getScheme() {
-        return new truncate_table_resultStandardScheme();
+    private static class drop_table_with_environment_context_resultStandardSchemeFactory implements SchemeFactory {
+      public drop_table_with_environment_context_resultStandardScheme getScheme() {
+        return new drop_table_with_environment_context_resultStandardScheme();
       }
     }
 
-    private static class truncate_table_resultStandardScheme extends StandardScheme<truncate_table_result> {
+    private static class drop_table_with_environment_context_resultStandardScheme extends StandardScheme<drop_table_with_environment_context_result> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -49789,13 +48640,22 @@ public class ThriftHiveMetastore {
           switch (schemeField.id) {
             case 1: // O1
               if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.o1 = new MetaException();
+                struct.o1 = new NoSuchObjectException();
                 struct.o1.read(iprot);
                 struct.setO1IsSet(true);
               } else { 
                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
               }
               break;
+            case 2: // O3
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.o3 = new MetaException();
+                struct.o3.read(iprot);
+                struct.setO3IsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
             default:
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
           }
@@ -49805,7 +48665,7 @@ public class ThriftHiveMetastore {
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
@@ -49814,42 +48674,58 @@ public class ThriftHiveMetastore {
           struct.o1.write(oprot);
           oprot.writeFieldEnd();
         }
+        if (struct.o3 != null) {
+          oprot.writeFieldBegin(O3_FIELD_DESC);
+          struct.o3.write(oprot);
+          oprot.writeFieldEnd();
+        }
         oprot.writeFieldStop();
         oprot.writeStructEnd();
       }
 
     }
 
-    private static class truncate_table_resultTupleSchemeFactory implements SchemeFactory {
-      public truncate_table_resultTupleScheme getScheme() {
-        return new truncate_table_resultTupleScheme();
+    private static class drop_table_with_environment_context_resultTupleSchemeFactory implements SchemeFactory {
+      public drop_table_with_environment_context_resultTupleScheme getScheme() {
+        return new drop_table_with_environment_context_resultTupleScheme();
       }
     }
 
-    private static class truncate_table_resultTupleScheme extends TupleScheme<truncate_table_result> {
+    private static class drop_table_with_environment_context_resultTupleScheme extends TupleScheme<drop_table_with_environment_context_result> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.isSetO1()) {
           optionals.set(0);
         }
-        oprot.writeBitSet(optionals, 1);
+        if (struct.isSetO3()) {
+          optionals.set(1);
+        }
+        oprot.writeBitSet(optionals, 2);
         if (struct.isSetO1()) {
           struct.o1.write(oprot);
         }
+        if (struct.isSetO3()) {
+          struct.o3.write(oprot);
+        }
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
-        BitSet incoming = iprot.readBitSet(1);
+        BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
-          struct.o1 = new MetaException();
+          struct.o1 = new NoSuchObjectException();
           struct.o1.read(iprot);
           struct.setO1IsSet(true);
         }
+        if (incoming.get(1)) {
+          struct.o3 = new MetaException();
+          struct.o3.read(iprot);
+          struct.setO3IsSet(true);
+        }
       }
     }
 
@@ -50710,13 +49586,13 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list764 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list764.size);
-                  String _elem765;
-                  for (int _i766 = 0; _i766 < _list764.size; ++_i766)
+                  org.apache.thrift.protocol.TList _list756 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list756.size);
+                  String _elem757;
+                  for (int _i758 = 0; _i758 < _list756.size; ++_i758)
                   {
-                    _elem765 = iprot.readString();
-                    struct.success.add(_elem765);
+                    _elem757 = iprot.readString();
+                    struct.success.add(_elem757);
                   }
                   iprot.readListEnd();
                 }
@@ -50751,9 +49627,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter767 : struct.success)
+            for (String _iter759 : struct.success)
             {
-              oprot.writeString(_iter767);
+              oprot.writeString(_iter759);
             }
             oprot.writeListEnd();
           }
@@ -50792,9 +49668,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter768 : struct.success)
+            for (String _iter760 : struct.success)
             {
-              oprot.writeString(_iter768);
+              oprot.writeString(_iter760);
             }
           }
         }
@@ -50809,13 +49685,13 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list769.size);
-            String _elem770;
-            for (int _i771 = 0; _i771 < _list769.size; ++_i771)
+            org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list761.size);
+            String _elem762;
+            for (int _i763 = 0; _i763 < _list761.size; ++_i763)
             {
-              _elem770 = iprot.readString();
-              struct.success.add(_elem770);
+              _elem762 = iprot.readString();
+              struct.success.add(_elem762);
             }
           }
           struct.setSuccessIsSet(true);
@@ -51789,13 +50665,13 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list772 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list772.size);
-                  String _elem773;
-                  for (int _i774 = 0; _i774 < _list772.size; ++_i774)
+                  org.apache.thrift.protocol.TList _list764 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list764.size);
+                  String _elem765;
+                  for (int _i766 = 0; _i766 < _list764.size; ++_i766)
                   {
-                    _elem773 = iprot.readString();
-                    struct.success.add(_elem773);
+                    _elem765 = iprot.readString();
+                    struct.success.add(_elem765);
                   }
                   iprot.readListEnd();
                 }
@@ -51830,9 +50706,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter775 : struct.success)
+            for (String _iter767 : struct.success)
             {
-              oprot.writeString(_iter775);
+              oprot.writeString(_iter767);
             }
             oprot.writeListEnd();
           }
@@ -51871,9 +50747,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter776 : struct.success)
+            for (String _iter768 : struct.success)
             {
-              oprot.writeString(_iter776);
+              oprot.writeString(_iter768);
             }
           }
         }
@@ -51888,13 +50764,13 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list777.size);
-            String _elem778;
-            for (int _i779 = 0; _i779 < _list777.size; ++_i779)
+            org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list769.size);
+            String _elem770;
+            for (int _i771 = 0; _i771 < _list769.size; ++_i771)
             {
-              _elem778 = iprot.readString();
-              struct.success.add(_elem778);
+              _elem770 = iprot.readString();
+              struct.success.add(_elem770);
             }
           }
           struct.setSuccessIsSet(true);
@@ -52399,13 +51275,13 @@ public class ThriftHiveMetastore {
             case 3: // TBL_TYPES
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list780 = iprot.readListBegin();
-                  struct.tbl_types = new ArrayList<String>(_list780.size);
-                  String _elem781;
-                  for (int _i782 = 0; _i782 < _list780.size; ++_i782)
+                  org.apache.thrift.protocol.TList _list772 = iprot.readListBegin();
+                  struct.tbl_types = new ArrayList<String>(_list772.size);
+                  String _elem773;
+                  for (int _i774 = 0; _i774 < _list772.size; ++_i774)
                   {
-                    _elem781 = iprot.readString();
-                    struct.tbl_types.add(_elem781);
+                    _elem773 = iprot.readString();
+                    struct.tbl_types.add(_elem773);
                   }
                   iprot.readListEnd();
                 }
@@ -52441,9 +51317,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size()));
-            for (String _iter783 : struct.tbl_types)
+            for (String _iter775 : struct.tbl_types)
             {
-              oprot.writeString(_iter783);
+              oprot.writeString(_iter775);
             }
             oprot.writeListEnd();
           }
@@ -52486,9 +51362,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetTbl_types()) {
           {
             oprot.writeI32(struct.tbl_types.size());
-            for (String _iter784 : struct.tbl_types)
+            for (String _iter776 : struct.tbl_types)
             {
-              oprot.writeString(_iter784);
+              oprot.writeString(_iter776);
             }
           }
         }
@@ -52508,13 +51384,13 @@ public class ThriftHiveMetastore {
         }
         if (incoming.get(2)) {
           {
-            org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.tbl_types = new ArrayList<String>(_list785.size);
-            String _elem786;
-            for (int _i787 = 0; _i787 < _list785.size; ++_i787)
+            org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.tbl_types = new ArrayList<String>(_list777.size);
+            String _elem778;
+            for (int _i779 = 0; _i779 < _list777.size; ++_i779)
             {
-              _elem786 = iprot.readString();
-              struct.tbl_types.add(_elem786);
+              _elem778 = iprot.readString();
+              struct.tbl_types.add(_elem778);
             }
           }
           struct.setTbl_typesIsSet(true);
@@ -52920,14 +51796,14 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list788 = iprot.readListBegin();
-                  struct.success = new ArrayList<TableMeta>(_list788.size);
-                  TableMeta _elem789;
-                  for (int _i790 = 0; _i790 < _list788.size; ++_i790)
+                  org.apache.thrift.protocol.TList _list780 = iprot.readListBegin();
+                  struct.success = new ArrayList<TableMeta>(_list780.size);
+                  TableMeta _elem781;
+                  for (int _i782 = 0; _i782 < _list780.size; ++_i782)
                   {
-                    _elem789 = new TableMeta();
-                    _elem789.read(iprot);
-                    struct.success.add(_elem789);
+                    _elem781 = new TableMeta();
+                    _elem781.read(iprot);
+                    struct.success.add(_elem781);
                   }
                   iprot.readListEnd();
                 }
@@ -52962,9 +51838,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (TableMeta _iter791 : struct.success)
+            for (TableMeta _iter783 : struct.success)
             {
-              _iter791.write(oprot);
+              _iter783.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -53003,9 +51879,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (TableMeta _iter792 : struct.success)
+            for (TableMeta _iter784 : struct.success)
             {
-              _iter792.write(oprot);
+              _iter784.write(oprot);
             }
           }
         }
@@ -53020,14 +51896,14 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<TableMeta>(_list793.size);
-            TableMeta _elem794;
-            for (int _i795 = 0; _i795 < _list793.size; ++_i795)
+            org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<TableMeta>(_list785.size);
+            TableMeta _elem786;
+            for (int _i787 = 0; _i787 < _list785.size; ++_i787)
             {
-              _elem794 = new TableMeta();
-              _elem794.read(iprot);
-              struct.success.add(_elem794);
+              _elem786 = new TableMeta();
+              _elem786.read(iprot);
+              struct.success.add(_elem786);
             }
           }
           struct.setSuccessIsSet(true);
@@ -53793,13 +52669,13 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list796 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list796.size);
-                  String _elem797;
-                  for (int _i798 = 0; _i798 < _list796.size; ++_i798)
+                  org.apache.thrift.protocol.TList _list788 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list788.size);
+                  String _elem789;
+                  for (int _i790 = 0; _i790 < _list788.size; ++_i790)
                   {
-                    _elem797 = iprot.readString();
-                    struct.success.add(_elem797);
+                    _elem789 = iprot.readString();
+                    struct.success.add(_elem789);
                   }
                   iprot.readListEnd();
                 }
@@ -53834,9 +52710,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter799 : struct.success)
+            for (String _iter791 : struct.success)
             {
-              oprot.writeString(_iter799);
+              oprot.writeString(_iter791);
             }
             oprot.writeListEnd();
           }
@@ -53875,9 +52751,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter800 : struct.success)
+            for (String _iter792 : struct.success)
             {
-              oprot.writeString(_iter800);
+              oprot.writeString(_iter792);
             }
           }
         }
@@ -53892,13 +52768,13 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list801.size);
-            String _elem802;
-            for (int _i803 = 0; _i803 < _list801.size; ++_i803)
+            org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list793.size);
+            String _elem794;
+            for (int _i795 = 0; _i795 < _list793.size; ++_i795)
             {
-              _elem802 = iprot.readString();
-              struct.success.add(_elem802);
+              _elem794 = iprot.readString();
+              struct.success.add(_elem794);
             }
           }
           struct.setSuccessIsSet(true);
@@ -55351,13 +54227,13 @@ public class ThriftHiveMetastore {
             case 2: // TBL_NAMES
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list804 = iprot.readListBegin();
-                  struct.tbl_names = new ArrayList<String>(_list804.size);
-                  String _elem805;
-                  for (int _i806 = 0; _i806 < _list804.size; ++_i806)
+                  org.apache.thrift.protocol.TList _list796 = iprot.readListBegin();
+                  struct.tbl_names = new ArrayList<String>(_list796.size);
+                  String _elem797;
+                  for (int _i798 = 0; _i798 < _list796.size; ++_i798)
                   {
-                    _elem805 = iprot.readString();
-                    struct.tbl_names.add(_elem805);
+                    _elem797 = iprot.readString();
+                    struct.tbl_names.add(_elem797);
                   }
                   iprot.readListEnd();
                 }
@@ -55388,9 +54264,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size()));
-            for (String _iter807 : struct.tbl_names)
+            for (String _iter799 : struct.tbl_names)
             {
-              oprot.writeString(_iter807);
+              oprot.writeString(_iter799);
             }
             oprot.writeListEnd();
           }
@@ -55427,9 +54303,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetTbl_names()) {
           {
             oprot.writeI32(struct.tbl_names.size());
-            for (String _iter808 : struct.tbl_names)
+            for (String _iter800 : struct.tbl_names)
             {
-              oprot.writeString(_iter808);
+              oprot.writeString(_iter800);
             }
           }
         }
@@ -55445,13 +54321,13 @@ public class ThriftHiveMetastore {
         }
         if (incoming.get(1)) {
           {
-            org.apache.thrift.protocol.TList _list809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.tbl_names = new ArrayList<String>(_list809.size);
-            String _elem810;
-            for (int _i811 = 0; _i811 < _list809.size; ++_i811)
+            org.apache.thrift.protocol.TList _list801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.tbl_names = new ArrayList<String>(_list801.size);
+            String _elem802;
+            for (int _i803 = 0; _i803 < _list801.size; ++_i803)
             {
-              _elem810 = iprot.readString();
-              struct.tbl_names.add(_elem810);
+              _elem802 = iprot.readString();
+              struct.tbl_names.add(_elem802);
             }
           }
           struct.setTbl_namesIsSet(true);
@@ -55776,14 +54652,14 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list812 = iprot.readListBegin();
-                  struct.success = new ArrayList<Table>(_list812.size);
-                  Table _elem813;
-                  for (int _i814 = 0; _i814 < _list812.size; ++_i814)
+                  org.apache.thrift.protocol.TList _list804 = iprot.readListBegin();
+                  struct.success = new ArrayList<Table>(_list804.size);
+                  Table _elem805;
+                  for (int _i806 = 0; _i806 < _list804.size; ++_i806)
                   {
-                    _elem813 = new Table();
-                    _elem813.read(iprot);
-                    struct.success.add(_elem813);
+                    _elem805 = new Table();
+                    _elem805.read(iprot);
+                    struct.success.add(_elem805);
                   }
                   iprot.readListEnd();
                 }
@@ -55809,9 +54685,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (Table _iter815 : struct.success)
+            for (Table _iter807 : struct.success)
             {
-              _iter815.write(oprot);
+              _iter807.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -55842,9 +54718,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (Table _iter816 : struct.success)
+            for (Table _iter808 : struct.success)
             {
-              _iter816.write(oprot);
+              _iter808.write(oprot);
             }
           }
         }
@@ -55856,14 +54732,14 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<Table>(_list817.size);
-            Table _elem818;
-            for (int _i819 = 0; _i819 < _list817.size; ++_i819)
+            org.apache.thrift.protocol.TList _list809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<Table>(_list809.size);
+            Table _elem810;
+            for (int _i811 = 0; _i811 < _list809.size; ++_i811)
             {
-              _elem818 = new Table();
-              _elem818.read(iprot);
-              struct.success.add(_elem818);
+              _elem810 = new Table();
+              _elem810.read(iprot);
+              struct.success.add(_elem810);
             }
           }
           struct.setSuccessIsSet(true);
@@ -58976,13 +57852,13 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list820 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list820.size);
-                  String _elem821;
-                  for (int _i822 = 0; _i822 < _list820.size; ++_i822)
+                  org.apache.thrift.protocol.TList _list812 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list812.size);
+                  String _elem813;
+                  for (int _i814 = 0; _i814 < _list812.size; ++_i814)
                   {
-                    _elem821 = iprot.readString();
-                    struct.success.add(_elem821);
+                    _elem813 = iprot.readString();
+                    struct.success.add(_elem813);
                   }
                   iprot.readListEnd();
                 }
@@ -59035,9 +57911,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter823 : struct.success)
+            for (String _iter815 : struct.success)
             {
-              oprot.writeString(_iter823);
+              oprot.writeString(_iter815);
             }
             oprot.writeListEnd();
           }
@@ -59092,9 +57968,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter824 : struct.success)
+            for (String _iter816 : struct.success)
             {
-              oprot.writeString(_iter824);
+              oprot.writeString(_iter816);
             }
           }
         }
@@ -59115,13 +57991,13 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list825.size);
-            String _elem826;
-            for (int _i827 = 0; _i827 < _list825.size; ++_i827)
+            org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list817.size);
+            String _elem818;
+            for (int _i819 = 0; _i819 < _list817.size; ++_i819)
             {
-              _elem826 = iprot.readString();
-              struct.success.add(_elem826);
+              _elem818 = iprot.readString();
+              struct.success.add(_elem818);
             }
           }
           struct.setSuccessIsSet(true);
@@ -64980,14 +63856,14 @@ public class ThriftHiveMetastore {
             case 1: // NEW_PARTS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list828 = iprot.readListBegin();
-                  struct.new_parts = new ArrayList<Partition>(_list828.size);
-                  Partition _elem829;
-                  for (int _i830 = 0; _i830 < _list828.size; ++_i830)
+                  org.apache.thrift.protocol.TList _list820 = iprot.readListBegin();
+                  struct.new_parts = new ArrayList<Partition>(_list820.size);
+                  Partition _elem821;
+                  for (int _i822 = 0; _i822 < _list820.size; ++_i822)
                   {
-                    _elem829 = new Partition();
-                    _elem829.read(iprot);
-                    struct.new_parts.add(_elem829);
+                    _elem821 = new Partition();
+                    _elem821.read(iprot);
+                    struct.new_parts.add(_elem821);
                   }
                   iprot.readListEnd();
                 }
@@ -65013,9 +63889,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size()));
-            for (Partition _iter831 : struct.new_parts)
+            for (Partition _iter823 : struct.new_parts)
             {
-              _iter831.write(oprot);
+              _iter823.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -65046,9 +63922,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetNew_parts()) {
           {
             oprot.writeI32(struct.new_parts.size());
-            for (Partition _iter832 : struct.new_parts)
+            for (Partition _iter824 : struct.new_parts)
             {
-              _iter832.write(oprot);
+              _iter824.write(oprot);
             }
           }
         }
@@ -65060,14 +63936,14 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.new_parts = new ArrayList<Partition>(_list833.size);
-            Partition _elem834;
-            for (int _i835 = 0; _i835 < _list833.size; ++_i835)
+            org.apache.thrift.protocol.TList _list825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.new_parts = new ArrayList<Partition>(_list825.size);
+            Partition _elem826;
+            for (int _i827 = 0; _i827 < _list825.size; ++_i827)
             {
-              _elem834 = new Partition();
-              _elem834.read(iprot);
-              struct.new_parts.add(_elem834);
+              _elem826 = new Partition();
+              _elem826.read(iprot);
+              struct.new_parts.add(_elem826);
             }
           }
           struct.setNew_partsIsSet(true);
@@ -66068,14 +64944,14 @@ public class ThriftHiveMetastore {
             case 1: // NEW_PARTS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list836 = iprot.readListBegin();
-                  struct.new_parts = new ArrayList<PartitionSpec>(_list836.size);
-                  PartitionSpec _elem837;
-                  for (int _i838 = 0; _i838 < _list836.size; ++_i838)
+                  org.apache.thrift.protocol.TList _list828 = iprot.readListBegin();
+                  struct.new_parts = new ArrayList<PartitionSpec>(_list828.size);
+                  PartitionSpec _elem829;
+                  for (int _i830 = 0; _i830 < _list828.size; ++_i830)
                   {
-                    _elem837 = new PartitionSpec();
-                    _elem837.read(iprot);
-                    struct.new_parts.add(_elem837);
+                    _elem829 = new PartitionSpec();
+                    _elem829.read(iprot);
+                    struct.new_parts.add(_elem829);
                   }
                   iprot.readListEnd();
                 }
@@ -66101,9 +64977,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size()));
-            for (PartitionSpec _iter839 : struct.new_parts)
+            for (PartitionSpec _iter831 : struct.new_parts)
             {
-              _iter839.write(oprot);
+              _iter831.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -66134,9 +65010,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetNew_parts()) {
           {
             oprot.writeI32(struct.new_parts.size());
-            for (PartitionSpec _iter840 : struct.new_parts)
+            for (PartitionSpec _iter832 : struct.new_parts)
             {
-              _iter840.write(oprot);
+              _iter832.write(oprot);
             }
           }
         }
@@ -66148,14 +65024,14 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list841 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.new_parts = new ArrayList<PartitionSpec>(_list841.size);
-            PartitionSpec _elem842;
-            for (int _i843 = 0; _i843 < _list841.size; ++_i843)
+            org.apache.thrift.protocol.TList _list833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.new_parts = new ArrayList<PartitionSpec>(_list833.size);
+            PartitionSpec _elem834;
+            for (int _i835 = 0; _i835 < _list833.size; ++_i835)
             {
-              _elem842 = new PartitionSpec();
-              _elem842.read(iprot);
-              struct.new_parts.add(_elem842);
+              _elem834 = new PartitionSpec();
+              _elem834.read(iprot);
+              struct.new_parts.add(_elem834);
             }
           }
           struct.setNew_partsIsSet(true);
@@ -67331,13 +66207,13 @@ public class ThriftHiveMetastore {
             case 3: // PART_VALS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list844 = iprot.readListBegin();
-                  struct.part_vals = new ArrayList<String>(_list844.size);
-                  String _elem845;
-                  for (int _i846 = 0; _i846 < _list844.size; ++_i846)
+                  org.apache.thrift.protocol.TList _list836 = iprot.readListBegin();
+                  struct.part_vals = new ArrayList<String>(_list836.size);
+                  String _elem837;
+                  for (int _i838 = 0; _i838 < _list836.size; ++_i838)
                   {
-                    _elem845 = iprot.readString();
-                    struct.part_vals.add(_elem845);
+                    _elem837 = iprot.readString();
+                    struct.part_vals.add(_elem837);
                   }
                   iprot.readListEnd();
                 }
@@ -67373,9 +66249,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-            for (String _iter847 : struct.part_vals)
+            for (String _iter839 : struct.part_vals)
             {
-              oprot.writeString(_iter847);
+              oprot.writeString(_iter839);
             }
             oprot.writeListEnd();
           }
@@ -67418,9 +66294,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetPart_vals()) {
           {
             oprot.writeI32(struct.part_vals.size());
-            for (String _iter848 : struct.part_vals)
+            for (String _iter840 : struct.part_vals)
             {
-              oprot.writeString(_iter848);
+              oprot.writeString(_iter840);
             }
           }
         }
@@ -67440,13 +66316,13 @@ public class ThriftHiveMetastore {
         }
         if (incoming.get(2)) {
           {
-            org.apache.thrift.protocol.TList _list849 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.part_vals = new ArrayList<String>(_list849.size);
-            String _elem850;
-            for (int _i851 = 0; _i851 < _list849.size; ++_i851)
+            org.apache.thrift.protocol.TList _list841 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.part_vals = new ArrayList<String>(_list841.size);
+            String _elem842;
+            for (int _i843 = 0; _i843 < _list841.size; ++_i843)
             {
-              _elem850 = iprot.readString();
-              struct.part_vals.add(_elem850);
+              _elem842 = iprot.readString();
+              struct.part_vals.add(_elem842);
             }
           }
           struct.setPart_valsIsSet(true);
@@ -69755,13 +68631,13 @@ public class ThriftHiveMetastore {
             case 3: // PART_VALS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list852 = iprot.readListBegin();
-                  struct.part_vals = new ArrayList<String>(_list852.size);
-                  String _elem853;
-                  for (int _i854 = 0; _i854 < _list852.size; ++_i854)
+                  org.apache.thrift.protocol.TList _list844 = iprot.readListBegin();
+                  struct.part_vals = new ArrayList<String>(_list844.size);
+                  String _elem845;
+                  for (int _i846 = 0; _i846 < _list844.size; ++_i846)
                   {
-                    _elem853 = iprot.readString();
-                    struct.part_vals.add(_elem853);
+                    _elem845 = iprot.readString();
+                    struct.part_vals.add(_elem845);
                   }
                   iprot.readListEnd();
                 }
@@ -69806,9 +68682,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-            for (String _iter855 : struct.part_vals)
+            for (String _iter847 : struct.part_vals)
             {
-              oprot.writeString(_iter855);
+              oprot.writeString(_iter847);
             }
             oprot.writeListEnd();
           }
@@ -69859,9 +68735,9 @@ public class ThriftHiveMetastore {
         if (struct.isSetPart_vals()) {
           {
             oprot.writeI32(struct.part_vals.size());
-            for (String _iter856 : struct.part_vals)
+            for (String _iter848 : struct.part_vals)
             {
-              oprot.writeString(_iter856);
+              oprot.writeString(_iter848);
             }
           }
         }
@@ -69884,13 +68760,13 @@ public class ThriftHiveMetastore {
         }
         if (incoming.get(2)) {
           {
-            org.apache.thrift.protocol.TList _list857 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.part_vals = new ArrayList<String>(_list857.size);
-            String _elem858;
-            for (int _i859 = 0; _i859 < _list857.size; ++_i859)
+            org.apache.thrift.protocol.TList _list849 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.part_vals = new ArrayList<String>(_list849.size);
+            String _elem850;
+            for (int _i851 = 0; _i851 < _list849.size; ++_i851)
             {
-              _elem858 = iprot.readString();
-              struct.part_vals.add(_elem858);
+              _elem850 = iprot.readString();
+              struct.part_vals.add(_elem850);
             }
           }
           struct.setPart_valsIsSet(true);
@@ -73760,13 +72636,13 @@ public class ThriftHiveMetastore {
             case 3: // PART_VALS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list860 = iprot.readListBegin();
-                  struct.part_vals = new ArrayList<String>(_list860.size);
-                  String _elem861;
-                  for (int _i862 = 0; _i862 < _list860.size; ++_i862)
+                  org.apache.thrift.protocol.TList _list852 = iprot.readListBegin();
+                  struct.part_vals = new ArrayList<String>(_list852.size);
+                  String _elem853;
+                  for (int _i854 = 0; _i854 < _list852.size; ++_i854)
                   {
-                    _elem861 = iprot.readString();
-                    struct.part_vals.add(_elem861);
+                    _elem853 = iprot.readString();
+                    struct.part_vals.add(_elem853);
                   }
                   iprot.readListEnd();
                 }
@@ -73810,9 +72686,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-            for (String _iter863 : struct.part_vals)
+            for (String _iter855 : struct.part_vals)
             {
-              oprot.writeString(_iter863);
+              oprot.writeString(_iter855);
             }
             oprot.writeL

<TRUNCATED>
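
Nearly all of the tuple-scheme hunks in this part follow one pattern: which optional fields are present is recorded in a BitSet, the BitSet is written first, and only the flagged fields follow (which is why drop_table_with_environment_context_result writes two bits once o3 is added). The sketch below illustrates that encoding idea with plain java.util.BitSet and a List standing in for libthrift's TTupleProtocol; it is an illustration of the pattern only, not Thrift code.

import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

// Conceptual sketch of the tuple-scheme pattern seen in the hunks above:
// the writer flags the optional fields that are set in a BitSet, emits the
// BitSet first, then emits only those fields; the reader mirrors the order.
public class TupleEncodingSketch {

  static List<Object> write(String o1, String o3) {
    BitSet optionals = new BitSet();
    if (o1 != null) {
      optionals.set(0);
    }
    if (o3 != null) {
      optionals.set(1);
    }
    List<Object> out = new ArrayList<>();
    out.add(optionals);            // bit set goes first
    if (o1 != null) {
      out.add(o1);                 // then only the fields that are set
    }
    if (o3 != null) {
      out.add(o3);
    }
    return out;
  }

  static String[] read(List<Object> in) {
    int pos = 0;
    BitSet incoming = (BitSet) in.get(pos++);
    String o1 = incoming.get(0) ? (String) in.get(pos++) : null;
    String o3 = incoming.get(1) ? (String) in.get(pos++) : null;
    return new String[] { o1, o3 };
  }

  public static void main(String[] args) {
    String[] decoded = read(write("NoSuchObjectException", null));
    System.out.println(decoded[0] + ", " + decoded[1]); // NoSuchObjectException, null
  }
}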

[03/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppenderForTest.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppenderForTest.java b/ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppenderForTest.java
deleted file mode 100644
index 966c264..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppenderForTest.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.log;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.logging.log4j.Level;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.core.LogEvent;
-import org.apache.logging.log4j.core.LoggerContext;
-import org.apache.logging.log4j.core.appender.RandomAccessFileAppender;
-import org.apache.logging.log4j.core.appender.routing.Route;
-import org.apache.logging.log4j.core.appender.routing.Routes;
-import org.apache.logging.log4j.core.appender.routing.RoutingAppender;
-import org.apache.logging.log4j.core.config.Configuration;
-import org.apache.logging.log4j.core.config.LoggerConfig;
-import org.apache.logging.log4j.core.config.Node;
-import org.apache.logging.log4j.core.config.plugins.Plugin;
-import org.apache.logging.log4j.core.config.plugins.PluginFactory;
-import org.apache.logging.log4j.core.config.plugins.processor.PluginEntry;
-import org.apache.logging.log4j.core.config.plugins.util.PluginType;
-import org.apache.logging.log4j.core.filter.AbstractFilter;
-import org.apache.logging.log4j.core.layout.PatternLayout;
-
-/**
- * Divert appender to redirect and filter test operation logs to match the output of the original
- * CLI qtest results.
- */
-public final class LogDivertAppenderForTest {
-  private LogDivertAppenderForTest() {
-    // Prevent instantiation
-  }
-
-  /**
-   * A log filter that filters test messages coming from the logger.
-   */
-  @Plugin(name = "TestFilter", category = "Core", elementType="filter", printObject = true)
-  private static class TestFilter extends AbstractFilter {
-    @Override
-    public Result filter(LogEvent event) {
-      if (event.getLevel().equals(Level.INFO) && "SessionState".equals(event.getLoggerName())) {
-        if (event.getMessage().getFormattedMessage().startsWith("PREHOOK:")
-            || event.getMessage().getFormattedMessage().startsWith("POSTHOOK:")) {
-          return Result.ACCEPT;
-        }
-      }
-      return Result.DENY;
-    }
-
-    @PluginFactory
-    public static TestFilter createFilter() {
-      return new TestFilter();
-    }
-  }
-
-  /**
-   * If the HIVE_IN_TEST is set, then programmatically register a routing appender to Log4J
-   * configuration, which automatically writes the test log of each query to an individual file.
-   * The equivalent property configuration is as follows:
-   *  # queryId based routing file appender
-      appender.test-query-routing.type = Routing
-      appender.test-query-routing.name = test-query-routing
-      appender.test-query-routing.routes.type = Routes
-      appender.test-query-routing.routes.pattern = $${ctx:queryId}
-      # default route
-      appender.test-query-routing.routes.test-route-default.type = Route
-      appender.test-query-routing.routes.test-route-default.key = $${ctx:queryId}
-      appender.test-query-routing.routes.test-route-default.app.type = NullAppender
-      appender.test-query-routing.routes.test-route-default.app.name = test-null-appender
-      # queryId based route
-      appender.test-query-routing.routes.test-route-mdc.type = Route
-      appender.test-query-routing.routes.test-route-mdc.name = test-query-routing
-      appender.test-query-routing.routes.test-route-mdc.app.type = RandomAccessFile
-      appender.test-query-routing.routes.test-route-mdc.app.name = test-query-file-appender
-      appender.test-query-routing.routes.test-route-mdc.app.fileName = ${sys:hive.log.dir}/${ctx:sessionId}/${ctx:queryId}.test
-      appender.test-query-routing.routes.test-route-mdc.app.layout.type = PatternLayout
-      appender.test-query-routing.routes.test-route-mdc.app.layout.pattern = %d{ISO8601} %5p %c{2}: %m%n
-      appender.test-query-routing.routes.test-route-mdc.app.filter.type = TestFilter
-   * @param conf the configuration for HiveServer2 instance
-   */
-  public static void registerRoutingAppenderIfInTest(org.apache.hadoop.conf.Configuration conf) {
-    if (!conf.getBoolean(HiveConf.ConfVars.HIVE_IN_TEST.varname,
-        HiveConf.ConfVars.HIVE_IN_TEST.defaultBoolVal)) {
-      // If not in test mode, then do no create the appender
-      return;
-    }
-
-    String logLocation =
-        HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION);
-
-    // Create test-null-appender to drop events without queryId
-    PluginEntry nullAppenderEntry = new PluginEntry();
-    nullAppenderEntry.setClassName(NullAppender.class.getName());
-    PluginType<NullAppender> nullAppenderType =
-        new PluginType<NullAppender>(nullAppenderEntry, NullAppender.class, "appender");
-    Node nullAppenderChildNode = new Node(null, "test-null-appender", nullAppenderType);
-
-    // Create default route where events go without queryId
-    PluginEntry defaultRouteEntry = new PluginEntry();
-    defaultRouteEntry.setClassName(Route.class.getName());
-    PluginType<Route> defaultRouteType = new PluginType<Route>(defaultRouteEntry, Route.class, "");
-    Node defaultRouteNode = new Node(null, "test-route-default", defaultRouteType);
-    // Add the test-null-appender to the default route
-    defaultRouteNode.getChildren().add(nullAppenderChildNode);
-
-    // Create queryId based route
-    PluginEntry queryIdRouteEntry = new PluginEntry();
-    queryIdRouteEntry.setClassName(Route.class.getName());
-    PluginType<Route> queryIdRouteType = new PluginType<Route>(queryIdRouteEntry, Route.class, "");
-    Node queryIdRouteNode = new Node(null, "test-route-mdc", queryIdRouteType);
-
-    // Create the queryId appender for the queryId route
-    PluginEntry queryIdAppenderEntry = new PluginEntry();
-    queryIdAppenderEntry.setClassName(RandomAccessFileAppender.class.getName());
-    PluginType<RandomAccessFileAppender> queryIdAppenderType =
-        new PluginType<RandomAccessFileAppender>(queryIdAppenderEntry,
-            RandomAccessFileAppender.class, "appender");
-    Node queryIdAppenderNode =
-        new Node(queryIdRouteNode, "test-query-file-appender", queryIdAppenderType);
-    queryIdAppenderNode.getAttributes().put("fileName", logLocation
-        + "/${ctx:sessionId}/${ctx:queryId}.test");
-    queryIdAppenderNode.getAttributes().put("name", "test-query-file-appender");
-    // Add the queryId appender to the queryId based route
-    queryIdRouteNode.getChildren().add(queryIdAppenderNode);
-
-    // Create the filter for the queryId appender
-    PluginEntry filterEntry = new PluginEntry();
-    filterEntry.setClassName(TestFilter.class.getName());
-    PluginType<TestFilter> filterType =
-        new PluginType<TestFilter>(filterEntry, TestFilter.class, "");
-    Node filterNode = new Node(queryIdAppenderNode, "test-filter", filterType);
-    // Add the filter to the queryId appender
-    queryIdAppenderNode.getChildren().add(filterNode);
-
-    // Create the layout for the queryId appender
-    PluginEntry layoutEntry = new PluginEntry();
-    layoutEntry.setClassName(PatternLayout.class.getName());
-    PluginType<PatternLayout> layoutType =
-        new PluginType<PatternLayout>(layoutEntry, PatternLayout.class, "");
-    Node layoutNode = new Node(queryIdAppenderNode, "PatternLayout", layoutType);
-    layoutNode.getAttributes().put("pattern", LogDivertAppender.nonVerboseLayout);
-    // Add the layout to the queryId appender
-    queryIdAppenderNode.getChildren().add(layoutNode);
-
-    // Create the route objects based on the Nodes
-    Route defaultRoute = Route.createRoute(null, "${ctx:queryId}", defaultRouteNode);
-    Route mdcRoute = Route.createRoute(null, null, queryIdRouteNode);
-    // Create the routes group
-    Routes routes = Routes.createRoutes("${ctx:queryId}", defaultRoute, mdcRoute);
-
-    LoggerContext context = (LoggerContext)LogManager.getContext(false);
-    Configuration configuration = context.getConfiguration();
-
-    // Create the appender
-    RoutingAppender routingAppender = RoutingAppender.createAppender("test-query-routing",
-        "true",
-        routes,
-        configuration,
-        null,
-        null,
-        null);
-
-    LoggerConfig loggerConfig = configuration.getRootLogger();
-    loggerConfig.addAppender(routingAppender, null, null);
-    context.updateLoggers();
-    routingAppender.start();
-  }
-}
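
[Editor's note] For context on the routing configuration quoted in the javadoc above, the route key comes from the Log4j2 thread context. A minimal sketch, assuming only the queryId/sessionId context keys from that configuration; the class and method names below are illustrative and not part of Hive:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.ThreadContext;

    public class QueryRoutingSketch {
      private static final Logger LOG = LogManager.getLogger(QueryRoutingSketch.class);

      public static void runWithQueryLogging(String sessionId, String queryId) {
        // The RoutingAppender keys on ${ctx:queryId}: while these context entries are set,
        // log events are routed to <log.dir>/<sessionId>/<queryId>.test; events without a
        // queryId fall through to the default route (the NullAppender).
        ThreadContext.put("sessionId", sessionId);
        ThreadContext.put("queryId", queryId);
        try {
          LOG.info("query-scoped log line");
        } finally {
          ThreadContext.remove("queryId");
          ThreadContext.remove("sessionId");
        }
      }
    }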

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index de68876..ea87cb4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -697,11 +697,6 @@ public class Hive {
       throws InvalidOperationException, HiveException {
     try {
       validatePartition(newPart);
-      String location = newPart.getLocation();
-      if (location != null && !Utilities.isDefaultNameNode(conf)) {
-        location = Utilities.getQualifiedPath(conf, new Path(location));
-        newPart.setLocation(location);
-      }
       getMSC().alter_partition(dbName, tblName, newPart.getTPartition(), environmentContext);
 
     } catch (MetaException e) {
@@ -741,11 +736,6 @@ public class Hive {
         if (tmpPart.getParameters() != null) {
           tmpPart.getParameters().remove(hive_metastoreConstants.DDL_TIME);
         }
-        String location = tmpPart.getLocation();
-        if (location != null && !Utilities.isDefaultNameNode(conf)) {
-          location = Utilities.getQualifiedPath(conf, new Path(location));
-          tmpPart.setLocation(location);
-        }
         newTParts.add(tmpPart.getTPartition());
       }
       getMSC().alter_partitions(names[0], names[1], newTParts, environmentContext);
@@ -1218,27 +1208,6 @@ public class Hive {
     }
   }
 
-
-
-  /**
-   * Truncates the table/partition as per specifications. Just trash the data files
-   *
-   * @param dbDotTableName
-   *          name of the table
-   * @throws HiveException
-   */
-  public void truncateTable(String dbDotTableName, Map<String, String> partSpec) throws HiveException {
-    try {
-      Table table = getTable(dbDotTableName, true);
-
-      List<String> partNames = ((null == partSpec)
-                       ? null : getPartitionNames(table.getDbName(), table.getTableName(), partSpec, (short) -1));
-      getMSC().truncateTable(table.getDbName(), table.getTableName(), partNames);
-    } catch (Exception e) {
-      throw new HiveException(e);
-    }
-  }
-
   public HiveConf getConf() {
     return (conf);
   }
@@ -1444,6 +1413,7 @@ public class Hive {
    */
   public List<String> getTablesByType(String dbName, String pattern, TableType type)
       throws HiveException {
+    List<String> retList = new ArrayList<String>();
     if (dbName == null)
       dbName = SessionState.get().getCurrentDatabase();
 
@@ -1608,20 +1578,7 @@ public class Hive {
     return getDatabase(currentDb);
   }
 
-  /**
-   * @param loadPath
-   * @param tableName
-   * @param partSpec
-   * @param replace
-   * @param inheritTableSpecs
-   * @param isSkewedStoreAsSubdir
-   * @param isSrcLocal
-   * @param isAcid
-   * @param hasFollowingStatsTask
-   * @return
-   * @throws HiveException
-   */
-  public void loadPartition(Path loadPath, String tableName,
+  public void loadSinglePartition(Path loadPath, String tableName,
       Map<String, String> partSpec, boolean replace, boolean inheritTableSpecs,
       boolean isSkewedStoreAsSubdir,  boolean isSrcLocal, boolean isAcid,
       boolean hasFollowingStatsTask, Long mmWriteId, boolean isCommitMmWrite)
@@ -1637,6 +1594,7 @@ public class Hive {
     }
   }
 
+
   public void commitMmTableWrite(Table tbl, Long mmWriteId)
       throws HiveException {
     try {
@@ -1665,11 +1623,7 @@ public class Hive {
    *          location/inputformat/outputformat/serde details from table spec
    * @param isSrcLocal
    *          If the source directory is LOCAL
-   * @param isAcid
-   *          true if this is an ACID operation
-   * @param hasFollowingStatsTask
-   *          true if there is a following task which updates the stats, so, this method need not update.
-   * @return Partition object being loaded with data
+   * @param isAcid true if this is an ACID operation
    */
   public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> partSpec,
       boolean replace, boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir,
@@ -1677,7 +1631,6 @@ public class Hive {
           throws HiveException {
     Path tblDataLocationPath =  tbl.getDataLocation();
     try {
-      // Get the partition object if it already exists
       Partition oldPart = getPartition(tbl, partSpec, false);
       /**
        * Move files before creating the partition since down stream processes
@@ -1715,12 +1668,6 @@ public class Hive {
       List<Path> newFiles = null;
       PerfLogger perfLogger = SessionState.getPerfLogger();
       perfLogger.PerfLogBegin("MoveTask", "FileMoves");
-      // If config is set, table is not temporary and partition being inserted exists, capture
-      // the list of files added. For not yet existing partitions (insert overwrite to new partition
-      // or dynamic partition inserts), the add partition event will capture the list of files added.
-      if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && (null != oldPart)) {
-        newFiles = Collections.synchronizedList(new ArrayList<Path>());
-      }
       // TODO: this assumes both paths are qualified; which they are, currently.
       if (mmWriteId != null && loadPath.equals(newPartPath)) {
         // MM insert query, move itself is a no-op.
@@ -1731,8 +1678,7 @@ public class Hive {
         }
         Utilities.LOG14535.info("maybe deleting stuff from " + oldPartPath + " (new " + newPartPath + ") for replace");
         if (replace && oldPartPath != null) {
-          boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
-          deleteOldPathForReplace(newPartPath, oldPartPath, getConf(), isAutoPurge,
+          deleteOldPathForReplace(newPartPath, oldPartPath, getConf(),
               new ValidWriteIds.IdPathFilter(mmWriteId, false, true), mmWriteId != null,
               tbl.isStoredAsSubDirectories() ? tbl.getSkewedColNames().size() : 0);
         }
@@ -1747,10 +1693,13 @@ public class Hive {
         }
         Utilities.LOG14535.info("moving " + loadPath + " to " + destPath);
         if (replace || (oldPart == null && !isAcid)) {
-          boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
           replaceFiles(tbl.getPath(), loadPath, destPath, oldPartPath, getConf(),
-              isSrcLocal, isAutoPurge, newFiles, filter, mmWriteId != null);
+              isSrcLocal, filter, mmWriteId != null);
         } else {
+          if (areEventsForDmlNeeded(tbl, oldPart)) {
+            newFiles = Collections.synchronizedList(new ArrayList<Path>());
+          }
+
           FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
           Hive.copyFiles(conf, loadPath, destPath, fs, isSrcLocal, isAcid, newFiles);
         }
@@ -1759,17 +1708,13 @@ public class Hive {
       Partition newTPart = oldPart != null ? oldPart : new Partition(tbl, partSpec, newPartPath);
       alterPartitionSpecInMemory(tbl, partSpec, newTPart.getTPartition(), inheritTableSpecs, newPartPath.toString());
       validatePartition(newTPart);
-
-      // Generate an insert event only if inserting into an existing partition
-      // When inserting into a new partition, the add partition event takes care of insert event
-      if ((null != oldPart) && (null != newFiles)) {
-        fireInsertEvent(tbl, partSpec, replace, newFiles);
+      if ((null != newFiles) || replace) {
+        fireInsertEvent(tbl, partSpec, newFiles);
       } else {
-        LOG.debug("No new files were created, and is not a replace, or we're inserting into a "
-                + "partition that does not exist yet. Skipping generating INSERT event.");
+        LOG.debug("No new files were created, and is not a replace. Skipping generating INSERT event.");
       }
 
-      // column stats will be inaccurate
+      //column stats will be inaccurate
       StatsSetupConst.clearColumnStatsState(newTPart.getParameters());
 
       // recreate the partition if it existed before
@@ -2213,8 +2158,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       Utilities.LOG14535.info("not moving " + loadPath + " to " + tbl.getPath());
       if (replace) {
         Path tableDest = tbl.getPath();
-        boolean isAutopurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
-        deleteOldPathForReplace(tableDest, tableDest, sessionConf, isAutopurge,
+        deleteOldPathForReplace(tableDest, tableDest, sessionConf,
             new ValidWriteIds.IdPathFilter(mmWriteId, false, true), mmWriteId != null,
             tbl.isStoredAsSubDirectories() ? tbl.getSkewedColNames().size() : 0);
       }
@@ -2230,9 +2174,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
       }
       Utilities.LOG14535.info("moving " + loadPath + " to " + tblPath + " (replace = " + replace + ")");
       if (replace) {
-        boolean isAutopurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
         replaceFiles(tblPath, loadPath, destPath, tblPath,
-            sessionConf, isSrcLocal, isAutopurge, newFiles, filter, mmWriteId != null);
+            sessionConf, isSrcLocal, filter, mmWriteId != null);
       } else {
         try {
           FileSystem fs = tbl.getDataLocation().getFileSystem(sessionConf);
@@ -2278,7 +2221,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       commitMmTableWrite(tbl, mmWriteId);
     }
 
-    fireInsertEvent(tbl, null, replace, newFiles);
+    fireInsertEvent(tbl, null, newFiles);
   }
 
   /**
@@ -2503,7 +2446,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
         }
         else {
           alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
-          fireInsertEvent(tbl, partSpec, true, newFiles);
+          fireInsertEvent(tbl, partSpec, newFiles);
         }
       }
       if (tpart == null) {
@@ -2553,7 +2496,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
     tpart.getSd().setLocation(partPath);
   }
 
-  private void fireInsertEvent(Table tbl, Map<String, String> partitionSpec, boolean replace, List<Path> newFiles)
+  private void fireInsertEvent(Table tbl, Map<String, String> partitionSpec, List<Path> newFiles)
       throws HiveException {
     if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML)) {
       LOG.debug("Firing dml insert event");
@@ -2565,7 +2508,6 @@ private void constructOneLBLocationMap(FileStatus fSta,
         FileSystem fileSystem = tbl.getDataLocation().getFileSystem(conf);
         FireEventRequestData data = new FireEventRequestData();
         InsertEventRequestData insertData = new InsertEventRequestData();
-        insertData.setReplace(replace);
         data.setInsertData(insertData);
         if (newFiles != null && newFiles.size() > 0) {
           for (Path p : newFiles) {
@@ -3111,6 +3053,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
     if (!fullDestStatus.getFileStatus().isDirectory()) {
       throw new HiveException(destf + " is not a directory.");
     }
+    final boolean inheritPerms = HiveConf.getBoolVar(conf,
+        HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
     final List<Future<ObjectPair<Path, Path>>> futures = new LinkedList<>();
     final ExecutorService pool = conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25) > 0 ?
         Executors.newFixedThreadPool(conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25),
@@ -3137,10 +3081,15 @@ private void constructOneLBLocationMap(FileStatus fSta,
         // If we do a rename for a non-local file, we will be transfering the original
         // file permissions from source to the destination. Else, in case of mvFile() where we
         // copy from source to destination, we will inherit the destination's parent group ownership.
+        final String srcGroup = isRenameAllowed ? srcFile.getGroup() :
+          fullDestStatus.getFileStatus().getGroup();
         if (null == pool) {
           try {
             Path destPath = mvFile(conf, srcFs, srcP, destFs, destf, isSrcLocal, isRenameAllowed);
 
+            if (inheritPerms) {
+              HdfsUtils.setFullFileStatus(conf, fullDestStatus, srcGroup, destFs, destPath, false);
+            }
             if (null != newFiles) {
               newFiles.add(destPath);
             }
@@ -3155,7 +3104,9 @@ private void constructOneLBLocationMap(FileStatus fSta,
               SessionState.setCurrentSessionState(parentSession);
 
               Path destPath = mvFile(conf, srcFs, srcP, destFs, destf, isSrcLocal, isRenameAllowed);
-
+              if (inheritPerms) {
+                HdfsUtils.setFullFileStatus(conf, fullDestStatus, srcGroup, destFs, destPath, false);
+              }
               if (null != newFiles) {
                 newFiles.add(destPath);
               }
@@ -3165,7 +3116,11 @@ private void constructOneLBLocationMap(FileStatus fSta,
         }
       }
     }
-    if (null != pool) {
+    if (null == pool) {
+      if (inheritPerms) {
+        HdfsUtils.setFullFileStatus(conf, fullDestStatus, null, destFs, destf, true);
+      }
+    } else {
       pool.shutdown();
       for (Future<ObjectPair<Path, Path>> future : futures) {
         try {
@@ -3223,34 +3178,11 @@ private void constructOneLBLocationMap(FileStatus fSta,
     return ShimLoader.getHadoopShims().getPathWithoutSchemeAndAuthority(path);
   }
 
-  /**
-   * <p>
-   *   Moves a file from one {@link Path} to another. If {@code isRenameAllowed} is true then the
-   *   {@link FileSystem#rename(Path, Path)} method is used to move the file. If its false then the data is copied, if
-   *   {@code isSrcLocal} is true then the {@link FileSystem#copyFromLocalFile(Path, Path)} method is used, else
-   *   {@link FileUtils#copy(FileSystem, Path, FileSystem, Path, boolean, boolean, HiveConf)} is used.
-   * </p>
-   *
-   * <p>
-   *   If the destination file already exists, then {@code _copy_[counter]} is appended to the file name, where counter
-   *   is an integer starting from 1.
-   * </p>
-   *
-   * @param conf the {@link HiveConf} to use if copying data
-   * @param sourceFs the {@link FileSystem} where the source file exists
-   * @param sourcePath the {@link Path} to move
-   * @param destFs the {@link FileSystem} to move the file to
-   * @param destDirPath the {@link Path} to move the file to
-   * @param isSrcLocal if the source file is on the local filesystem
-   * @param isRenameAllowed true if the data should be renamed and not copied, false otherwise
-   *
-   * @return the {@link Path} the source file was moved to
-   *
-   * @throws IOException if there was an issue moving the file
-   */
   private static Path mvFile(HiveConf conf, FileSystem sourceFs, Path sourcePath, FileSystem destFs, Path destDirPath,
                              boolean isSrcLocal, boolean isRenameAllowed) throws IOException {
 
+    boolean isBlobStoragePath = BlobStorageUtils.isBlobStoragePath(conf, destDirPath);
+
     // Strip off the file type, if any so we don't make:
     // 000000_0.gz -> 000000_0.gz_copy_1
     final String fullname = sourcePath.getName();
@@ -3260,19 +3192,27 @@ private void constructOneLBLocationMap(FileStatus fSta,
     Path destFilePath = new Path(destDirPath, fullname);
 
     /*
-    * The below loop may perform bad when the destination file already exists and it has too many _copy_
-    * files as well. A desired approach was to call listFiles() and get a complete list of files from
-    * the destination, and check whether the file exists or not on that list. However, millions of files
-    * could live on the destination directory, and on concurrent situations, this can cause OOM problems.
-    *
-    * I'll leave the below loop for now until a better approach is found.
-    */
-    for (int counter = 1; destFs.exists(destFilePath); counter++) {
-      destFilePath =  new Path(destDirPath, name + ("_copy_" + counter) + (!type.isEmpty() ? "." + type : ""));
+       * The below loop may perform bad when the destination file already exists and it has too many _copy_
+       * files as well. A desired approach was to call listFiles() and get a complete list of files from
+       * the destination, and check whether the file exists or not on that list. However, millions of files
+       * could live on the destination directory, and on concurrent situations, this can cause OOM problems.
+       *
+       * I'll leave the below loop for now until a better approach is found.
+       */
+
+    int counter = 1;
+    if (!isRenameAllowed || isBlobStoragePath) {
+      while (destFs.exists(destFilePath)) {
+        destFilePath =  new Path(destDirPath, name + ("_copy_" + counter) + (!type.isEmpty() ? "." + type : ""));
+        counter++;
+      }
     }
 
     if (isRenameAllowed) {
-      destFs.rename(sourcePath, destFilePath);
+      while (!destFs.rename(sourcePath, destFilePath)) {
+        destFilePath =  new Path(destDirPath, name + ("_copy_" + counter) + (!type.isEmpty() ? "." + type : ""));
+        counter++;
+      }
     } else if (isSrcLocal) {
       destFs.copyFromLocalFile(sourcePath, destFilePath);
     } else {
@@ -3281,6 +3221,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
           false,  // overwrite destination
           conf);
     }
+
     return destFilePath;
   }
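
[Editor's note] The _copy_N naming in mvFile above avoids clobbering an existing destination file by probing with an increasing counter. A standalone sketch of that naming scheme, assuming a caller-supplied "exists" check instead of Hive's FileSystem handle; it is not the actual Hive helper:

    import java.util.function.Predicate;

    public class CopySuffixSketch {
      /**
       * Returns fullName if it is free, otherwise name_copy_1, name_copy_2, ...
       * keeping the extension (000000_0.gz becomes 000000_0_copy_1.gz rather than
       * 000000_0.gz_copy_1), mirroring the intent of the loop above.
       */
      public static String nextFreeName(String fullName, Predicate<String> exists) {
        int dot = fullName.lastIndexOf('.');
        String base = dot > 0 ? fullName.substring(0, dot) : fullName;
        String ext = dot > 0 ? fullName.substring(dot) : "";
        String candidate = fullName;
        for (int counter = 1; exists.test(candidate); counter++) {
          candidate = base + "_copy_" + counter + ext;
        }
        return candidate;
      }
    }

As the comment in the hunk notes, probing per candidate can perform poorly when many _copy_ files already exist; listing the directory once is the alternative, at the cost of memory on very large directories.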
 
@@ -3309,30 +3250,12 @@ private void constructOneLBLocationMap(FileStatus fSta,
     }
   }
 
-  // List the new files in destination path which gets copied from source.
-  public static void listNewFilesRecursively(final FileSystem destFs, Path dest,
-                                             List<Path> newFiles) throws HiveException {
-    try {
-      for (FileStatus fileStatus : destFs.listStatus(dest, FileUtils.HIDDEN_FILES_PATH_FILTER)) {
-        if (fileStatus.isDirectory()) {
-          // If it is a sub-directory, then recursively list the files.
-          listNewFilesRecursively(destFs, fileStatus.getPath(), newFiles);
-        } else {
-          newFiles.add(fileStatus.getPath());
-        }
-      }
-    } catch (IOException e) {
-      LOG.error("Failed to get source file statuses", e);
-      throw new HiveException(e.getMessage(), e);
-    }
-  }
-
   //it is assumed that parent directory of the destf should already exist when this
   //method is called. when the replace value is true, this method works a little different
   //from mv command if the destf is a directory, it replaces the destf instead of moving under
   //the destf. in this case, the replaced destf still preserves the original destf's permission
-  public static boolean moveFile(final HiveConf conf, Path srcf, final Path destf, boolean replace,
-                                 boolean isSrcLocal) throws HiveException {
+  public static boolean moveFile(final HiveConf conf, Path srcf, final Path destf,
+      boolean replace, boolean isSrcLocal) throws HiveException {
     final FileSystem srcFs, destFs;
     try {
       destFs = destf.getFileSystem(conf);
@@ -3343,10 +3266,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
     try {
       srcFs = srcf.getFileSystem(conf);
     } catch (IOException e) {
-      LOG.error("Failed to get src fs", e);
+      LOG.error("Failed to get dest fs", e);
       throw new HiveException(e.getMessage(), e);
     }
 
+    //needed for perm inheritance.
+    final boolean inheritPerms = HiveConf.getBoolVar(conf,
+        HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
     HdfsUtils.HadoopFileStatus destStatus = null;
 
     // If source path is a subdirectory of the destination path (or the other way around):
@@ -3358,7 +3284,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
     boolean srcIsSubDirOfDest = isSubDir(srcf, destf, srcFs, destFs, isSrcLocal),
         destIsSubDirOfSrc = isSubDir(destf, srcf, destFs, srcFs, false);
     try {
-      if (replace) {
+      if (inheritPerms || replace) {
         try{
           destStatus = new HdfsUtils.HadoopFileStatus(conf, destFs, destf);
           //if destf is an existing directory:
@@ -3371,6 +3297,10 @@ private void constructOneLBLocationMap(FileStatus fSta,
             LOG.debug("The path " + destf.toString() + " is deleted");
           }
         } catch (FileNotFoundException ignore) {
+          //if dest dir does not exist, any re
+          if (inheritPerms) {
+            destStatus = new HdfsUtils.HadoopFileStatus(conf, destFs, destf.getParent());
+          }
         }
       }
       final HdfsUtils.HadoopFileStatus desiredStatus = destStatus;
@@ -3378,6 +3308,9 @@ private void constructOneLBLocationMap(FileStatus fSta,
       if (isSrcLocal) {
         // For local src file, copy to hdfs
         destFs.copyFromLocalFile(srcf, destf);
+        if (inheritPerms) {
+          HdfsUtils.setFullFileStatus(conf, destStatus, destFs, destf, true);
+        }
         return true;
       } else {
         if (needToCopy(srcf, destf, srcFs, destFs)) {
@@ -3414,7 +3347,11 @@ private void constructOneLBLocationMap(FileStatus fSta,
                   public Void call() throws Exception {
                     SessionState.setCurrentSessionState(parentSession);
                     final String group = srcStatus.getGroup();
-                    if(!destFs.rename(srcStatus.getPath(), destFile)) {
+                    if(destFs.rename(srcStatus.getPath(), destFile)) {
+                      if (inheritPerms) {
+                        HdfsUtils.setFullFileStatus(conf, desiredStatus, group, destFs, destFile, false);
+                      }
+                    } else {
                       throw new IOException("rename for src path: " + srcStatus.getPath() + " to dest path:"
                           + destFile + " returned false");
                     }
@@ -3423,7 +3360,11 @@ private void constructOneLBLocationMap(FileStatus fSta,
                 }));
               }
             }
-            if (null != pool) {
+            if (null == pool) {
+              if (inheritPerms) {
+                HdfsUtils.setFullFileStatus(conf, desiredStatus, null, destFs, destf, true);
+              }
+            } else {
               pool.shutdown();
               for (Future<Void> future : futures) {
                 try {
@@ -3438,6 +3379,9 @@ private void constructOneLBLocationMap(FileStatus fSta,
             return true;
           } else {
             if (destFs.rename(srcf, destf)) {
+              if (inheritPerms) {
+                HdfsUtils.setFullFileStatus(conf, destStatus, destFs, destf, true);
+              }
               return true;
             }
             return false;
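
[Editor's note] Hive's HdfsUtils.setFullFileStatus used in the inheritPerms branches above does more than this sketch (it can also copy extended ACLs). As a rough illustration of the underlying idea only, the following copies the permission bits and group of the destination's parent directory onto a newly renamed path using plain Hadoop FileSystem calls, not the actual Hive helper:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class InheritPermsSketch {
      /** Copy permission bits and group from destDir onto movedPath. */
      public static void inheritFromParent(Configuration conf, Path destDir, Path movedPath)
          throws IOException {
        FileSystem fs = destDir.getFileSystem(conf);
        FileStatus parent = fs.getFileStatus(destDir);
        fs.setPermission(movedPath, parent.getPermission());
        // setOwner with a null user leaves the owner unchanged and only sets the group.
        fs.setOwner(movedPath, null, parent.getGroup());
      }
    }
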
@@ -3488,10 +3432,12 @@ private void constructOneLBLocationMap(FileStatus fSta,
    */
   static protected void copyFiles(HiveConf conf, Path srcf, Path destf,
       FileSystem fs, boolean isSrcLocal, boolean isAcid, List<Path> newFiles) throws HiveException {
+    boolean inheritPerms = HiveConf.getBoolVar(conf,
+        HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
     try {
       // create the destination if it does not exist
       if (!fs.exists(destf)) {
-        FileUtils.mkdir(fs, destf, conf);
+        FileUtils.mkdir(fs, destf, inheritPerms, conf);
       }
     } catch (IOException e) {
       throw new HiveException(
@@ -3625,16 +3571,11 @@ private void constructOneLBLocationMap(FileStatus fSta,
    * @param oldPath
    *          The directory where the old data location, need to be cleaned up.  Most of time, will be the same
    *          as destf, unless its across FileSystem boundaries.
-   * @param purge
-   *          When set to true files which needs to be deleted are not moved to Trash
    * @param isSrcLocal
    *          If the source directory is LOCAL
-   * @param newFiles
-   *          Output the list of new files replaced in the destination path
    */
   protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath, HiveConf conf,
-          boolean isSrcLocal, boolean purge, List<Path> newFiles, PathFilter deletePathFilter,
-          boolean isMmTable) throws HiveException {
+          boolean isSrcLocal, PathFilter deletePathFilter, boolean isMmTable) throws HiveException {
     try {
 
       FileSystem destFs = destf.getFileSystem(conf);
@@ -3655,12 +3596,14 @@ private void constructOneLBLocationMap(FileStatus fSta,
       if (oldPath != null) {
         // TODO: we assume lbLevels is 0 here. Same as old code for non-MM.
         //       For MM tables, this can only be a LOAD command. Does LOAD even support LB?
-        deleteOldPathForReplace(destf, oldPath, conf, purge, deletePathFilter, isMmTable, 0);
+        deleteOldPathForReplace(destf, oldPath, conf, deletePathFilter, isMmTable, 0);
       }
 
       // first call FileUtils.mkdir to make sure that destf directory exists, if not, it creates
-      // destf
-      boolean destfExist = FileUtils.mkdir(destFs, destf, conf);
+      // destf with inherited permissions
+      boolean inheritPerms = HiveConf.getBoolVar(conf, HiveConf.ConfVars
+          .HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
+      boolean destfExist = FileUtils.mkdir(destFs, destf, inheritPerms, conf);
       if(!destfExist) {
         throw new IOException("Directory " + destf.toString()
             + " does not exist and could not be created.");
@@ -3676,23 +3619,11 @@ private void constructOneLBLocationMap(FileStatus fSta,
         if (!moveFile(conf, srcs[0].getPath(), destf, true, isSrcLocal)) {
           throw new IOException("Error moving: " + srcf + " into: " + destf);
         }
-
-        // Add file paths of the files that will be moved to the destination if the caller needs it
-        if (null != newFiles) {
-          listNewFilesRecursively(destFs, destf, newFiles);
-        }
-      } else {
-        // its either a file or glob
+      } else { // its either a file or glob
         for (FileStatus src : srcs) {
-          Path destFile = new Path(destf, src.getPath().getName());
-          if (!moveFile(conf, src.getPath(), destFile, true, isSrcLocal)) {
+          if (!moveFile(conf, src.getPath(), new Path(destf, src.getPath().getName()), true, isSrcLocal)) {
             throw new IOException("Error moving: " + srcf + " into: " + destf);
           }
-
-          // Add file paths of the files that will be moved to the destination if the caller needs it
-          if (null != newFiles) {
-            newFiles.add(destFile);
-          }
         }
       }
     } catch (IOException e) {
@@ -3700,7 +3631,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
     }
   }
 
-  private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf, boolean purge,
+  private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf,
       PathFilter pathFilter, boolean isMmTable, int lbLevels) throws HiveException {
     Utilities.LOG14535.info("Deleting old paths for replace in " + destPath + " and old path " + oldPath);
     boolean isOldPathUnderDestf = false;
@@ -3714,7 +3645,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       isOldPathUnderDestf = isSubDir(oldPath, destPath, oldFs, destFs, false);
       if (isOldPathUnderDestf || isMmTable) {
         if (lbLevels == 0 || !isMmTable) {
-          cleanUpOneDirectoryForReplace(oldPath, oldFs, pathFilter, conf, purge);
+          cleanUpOneDirectoryForReplace(oldPath, oldFs, pathFilter, conf);
         } else {
           // We need to clean up different MM IDs from each LB directory separately.
           // Avoid temporary directories in the immediate table/part dir.
@@ -3732,7 +3663,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
               throw new HiveException("Unexpected path during overwrite: " + lbPath);
             }
             Utilities.LOG14535.info("Cleaning up LB directory " + lbPath);
-            cleanUpOneDirectoryForReplace(lbPath, oldFs, pathFilter, conf, purge);
+            cleanUpOneDirectoryForReplace(lbPath, oldFs, pathFilter, conf);
           }
         }
       }
@@ -3750,7 +3681,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
 
   private void cleanUpOneDirectoryForReplace(Path path, FileSystem fs,
-      PathFilter pathFilter, HiveConf conf, boolean purge) throws IOException, HiveException {
+      PathFilter pathFilter, HiveConf conf) throws IOException, HiveException {
     FileStatus[] statuses = fs.listStatus(path, pathFilter);
     if (statuses == null || statuses.length == 0) return;
     String s = "Deleting files under " + path + " for replace: ";
@@ -3758,7 +3689,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       s += file.getPath().getName() + ", ";
     }
     Utilities.LOG14535.info(s);
-    if (!trashFiles(fs, statuses, conf, purge)) {
+    if (!trashFiles(fs, statuses, conf)) {
       throw new HiveException("Old path " + path + " has not been cleaned up.");
     }
   }
@@ -3772,8 +3703,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
    * @return true if deletion successful
    * @throws IOException
    */
-  public static boolean trashFiles(final FileSystem fs, final FileStatus[] statuses,
-      final Configuration conf, final boolean purge)
+  public static boolean trashFiles(final FileSystem fs, final FileStatus[] statuses, final Configuration conf)
       throws IOException {
     boolean result = true;
 
@@ -3787,13 +3717,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
     final SessionState parentSession = SessionState.get();
     for (final FileStatus status : statuses) {
       if (null == pool) {
-        result &= FileUtils.moveToTrash(fs, status.getPath(), conf, purge);
+        result &= FileUtils.moveToTrash(fs, status.getPath(), conf);
       } else {
         futures.add(pool.submit(new Callable<Boolean>() {
           @Override
           public Boolean call() throws Exception {
             SessionState.setCurrentSessionState(parentSession);
-            return FileUtils.moveToTrash(fs, status.getPath(), conf, purge);
+            return FileUtils.moveToTrash(fs, status.getPath(), conf);
           }
         }));
       }
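
[Editor's note] A standalone sketch of the parallel-trash pattern used by trashFiles above: each path is moved to the trash on an executor and the boolean results are ANDed together. This uses the public Hadoop Trash API rather than Hive's FileUtils.moveToTrash wrapper, and the pool size is an assumed example value:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.Trash;

    public class ParallelTrashSketch {
      public static boolean trashAll(FileSystem fs, List<Path> paths, Configuration conf)
          throws IOException, InterruptedException, ExecutionException {
        ExecutorService pool = Executors.newFixedThreadPool(4);  // assumed pool size
        try {
          List<Future<Boolean>> futures = new ArrayList<>();
          for (Path p : paths) {
            futures.add(pool.submit(() -> Trash.moveToAppropriateTrash(fs, p, conf)));
          }
          boolean result = true;
          for (Future<Boolean> f : futures) {
            result &= f.get();   // AND the results, matching the loop above
          }
          return result;
        } finally {
          pool.shutdown();
        }
      }
    }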

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
index b121eea..1d78b4c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
@@ -34,7 +34,6 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.calcite.adapter.druid.DruidQuery;
 import org.apache.calcite.adapter.druid.DruidSchema;
 import org.apache.calcite.adapter.druid.DruidTable;
-import org.apache.calcite.adapter.druid.LocalInterval;
 import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptMaterialization;
@@ -311,7 +310,7 @@ public final class HiveMaterializedViewsRegistry {
         }
         metrics.add(field.getName());
       }
-      List<LocalInterval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
+      List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
 
       DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false),
           dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
index 4add836..6805c17 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hive.ql.metadata;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -36,10 +35,7 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 
 import com.google.common.collect.Sets;
-import org.apache.hadoop.hive.common.StringInternUtils;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.ql.log.PerfLogger;
-import org.apache.hadoop.hive.ql.session.SessionState;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileStatus;
@@ -53,6 +49,8 @@ import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.ql.metadata.CheckResult.PartitionResult;
+import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
+import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
 import org.apache.thrift.TException;
 
 import com.google.common.util.concurrent.MoreExecutors;
@@ -66,7 +64,6 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 public class HiveMetaStoreChecker {
 
   public static final Logger LOG = LoggerFactory.getLogger(HiveMetaStoreChecker.class);
-  public static final String CLASS_NAME = HiveMetaStoreChecker.class.getName();
 
   private final Hive hive;
   private final HiveConf conf;
@@ -211,28 +208,19 @@ public class HiveMetaStoreChecker {
       return;
     }
 
-    PartitionIterable parts;
+    List<Partition> parts = new ArrayList<Partition>();
     boolean findUnknownPartitions = true;
 
     if (table.isPartitioned()) {
       if (partitions == null || partitions.isEmpty()) {
-        String mode = HiveConf.getVar(conf, ConfVars.HIVEMAPREDMODE, (String) null);
-        if ("strict".equalsIgnoreCase(mode)) {
-          parts = new PartitionIterable(hive, table, null, conf.getIntVar(
-              HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
-        } else {
-          List<Partition> loadedPartitions = new ArrayList<>();
-          PerfLogger perfLogger = SessionState.getPerfLogger();
-          perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
-          loadedPartitions.addAll(hive.getAllPartitionsOf(table));
-          perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
-          parts = new PartitionIterable(loadedPartitions);
-        }
+        PrunedPartitionList prunedPartList =
+        PartitionPruner.prune(table, null, conf, toString(), null);
+        // no partitions specified, let's get all
+        parts.addAll(prunedPartList.getPartitions());
       } else {
         // we're interested in specific partitions,
         // don't check for any others
         findUnknownPartitions = false;
-        List<Partition> loadedPartitions = new ArrayList<>();
         for (Map<String, String> map : partitions) {
           Partition part = hive.getPartition(table, map, false);
           if (part == null) {
@@ -241,13 +229,10 @@ public class HiveMetaStoreChecker {
             pr.setPartitionName(Warehouse.makePartPath(map));
             result.getPartitionsNotInMs().add(pr);
           } else {
-            loadedPartitions.add(part);
+            parts.add(part);
           }
         }
-        parts = new PartitionIterable(loadedPartitions);
       }
-    } else {
-      parts = new PartitionIterable(Collections.<Partition>emptyList());
     }
 
     checkTable(table, parts, findUnknownPartitions, result);
@@ -270,7 +255,7 @@ public class HiveMetaStoreChecker {
    * @throws HiveException
    *           Could not create Partition object
    */
-  void checkTable(Table table, PartitionIterable parts,
+  void checkTable(Table table, List<Partition> parts,
       boolean findUnknownPartitions, CheckResult result) throws IOException,
       HiveException {
 
@@ -299,9 +284,7 @@ public class HiveMetaStoreChecker {
       }
 
       for (int i = 0; i < partition.getSpec().size(); i++) {
-        Path qualifiedPath = partPath.makeQualified(fs);
-        StringInternUtils.internUriStringsInPath(qualifiedPath);
-        partPaths.add(qualifiedPath);
+        partPaths.add(partPath.makeQualified(fs));
         partPath = partPath.getParent();
       }
     }
@@ -331,7 +314,7 @@ public class HiveMetaStoreChecker {
     // now check the table folder and see if we find anything
     // that isn't in the metastore
     Set<Path> allPartDirs = new HashSet<Path>();
-    checkPartitionDirs(tablePath, allPartDirs, Collections.unmodifiableList(table.getPartColNames()));
+    checkPartitionDirs(tablePath, allPartDirs, table.getPartCols().size());
     // don't want the table dir
     allPartDirs.remove(tablePath);
 
@@ -415,14 +398,14 @@ public class HiveMetaStoreChecker {
    *          Start directory
    * @param allDirs
    *          This set will contain the leaf paths at the end.
-   * @param list
+   * @param maxDepth
    *          Specify how deep the search goes.
    * @throws IOException
    *           Thrown if we can't get lists from the fs.
    * @throws HiveException
    */
 
-  private void checkPartitionDirs(Path basePath, Set<Path> allDirs, final List<String> partColNames) throws IOException, HiveException {
+  private void checkPartitionDirs(Path basePath, Set<Path> allDirs, int maxDepth) throws IOException, HiveException {
     // Here we just reuse the THREAD_COUNT configuration for
     // METASTORE_FS_HANDLER_THREADS_COUNT since this results in better performance
     // The number of missing partitions discovered are later added by metastore using a
@@ -440,21 +423,21 @@ public class HiveMetaStoreChecker {
           new ThreadFactoryBuilder().setDaemon(true).setNameFormat("MSCK-GetPaths-%d").build();
       executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(poolSize, threadFactory);
     }
-    checkPartitionDirs(executor, basePath, allDirs, basePath.getFileSystem(conf), partColNames);
+    checkPartitionDirs(executor, basePath, allDirs, basePath.getFileSystem(conf), maxDepth);
 
     executor.shutdown();
   }
 
   private final class PathDepthInfoCallable implements Callable<Path> {
-    private final List<String> partColNames;
+    private final int maxDepth;
     private final FileSystem fs;
     private final ConcurrentLinkedQueue<PathDepthInfo> pendingPaths;
     private final boolean throwException;
     private final PathDepthInfo pd;
 
-    private PathDepthInfoCallable(PathDepthInfo pd, List<String> partColNames, FileSystem fs,
+    private PathDepthInfoCallable(PathDepthInfo pd, int maxDepth, FileSystem fs,
         ConcurrentLinkedQueue<PathDepthInfo> basePaths) {
-      this.partColNames = partColNames;
+      this.maxDepth = maxDepth;
       this.pd = pd;
       this.fs = fs;
       this.pendingPaths = basePaths;
@@ -474,50 +457,39 @@ public class HiveMetaStoreChecker {
       FileStatus[] fileStatuses = fs.listStatus(currentPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
       // found no files under a sub-directory under table base path; it is possible that the table
       // is empty and hence there are no partition sub-directories created under base path
-      if (fileStatuses.length == 0 && currentDepth > 0 && currentDepth < partColNames.size()) {
+      if (fileStatuses.length == 0 && currentDepth > 0 && currentDepth < maxDepth) {
         // since maxDepth is not yet reached, we are missing partition
         // columns in currentPath
-        logOrThrowExceptionWithMsg(
-            "MSCK is missing partition columns under " + currentPath.toString());
+        if (throwException) {
+          throw new HiveException(
+              "MSCK is missing partition columns under " + currentPath.toString());
+        } else {
+          LOG.warn("MSCK is missing partition columns under " + currentPath.toString());
+        }
       } else {
         // found files under currentPath add them to the queue if it is a directory
         for (FileStatus fileStatus : fileStatuses) {
-          if (!fileStatus.isDirectory() && currentDepth < partColNames.size()) {
+          if (!fileStatus.isDirectory() && currentDepth < maxDepth) {
             // found a file at depth which is less than number of partition keys
-            logOrThrowExceptionWithMsg(
-                "MSCK finds a file rather than a directory when it searches for "
-                    + fileStatus.getPath().toString());
-          } else if (fileStatus.isDirectory() && currentDepth < partColNames.size()) {
-            // found a sub-directory at a depth less than number of partition keys
-            // validate if the partition directory name matches with the corresponding
-            // partition colName at currentDepth
-            Path nextPath = fileStatus.getPath();
-            String[] parts = nextPath.getName().split("=");
-            if (parts.length != 2) {
-              logOrThrowExceptionWithMsg("Invalid partition name " + nextPath);
-            } else if (!parts[0].equalsIgnoreCase(partColNames.get(currentDepth))) {
-              logOrThrowExceptionWithMsg(
-                  "Unexpected partition key " + parts[0] + " found at " + nextPath);
+            if (throwException) {
+              throw new HiveException(
+                  "MSCK finds a file rather than a directory when it searches for "
+                      + fileStatus.getPath().toString());
             } else {
-              // add sub-directory to the work queue if maxDepth is not yet reached
-              pendingPaths.add(new PathDepthInfo(nextPath, currentDepth + 1));
+              LOG.warn("MSCK finds a file rather than a directory when it searches for "
+                  + fileStatus.getPath().toString());
             }
+          } else if (fileStatus.isDirectory() && currentDepth < maxDepth) {
+            // add sub-directory to the work queue if maxDepth is not yet reached
+            pendingPaths.add(new PathDepthInfo(fileStatus.getPath(), currentDepth + 1));
           }
         }
-        if (currentDepth == partColNames.size()) {
+        if (currentDepth == maxDepth) {
           return currentPath;
         }
       }
       return null;
     }
-
-    private void logOrThrowExceptionWithMsg(String msg) throws HiveException {
-      if(throwException) {
-        throw new HiveException(msg);
-      } else {
-        LOG.warn(msg);
-      }
-    }
   }
 
   private static class PathDepthInfo {
@@ -531,7 +503,7 @@ public class HiveMetaStoreChecker {
 
   private void checkPartitionDirs(final ExecutorService executor,
       final Path basePath, final Set<Path> result,
-      final FileSystem fs, final List<String> partColNames) throws HiveException {
+      final FileSystem fs, final int maxDepth) throws HiveException {
     try {
       Queue<Future<Path>> futures = new LinkedList<Future<Path>>();
       ConcurrentLinkedQueue<PathDepthInfo> nextLevel = new ConcurrentLinkedQueue<>();
@@ -548,7 +520,7 @@ public class HiveMetaStoreChecker {
         //process each level in parallel
         while(!nextLevel.isEmpty()) {
           futures.add(
-              executor.submit(new PathDepthInfoCallable(nextLevel.poll(), partColNames, fs, tempQueue)));
+              executor.submit(new PathDepthInfoCallable(nextLevel.poll(), maxDepth, fs, tempQueue)));
         }
         while(!futures.isEmpty()) {
           Path p = futures.poll().get();
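
[Editor's note] The depth-based traversal in PathDepthInfoCallable processes one directory level at a time and compares the current depth against the number of partition keys (maxDepth after this revert). A simplified local-filesystem sketch of the same level-by-level walk, using java.io.File instead of the Hadoop FileSystem API; the names are illustrative:

    import java.io.File;
    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.Deque;
    import java.util.List;

    public class PartitionDirWalkSketch {
      /** Collects directories that sit exactly maxDepth levels below basePath. */
      public static List<File> leafPartitionDirs(File basePath, int maxDepth) {
        List<File> leaves = new ArrayList<>();
        Deque<File> current = new ArrayDeque<>();
        current.add(basePath);
        for (int depth = 0; depth < maxDepth; depth++) {
          Deque<File> next = new ArrayDeque<>();
          for (File dir : current) {
            File[] children = dir.listFiles(File::isDirectory);
            if (children != null) {
              for (File child : children) {
                next.add(child);        // key=value style partition directories
              }
            }
          }
          current = next;               // advance one partition-key level
        }
        leaves.addAll(current);
        return leaves;
      }
    }

The code removed by this revert additionally validated that the directory name at each level was a key=value pair whose key matched the partition column for that depth; the restored version only checks the depth itself.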

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index a319b88..8eb011e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -391,7 +391,7 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
       throw new MetaException("Temp table path not set for " + tbl.getTableName());
     } else {
       if (!wh.isDir(tblPath)) {
-        if (!wh.mkdirs(tblPath)) {
+        if (!wh.mkdirs(tblPath, true)) {
           throw new MetaException(tblPath
               + " is not a directory or unable to create one");
         }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index 5efaf70..c53ddad 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -18,19 +18,15 @@
 
 package org.apache.hadoop.hive.ql.metadata;
 
-import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
-import java.util.Set;
 
-import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -508,14 +504,6 @@ public class Table implements Serializable {
     return null;
   }
 
-  public List<String> getPartColNames() {
-    List<String> partColNames = new ArrayList<String>();
-    for (FieldSchema key : getPartCols()) {
-      partColNames.add(key.getName());
-    }
-    return partColNames;
-  }
-
   public boolean isPartitionKey(String colName) {
     return getPartColByName(colName) == null ? false : true;
   }
@@ -948,16 +936,6 @@ public class Table implements Serializable {
     }
   }
 
-  public boolean isEmpty() throws HiveException {
-    Preconditions.checkNotNull(getPath());
-    try {
-      FileSystem fs = FileSystem.get(getPath().toUri(), SessionState.getSessionConf());
-      return !fs.exists(getPath()) || fs.listStatus(getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER).length == 0;
-    } catch (IOException e) {
-      throw new HiveException(e);
-    }
-  }
-
   public boolean isTemporary() {
     return tTable.isTemporary();
   }
@@ -986,7 +964,7 @@ public class Table implements Serializable {
 
   public static void validateColumns(List<FieldSchema> columns, List<FieldSchema> partCols)
       throws HiveException {
-    Set<String> colNames = new HashSet<>();
+    List<String> colNames = new ArrayList<String>();
     for (FieldSchema partCol: columns) {
       String colName = normalize(partCol.getName());
       if (colNames.contains(colName)) {
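
[Editor's note] The hunk above swaps the duplicate-name check in validateColumns from a HashSet back to an ArrayList; contains() on a list is linear, so the check becomes quadratic in the column count, which is usually harmless for realistic schemas. A minimal sketch of the set-based variant for comparison, with illustrative names:

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class DuplicateColumnCheckSketch {
      /** Returns the first duplicate (case-normalized) name, or null if all names are unique. */
      public static String firstDuplicate(List<String> columnNames) {
        Set<String> seen = new HashSet<>();
        for (String name : columnNames) {
          String normalized = name.toLowerCase();
          if (!seen.add(normalized)) {   // add() returns false when the name was already present
            return normalized;
          }
        }
        return null;
      }
    }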

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java
index 2435bf1..044d64c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java
@@ -60,7 +60,7 @@ public enum VirtualColumn {
    */
   GROUPINGID("GROUPING__ID", TypeInfoFactory.intTypeInfo);
 
-  public static final ImmutableSet<String> VIRTUAL_COLUMN_NAMES =
+  public static ImmutableSet<String> VIRTUAL_COLUMN_NAMES =
       ImmutableSet.of(FILENAME.getName(), BLOCKOFFSET.getName(), ROWOFFSET.getName(),
           RAWDATASIZE.getName(), GROUPINGID.getName(), ROWID.getName());
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java
index d59603e..7e39d77 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java
@@ -158,15 +158,14 @@ public class ColumnPruner extends Transform {
       boolean walkChildren = true;
       opStack.push(nd);
 
-      // no need to go further down for a select op with all file sink or script
-      // child since all cols are needed for these ops
-      // However, if one of the children is not file sink or script, we still go down.
+      // no need to go further down for a select op with a file sink or script
+      // child
+      // since all cols are needed for these ops
       if (nd instanceof SelectOperator) {
-        walkChildren = false;
         for (Node child : nd.getChildren()) {
-          if (!(child instanceof FileSinkOperator || child instanceof ScriptOperator)) {
-            walkChildren = true;
-            break;
+          if ((child instanceof FileSinkOperator)
+              || (child instanceof ScriptOperator)) {
+            walkChildren = false;
           }
         }
       }
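
[Editor's note] The two versions above differ in when the pruner descends below a SelectOperator: the removed variant stops only when every child is a FileSink or Script, while the restored variant stops as soon as any child is one. Expressed over a plain list of child types, a hedged sketch of the two predicates (the types and names here are illustrative, not Hive's operator classes):

    import java.util.List;

    public class WalkChildrenSketch {
      interface Op {}
      static class FileSinkOp implements Op {}
      static class ScriptOp implements Op {}

      /** Removed behaviour: keep descending unless ALL children need every column. */
      static boolean walkChildrenAllCheck(List<Op> children) {
        return !children.stream()
            .allMatch(c -> c instanceof FileSinkOp || c instanceof ScriptOp);
      }

      /** Restored behaviour: stop descending if ANY child needs every column. */
      static boolean walkChildrenAnyCheck(List<Op> children) {
        return children.stream()
            .noneMatch(c -> c instanceof FileSinkOp || c instanceof ScriptOp);
      }
    }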

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
index 45839ad..00ec03e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
@@ -671,11 +671,10 @@ public final class ColumnPrunerProcFactory {
 
       List<FieldNode> colsAfterReplacement = new ArrayList<>();
       List<FieldNode> newCols = new ArrayList<>();
-      for (int index = 0; index < numSelColumns; index++) {
-        String colName = outputCols.get(index);
-        FieldNode col = lookupColumn(cols, colName);
+      for (FieldNode col : cols) {
+        int index = outputCols.indexOf(col.getFieldName());
         // colExprMap.size() == size of cols from SEL(*) branch
-        if (col != null) {
+        if (index >= 0 && index < numSelColumns) {
           ExprNodeDesc transformed = colExprMap.get(col.getFieldName());
           colsAfterReplacement = mergeFieldNodesWithDesc(colsAfterReplacement, transformed);
           newCols.add(col);
@@ -714,14 +713,12 @@ public final class ColumnPrunerProcFactory {
       RowSchema rs = op.getSchema();
       ArrayList<ExprNodeDesc> colList = new ArrayList<>();
       List<FieldNode> outputCols = new ArrayList<>();
-      for (ColumnInfo colInfo : rs.getSignature()) {
-        FieldNode col = lookupColumn(cols, colInfo.getInternalName());
-        if (col != null) {
-          // revert output cols of SEL(*) to ExprNodeColumnDesc
-          ExprNodeColumnDesc colExpr = new ExprNodeColumnDesc(colInfo);
-          colList.add(colExpr);
-          outputCols.add(col);
-        }
+      for (FieldNode col : cols) {
+        // revert output cols of SEL(*) to ExprNodeColumnDesc
+        ColumnInfo colInfo = rs.getColumnInfo(col.getFieldName());
+        ExprNodeColumnDesc colExpr = new ExprNodeColumnDesc(colInfo);
+        colList.add(colExpr);
+        outputCols.add(col);
       }
       // replace SEL(*) to SEL(exprs)
       ((SelectDesc)select.getConf()).setSelStarNoCompute(false);
@@ -813,18 +810,11 @@ public final class ColumnPrunerProcFactory {
         ArrayList<String> newOutputColumnNames = new ArrayList<String>();
         ArrayList<ColumnInfo> rs_oldsignature = op.getSchema().getSignature();
         ArrayList<ColumnInfo> rs_newsignature = new ArrayList<ColumnInfo>();
-        // The pruning needs to preserve the order of columns in the input schema
-        Set<String> colNames = new HashSet<String>();
         for (FieldNode col : cols) {
-          colNames.add(col.getFieldName());
-        }
-        for (int i = 0; i < originalOutputColumnNames.size(); i++) {
-          String colName = originalOutputColumnNames.get(i);
-          if (colNames.contains(colName)) {
-            newOutputColumnNames.add(colName);
-            newColList.add(originalColList.get(i));
-            rs_newsignature.add(rs_oldsignature.get(i));
-          }
+          int index = originalOutputColumnNames.indexOf(col.getFieldName());
+          newOutputColumnNames.add(col.getFieldName());
+          newColList.add(originalColList.get(index));
+          rs_newsignature.add(rs_oldsignature.get(index));
         }
         op.getSchema().setSignature(rs_newsignature);
         conf.setColList(newColList);
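
[Editor's note] The block removed above preserved the input schema's column order by iterating the original output names and keeping those present in the pruned set, instead of looking each pruned column up with indexOf. A small order-preserving filter in the same spirit, using illustrative names rather than Hive's FieldNode types:

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class OrderPreservingPruneSketch {
      /** Keeps only the needed columns, in the order they appear in originalColumns. */
      public static List<String> prune(List<String> originalColumns, List<String> neededColumns) {
        Set<String> needed = new HashSet<>(neededColumns);   // O(1) membership checks
        List<String> pruned = new ArrayList<>();
        for (String col : originalColumns) {
          if (needed.contains(col)) {
            pruned.add(col);
          }
        }
        return pruned;
      }
    }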

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
index d0fdb52..e68618a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.hive.ql.exec.TezDummyStoreOperator;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.lib.NodeProcessor;
 import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.optimizer.physical.LlapClusterStateForCompile;
 import org.apache.hadoop.hive.ql.parse.GenTezUtils;
 import org.apache.hadoop.hive.ql.parse.OptimizeTezProcContext;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
@@ -69,8 +68,6 @@ import org.apache.hadoop.util.ReflectionUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * ConvertJoinMapJoin is an optimization that replaces a common join
  * (aka shuffle join) with a map join (aka broadcast or fragment replicate
@@ -98,19 +95,15 @@ public class ConvertJoinMapJoin implements NodeProcessor {
     JoinOperator joinOp = (JoinOperator) nd;
     long maxSize = context.conf.getLongVar(HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD);
 
-    // adjust noconditional task size threshold for LLAP
-    maxSize = getNoConditionalTaskSizeForLlap(maxSize, context.conf);
-    joinOp.getConf().setNoConditionalTaskSize(maxSize);
-
     TezBucketJoinProcCtx tezBucketJoinProcCtx = new TezBucketJoinProcCtx(context.conf);
     if (!context.conf.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN)) {
       // we are just converting to a common merge join operator. The shuffle
       // join in map-reduce case.
-      Object retval = checkAndConvertSMBJoin(context, joinOp, tezBucketJoinProcCtx, maxSize);
+      Object retval = checkAndConvertSMBJoin(context, joinOp, tezBucketJoinProcCtx);
       if (retval == null) {
         return retval;
       } else {
-        fallbackToReduceSideJoin(joinOp, context, maxSize);
+        fallbackToReduceSideJoin(joinOp, context);
         return null;
       }
     }
@@ -127,13 +120,13 @@ public class ConvertJoinMapJoin implements NodeProcessor {
     LOG.info("Estimated number of buckets " + numBuckets);
     int mapJoinConversionPos = getMapJoinConversionPos(joinOp, context, numBuckets, false, maxSize, true);
     if (mapJoinConversionPos < 0) {
-      Object retval = checkAndConvertSMBJoin(context, joinOp, tezBucketJoinProcCtx, maxSize);
+      Object retval = checkAndConvertSMBJoin(context, joinOp, tezBucketJoinProcCtx);
       if (retval == null) {
         return retval;
       } else {
         // only case is full outer join with SMB enabled which is not possible. Convert to regular
         // join.
-        fallbackToReduceSideJoin(joinOp, context, maxSize);
+        fallbackToReduceSideJoin(joinOp, context);
         return null;
       }
     }
@@ -154,7 +147,7 @@ public class ConvertJoinMapJoin implements NodeProcessor {
     if (mapJoinConversionPos < 0) {
       // we are just converting to a common merge join operator. The shuffle
       // join in map-reduce case.
-      fallbackToReduceSideJoin(joinOp, context, maxSize);
+      fallbackToReduceSideJoin(joinOp, context);
       return null;
     }
 
@@ -171,54 +164,15 @@ public class ConvertJoinMapJoin implements NodeProcessor {
     return null;
   }
 
-  @VisibleForTesting
-  public long getNoConditionalTaskSizeForLlap(final long maxSize, final HiveConf conf) {
-    if ("llap".equalsIgnoreCase(conf.getVar(ConfVars.HIVE_EXECUTION_MODE))) {
-      LlapClusterStateForCompile llapInfo = LlapClusterStateForCompile.getClusterInfo(conf);
-      llapInfo.initClusterInfo();
-      final int executorsPerNode;
-      if (!llapInfo.hasClusterInfo()) {
-        LOG.warn("LLAP cluster information not available. Falling back to getting #executors from hiveconf..");
-        executorsPerNode = conf.getIntVar(ConfVars.LLAP_DAEMON_NUM_EXECUTORS);
-      } else {
-        final int numExecutorsPerNodeFromCluster = llapInfo.getNumExecutorsPerNode();
-        if (numExecutorsPerNodeFromCluster == -1) {
-          LOG.warn("Cannot determine executor count from LLAP cluster information. Falling back to getting #executors" +
-            " from hiveconf..");
-          executorsPerNode = conf.getIntVar(ConfVars.LLAP_DAEMON_NUM_EXECUTORS);
-        } else {
-          executorsPerNode = numExecutorsPerNodeFromCluster;
-        }
-      }
-      final int numSessions = conf.getIntVar(ConfVars.HIVE_SERVER2_TEZ_SESSIONS_PER_DEFAULT_QUEUE);
-      if (numSessions > 0) {
-        final int availableSlotsPerQuery = (int) ((double) executorsPerNode / numSessions);
-        final double overSubscriptionFactor = conf.getFloatVar(ConfVars.LLAP_MAPJOIN_MEMORY_OVERSUBSCRIBE_FACTOR);
-        final int maxSlotsPerQuery = conf.getIntVar(ConfVars.LLAP_MEMORY_OVERSUBSCRIPTION_MAX_EXECUTORS_PER_QUERY);
-        final int slotsPerQuery = Math.min(maxSlotsPerQuery, availableSlotsPerQuery);
-        final long llapMaxSize = (long) (maxSize + (maxSize * overSubscriptionFactor * slotsPerQuery));
-        LOG.info("No conditional task size adjusted for LLAP. executorsPerNode: {}, numSessions: {}, " +
-            "availableSlotsPerQuery: {}, overSubscriptionFactor: {}, maxSlotsPerQuery: {}, slotsPerQuery: {}, " +
-            "noconditionalTaskSize: {}, adjustedNoconditionalTaskSize: {}", executorsPerNode, numSessions,
-          availableSlotsPerQuery, overSubscriptionFactor, maxSlotsPerQuery, slotsPerQuery, maxSize, llapMaxSize);
-        return Math.max(maxSize, llapMaxSize);
-      } else {
-        LOG.warn(ConfVars.HIVE_SERVER2_TEZ_SESSIONS_PER_DEFAULT_QUEUE.varname + " returned value {}. Returning {}" +
-          " as no conditional task size for LLAP.", numSessions, maxSize);
-      }
-    }
-    return maxSize;
-  }
-
   @SuppressWarnings("unchecked")
   private Object checkAndConvertSMBJoin(OptimizeTezProcContext context, JoinOperator joinOp,
-    TezBucketJoinProcCtx tezBucketJoinProcCtx, final long maxSize) throws SemanticException {
+      TezBucketJoinProcCtx tezBucketJoinProcCtx) throws SemanticException {
     // we cannot convert to bucket map join, we cannot convert to
     // map join either based on the size. Check if we can convert to SMB join.
     if ((HiveConf.getBoolVar(context.conf, ConfVars.HIVE_AUTO_SORTMERGE_JOIN) == false)
       || ((!HiveConf.getBoolVar(context.conf, ConfVars.HIVE_AUTO_SORTMERGE_JOIN_REDUCE))
           && joinOp.getOpTraits().getNumReduceSinks() >= 2)) {
-      fallbackToReduceSideJoin(joinOp, context, maxSize);
+      fallbackToReduceSideJoin(joinOp, context);
       return null;
     }
     Class<? extends BigTableSelectorForAutoSMJ> bigTableMatcherClass = null;
@@ -247,7 +201,7 @@ public class ConvertJoinMapJoin implements NodeProcessor {
       // contains aliases from sub-query
       // we are just converting to a common merge join operator. The shuffle
       // join in map-reduce case.
-      fallbackToReduceSideJoin(joinOp, context, maxSize);
+      fallbackToReduceSideJoin(joinOp, context);
       return null;
     }
 
@@ -257,7 +211,7 @@ public class ConvertJoinMapJoin implements NodeProcessor {
     } else {
       // we are just converting to a common merge join operator. The shuffle
       // join in map-reduce case.
-      fallbackToReduceSideJoin(joinOp, context, maxSize);
+      fallbackToReduceSideJoin(joinOp, context);
     }
     return null;
   }
@@ -281,7 +235,7 @@ public class ConvertJoinMapJoin implements NodeProcessor {
                   joinOp.getConf().getBaseSrc(), joinOp).getSecond(),
                   null, joinDesc.getExprs(), null, null,
                   joinDesc.getOutputColumnNames(), mapJoinConversionPos, joinDesc.getConds(),
-                  joinDesc.getFilters(), joinDesc.getNoOuterJoin(), null, joinDesc.getNoConditionalTaskSize());
+                  joinDesc.getFilters(), joinDesc.getNoOuterJoin(), null);
       mapJoinDesc.setNullSafes(joinDesc.getNullSafes());
       mapJoinDesc.setFilterMap(joinDesc.getFilterMap());
       mapJoinDesc.setResidualFilterExprs(joinDesc.getResidualFilterExprs());
@@ -840,7 +794,7 @@ public class ConvertJoinMapJoin implements NodeProcessor {
       // The semijoin branch can potentially create a task level cycle
       // with the hashjoin except when it is dynamically partitioned hash
       // join which takes place in a separate task.
-      if (context.parseContext.getRsToSemiJoinBranchInfo().size() > 0
+      if (context.parseContext.getRsOpToTsOpMap().size() > 0
               && removeReduceSink) {
         removeCycleCreatingSemiJoinOps(mapJoinOp, parentSelectOpOfBigTableOp,
                 context.parseContext);
@@ -872,7 +826,7 @@ public class ConvertJoinMapJoin implements NodeProcessor {
       }
 
       ReduceSinkOperator rs = (ReduceSinkOperator) op;
-      TableScanOperator ts = parseContext.getRsToSemiJoinBranchInfo().get(rs).getTsOp();
+      TableScanOperator ts = parseContext.getRsOpToTsOpMap().get(rs);
       if (ts == null) {
         // skip, no semijoin branch
         continue;
@@ -897,11 +851,6 @@ public class ConvertJoinMapJoin implements NodeProcessor {
     }
     if (semiJoinMap.size() > 0) {
       for (ReduceSinkOperator rs : semiJoinMap.keySet()) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Found semijoin optimization from the big table side of a map join, which will cause a task cycle. "
-              + "Removing semijoin "
-              + OperatorUtils.getOpNamePretty(rs) + " - " + OperatorUtils.getOpNamePretty(semiJoinMap.get(rs)));
-        }
         GenTezUtils.removeBranch(rs);
         GenTezUtils.removeSemiJoinOperator(parseContext, rs,
                 semiJoinMap.get(rs));
@@ -974,14 +923,15 @@ public class ConvertJoinMapJoin implements NodeProcessor {
     return numBuckets;
   }
 
-  private boolean convertJoinDynamicPartitionedHashJoin(JoinOperator joinOp, OptimizeTezProcContext context,
-    final long maxSize)
+  private boolean convertJoinDynamicPartitionedHashJoin(JoinOperator joinOp, OptimizeTezProcContext context)
     throws SemanticException {
     // Attempt dynamic partitioned hash join
     // Since we don't have big table index yet, must start with estimate of numReducers
     int numReducers = estimateNumBuckets(joinOp, false);
     LOG.info("Try dynamic partitioned hash join with estimated " + numReducers + " reducers");
-    int bigTablePos = getMapJoinConversionPos(joinOp, context, numReducers, false, maxSize,false);
+    int bigTablePos = getMapJoinConversionPos(joinOp, context, numReducers, false,
+            context.conf.getLongVar(HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD),
+            false);
     if (bigTablePos >= 0) {
       // Now that we have the big table index, get real numReducers value based on big table RS
       ReduceSinkOperator bigTableParentRS =
@@ -1016,11 +966,11 @@ public class ConvertJoinMapJoin implements NodeProcessor {
     return false;
   }
 
-  private void fallbackToReduceSideJoin(JoinOperator joinOp, OptimizeTezProcContext context, final long maxSize)
+  private void fallbackToReduceSideJoin(JoinOperator joinOp, OptimizeTezProcContext context)
       throws SemanticException {
     if (context.conf.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN) &&
         context.conf.getBoolVar(HiveConf.ConfVars.HIVEDYNAMICPARTITIONHASHJOIN)) {
-      if (convertJoinDynamicPartitionedHashJoin(joinOp, context, maxSize)) {
+      if (convertJoinDynamicPartitionedHashJoin(joinOp, context)) {
         return;
       }
     }
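
For reference, the getNoConditionalTaskSizeForLlap method removed above scaled the
HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD value (maxSize) by the extra executor slots a
query is allowed to oversubscribe in LLAP. A minimal sketch of just that arithmetic, with
illustrative constants standing in for the HiveConf lookups in the original method:

    // Sketch of the size adjustment the removed method performed; executorsPerNode,
    // numSessions, overSubscriptionFactor and maxSlotsPerQuery are illustrative values,
    // not the actual ConfVars lookups.
    public class LlapTaskSizeSketch {
      static long adjustedNoConditionalTaskSize(long maxSize, int executorsPerNode,
          int numSessions, float overSubscriptionFactor, int maxSlotsPerQuery) {
        if (numSessions <= 0) {
          return maxSize;  // the original logs a warning and keeps the configured size
        }
        int availableSlotsPerQuery = (int) ((double) executorsPerNode / numSessions);
        int slotsPerQuery = Math.min(maxSlotsPerQuery, availableSlotsPerQuery);
        long llapMaxSize = (long) (maxSize + (maxSize * overSubscriptionFactor * slotsPerQuery));
        return Math.max(maxSize, llapMaxSize);
      }

      public static void main(String[] args) {
        // e.g. 12 executors per daemon, 4 sessions, 20% oversubscription, at most 3 extra slots
        System.out.println(adjustedNoConditionalTaskSize(100_000_000L, 12, 4, 0.2f, 3));
        // prints 160000000: 100MB + 100MB * 0.2 * 3 slots
      }
    }

With the revert applied, the call sites above pass the unadjusted threshold through, which
is why the maxSize parameter disappears from checkAndConvertSMBJoin,
fallbackToReduceSideJoin and convertJoinDynamicPartitionedHashJoin.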

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
index b8c0102..b6db6aa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.ql.optimizer;
 
 import java.util.ArrayList;
-import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
@@ -43,7 +42,12 @@ import org.apache.hadoop.hive.ql.lib.RuleRegExp;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.optimizer.spark.SparkPartitionPruningSinkDesc;
-import org.apache.hadoop.hive.ql.parse.*;
+import org.apache.hadoop.hive.ql.parse.OptimizeTezProcContext;
+import org.apache.hadoop.hive.ql.parse.ParseContext;
+import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
+import org.apache.hadoop.hive.ql.parse.RuntimeValuesInfo;
+import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.spark.OptimizeSparkProcContext;
 import org.apache.hadoop.hive.ql.plan.*;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFBloomFilter.GenericUDAFBloomFilterEvaluator;
@@ -210,34 +214,16 @@ public class DynamicPartitionPruningOptimization implements NodeProcessor {
         } else {
           LOG.debug("Column " + column + " is not a partition column");
           if (semiJoin && ts.getConf().getFilterExpr() != null) {
-            LOG.debug("Initiate semijoin reduction for " + column + " ("
-                + ts.getConf().getFilterExpr().getExprString());
-            // Get the table name from which the min-max values and bloom filter will come.
+            LOG.debug("Initiate semijoin reduction for " + column);
+            // Get the table name from which the min-max values will come.
             Operator<?> op = ctx.generator;
-
             while (!(op == null || op instanceof TableScanOperator)) {
               op = op.getParentOperators().get(0);
             }
             String tableAlias = (op == null ? "" : ((TableScanOperator) op).getConf().getAlias());
-
             keyBaseAlias = ctx.generator.getOperatorId() + "_" + tableAlias + "_" + column;
 
-            Map<String, SemiJoinHint> hints = parseContext.getSemiJoinHints();
-            if (hints != null) {
-              // If hints map has no entry that would imply that user enforced
-              // no runtime filtering.
-              if (hints.size() > 0) {
-                SemiJoinHint sjHint = hints.get(tableAlias);
-                semiJoinAttempted = generateSemiJoinOperatorPlan(
-                        ctx, parseContext, ts, keyBaseAlias, sjHint);
-                if (!semiJoinAttempted && sjHint != null) {
-                  throw new SemanticException("The user hint to enforce semijoin failed required conditions");
-                }
-              }
-            } else {
-              semiJoinAttempted = generateSemiJoinOperatorPlan(
-                      ctx, parseContext, ts, keyBaseAlias, null);
-            }
+            semiJoinAttempted = generateSemiJoinOperatorPlan(ctx, parseContext, ts, keyBaseAlias);
           }
         }
 
@@ -400,13 +386,7 @@ public class DynamicPartitionPruningOptimization implements NodeProcessor {
 
   // Generates plan for min/max when dynamic partition pruning is ruled out.
   private boolean generateSemiJoinOperatorPlan(DynamicListContext ctx, ParseContext parseContext,
-      TableScanOperator ts, String keyBaseAlias, SemiJoinHint sjHint) throws SemanticException {
-
-    // If semijoin hint is enforced, make sure hint is provided
-    if (parseContext.getConf().getBoolVar(ConfVars.TEZ_DYNAMIC_SEMIJOIN_REDUCTION_HINT_ONLY)
-            && sjHint == null) {
-        return false;
-    }
+      TableScanOperator ts, String keyBaseAlias) throws SemanticException {
 
     // we will put a fork in the plan at the source of the reduce sink
     Operator<? extends OperatorDesc> parentOfRS = ctx.generator.getParentOperators().get(0);
@@ -422,62 +402,33 @@ public class DynamicPartitionPruningOptimization implements NodeProcessor {
       exprNodeDesc = exprNodeDesc.getChildren().get(0);
     }
 
-    if (!(exprNodeDesc instanceof ExprNodeColumnDesc)) {
-      // No column found!
-      // Bail out
-      return false;
-    }
-
-    internalColName = ((ExprNodeColumnDesc) exprNodeDesc).getColumn();
-    if (parentOfRS instanceof SelectOperator) {
-      // Make sure the semijoin branch is not on partition column.
-      ExprNodeDesc expr = parentOfRS.getColumnExprMap().get(internalColName);
-      while (!(expr instanceof ExprNodeColumnDesc) &&
-              (expr.getChildren() != null)) {
-        expr = expr.getChildren().get(0);
-      }
-
-      if (!(expr instanceof ExprNodeColumnDesc)) {
-        // No column found!
-        // Bail out
-        return false;
-      }
-
-      ExprNodeColumnDesc colExpr = (ExprNodeColumnDesc) expr;
-      String colName = ExprNodeDescUtils.extractColName(colExpr);
-
-      // Fetch the TableScan Operator.
-      Operator<?> op = parentOfRS.getParentOperators().get(0);
-      while (op != null && !(op instanceof TableScanOperator)) {
-        op = op.getParentOperators().get(0);
-      }
-      assert op != null;
+    if (exprNodeDesc instanceof ExprNodeColumnDesc) {
+      internalColName = ((ExprNodeColumnDesc) exprNodeDesc).getColumn();
+      if (parentOfRS instanceof SelectOperator) {
+        // Make sure the semijoin branch is not on partition column.
+        ExprNodeColumnDesc colExpr = ((ExprNodeColumnDesc) (parentOfRS.
+                getColumnExprMap().get(internalColName)));
+        String colName = ExprNodeDescUtils.extractColName(colExpr);
+
+        // Fetch the TableScan Operator.
+        Operator<?> op = parentOfRS.getParentOperators().get(0);
+        while (op != null && !(op instanceof TableScanOperator)) {
+          op = op.getParentOperators().get(0);
+        }
+        assert op != null;
 
-      Table table = ((TableScanOperator) op).getConf().getTableMetadata();
-      if (table.isPartitionKey(colName)) {
-        // The column is partition column, skip the optimization.
-        return false;
+        Table table = ((TableScanOperator) op).getConf().getTableMetadata();
+        if (table.isPartitionKey(colName)) {
+          // The column is partition column, skip the optimization.
+          return false;
+        }
       }
-    }
-
-    // If hint is provided and only hinted semijoin optimizations should be
-    // created, then skip other columns on the table
-    if (parseContext.getConf().getBoolVar(ConfVars.TEZ_DYNAMIC_SEMIJOIN_REDUCTION_HINT_ONLY)
-            && sjHint.getColName() != null &&
-            !internalColName.equals(sjHint.getColName())) {
+    } else {
+      // No column found!
+      // Bail out
       return false;
     }
 
-    // Check if there already exists a semijoin branch
-    GroupByOperator gb = parseContext.getColExprToGBMap().get(key);
-    if (gb != null) {
-      // Already an existing semijoin branch, reuse it
-      createFinalRsForSemiJoinOp(parseContext, ts, gb, key, keyBaseAlias,
-              ctx.parent.getChildren().get(0), sjHint != null);
-      // done!
-      return true;
-    }
-
     List<ExprNodeDesc> keyExprs = new ArrayList<ExprNodeDesc>();
     keyExprs.add(key);
 
@@ -521,6 +472,8 @@ public class DynamicPartitionPruningOptimization implements NodeProcessor {
             HiveConf.getFloatVar(parseContext.getConf(),
                     HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD);
 
+    ArrayList<ExprNodeDesc> groupByExprs = new ArrayList<ExprNodeDesc>();
+
     // Add min/max and bloom filter aggregations
     List<ObjectInspector> aggFnOIs = new ArrayList<ObjectInspector>();
     aggFnOIs.add(key.getWritableObjectInspector());
@@ -540,17 +493,9 @@ public class DynamicPartitionPruningOptimization implements NodeProcessor {
       AggregationDesc bloomFilter = new AggregationDesc("bloom_filter",
               FunctionRegistry.getGenericUDAFEvaluator("bloom_filter", aggFnOIs, false, false),
               params, false, Mode.PARTIAL1);
-      GenericUDAFBloomFilterEvaluator bloomFilterEval =
-          (GenericUDAFBloomFilterEvaluator) bloomFilter.getGenericUDAFEvaluator();
+      GenericUDAFBloomFilterEvaluator bloomFilterEval = (GenericUDAFBloomFilterEvaluator) bloomFilter.getGenericUDAFEvaluator();
       bloomFilterEval.setSourceOperator(selectOp);
-
-      if (sjHint != null && sjHint.getNumEntries() > 0) {
-        LOG.debug("Setting size for " + keyBaseAlias + " to " + sjHint.getNumEntries() + " based on the hint");
-        bloomFilterEval.setHintEntries(sjHint.getNumEntries());
-      }
       bloomFilterEval.setMaxEntries(parseContext.getConf().getLongVar(ConfVars.TEZ_MAX_BLOOM_FILTER_ENTRIES));
-      bloomFilterEval.setMinEntries(parseContext.getConf().getLongVar(ConfVars.TEZ_MIN_BLOOM_FILTER_ENTRIES));
-      bloomFilterEval.setFactor(parseContext.getConf().getFloatVar(ConfVars.TEZ_BLOOM_FILTER_FACTOR));
       bloomFilter.setGenericUDAFWritableEvaluator(bloomFilterEval);
       aggs.add(min);
       aggs.add(max);
@@ -602,8 +547,6 @@ public class DynamicPartitionPruningOptimization implements NodeProcessor {
     Map<String, ExprNodeDesc> columnExprMap = new HashMap<String, ExprNodeDesc>();
     rsOp.setColumnExprMap(columnExprMap);
 
-    rsOp.getConf().setReducerTraits(EnumSet.of(ReduceSinkDesc.ReducerTraits.QUICKSTART));
-
     // Create the final Group By Operator
     ArrayList<AggregationDesc> aggsFinal = new ArrayList<AggregationDesc>();
     try {
@@ -648,12 +591,7 @@ public class DynamicPartitionPruningOptimization implements NodeProcessor {
               bloomFilterFinalParams, false, Mode.FINAL);
       GenericUDAFBloomFilterEvaluator bloomFilterEval = (GenericUDAFBloomFilterEvaluator) bloomFilter.getGenericUDAFEvaluator();
       bloomFilterEval.setSourceOperator(selectOp);
-      if (sjHint != null && sjHint.getNumEntries() > 0) {
-        bloomFilterEval.setHintEntries(sjHint.getNumEntries());
-      }
       bloomFilterEval.setMaxEntries(parseContext.getConf().getLongVar(ConfVars.TEZ_MAX_BLOOM_FILTER_ENTRIES));
-      bloomFilterEval.setMinEntries(parseContext.getConf().getLongVar(ConfVars.TEZ_MIN_BLOOM_FILTER_ENTRIES));
-      bloomFilterEval.setFactor(parseContext.getConf().getFloatVar(ConfVars.TEZ_BLOOM_FILTER_FACTOR));
       bloomFilter.setGenericUDAFWritableEvaluator(bloomFilterEval);
 
       aggsFinal.add(min);
@@ -679,56 +617,23 @@ public class DynamicPartitionPruningOptimization implements NodeProcessor {
       rsOp.getConf().setOutputOperators(outputOperators);
     }
 
-    createFinalRsForSemiJoinOp(parseContext, ts, groupByOpFinal, key,
-            keyBaseAlias, ctx.parent.getChildren().get(0), sjHint != null);
-
-    return true;
-  }
-
-  private void createFinalRsForSemiJoinOp(
-          ParseContext parseContext, TableScanOperator ts, GroupByOperator gb,
-          ExprNodeDesc key, String keyBaseAlias, ExprNodeDesc colExpr,
-          boolean isHint) throws SemanticException {
-    ArrayList<String> gbOutputNames = new ArrayList<>();
-    // One each for min, max and bloom filter
-    gbOutputNames.add(SemanticAnalyzer.getColumnInternalName(0));
-    gbOutputNames.add(SemanticAnalyzer.getColumnInternalName(1));
-    gbOutputNames.add(SemanticAnalyzer.getColumnInternalName(2));
-
-    int colPos = 0;
-    ArrayList<ExprNodeDesc> rsValueCols = new ArrayList<ExprNodeDesc>();
-    for (int i = 0; i < gbOutputNames.size() - 1; i++) {
-      ExprNodeColumnDesc expr = new ExprNodeColumnDesc(key.getTypeInfo(),
-              gbOutputNames.get(colPos++), "", false);
-      rsValueCols.add(expr);
-    }
-
-    // Bloom Filter uses binary
-    ExprNodeColumnDesc colBFExpr = new ExprNodeColumnDesc(TypeInfoFactory.binaryTypeInfo,
-            gbOutputNames.get(colPos++), "", false);
-    rsValueCols.add(colBFExpr);
-
     // Create the final Reduce Sink Operator
     ReduceSinkDesc rsDescFinal = PlanUtils.getReduceSinkDesc(
             new ArrayList<ExprNodeDesc>(), rsValueCols, gbOutputNames, false,
             -1, 0, 1, Operation.NOT_ACID);
     ReduceSinkOperator rsOpFinal = (ReduceSinkOperator)OperatorFactory.getAndMakeChild(
-            rsDescFinal, new RowSchema(gb.getSchema()), gb);
-    Map<String, ExprNodeDesc> columnExprMap = new HashMap<>();
+            rsDescFinal, new RowSchema(groupByOpFinal.getSchema()), groupByOpFinal);
     rsOpFinal.setColumnExprMap(columnExprMap);
 
-    LOG.debug("DynamicSemiJoinPushdown: Saving RS to TS mapping: " + rsOpFinal + ": " + ts);
-    SemiJoinBranchInfo sjInfo = new SemiJoinBranchInfo(ts, isHint);
-    parseContext.getRsToSemiJoinBranchInfo().put(rsOpFinal, sjInfo);
+    LOG.debug("DynamicMinMaxPushdown: Saving RS to TS mapping: " + rsOpFinal + ": " + ts);
+    parseContext.getRsOpToTsOpMap().put(rsOpFinal, ts);
 
     // for explain purpose
-    if (parseContext.getContext().getExplainConfig() != null &&
-            parseContext.getContext().getExplainConfig().isFormatted()) {
-      List<String> outputOperators = rsOpFinal.getConf().getOutputOperators();
-      if (outputOperators == null) {
-        outputOperators = new ArrayList<>();
-      }
+    if (parseContext.getContext().getExplainConfig() != null
+        && parseContext.getContext().getExplainConfig().isFormatted()) {
+      List<String> outputOperators = new ArrayList<>();
       outputOperators.add(ts.getOperatorId());
+      rsOpFinal.getConf().setOutputOperators(outputOperators);
     }
 
     // Save the info that is required at query time to resolve dynamic/runtime values.
@@ -743,9 +648,9 @@ public class DynamicPartitionPruningOptimization implements NodeProcessor {
     runtimeValuesInfo.setTableDesc(rsFinalTableDesc);
     runtimeValuesInfo.setDynamicValueIDs(dynamicValueIDs);
     runtimeValuesInfo.setColExprs(rsValueCols);
-    runtimeValuesInfo.setTsColExpr(colExpr);
     parseContext.getRsToRuntimeValuesInfoMap().put(rsOpFinal, runtimeValuesInfo);
-    parseContext.getColExprToGBMap().put(key, gb);
+
+    return true;
   }
 
   private Map<Node, Object> collectDynamicPruningConditions(ExprNodeDesc pred, NodeProcessorCtx ctx)
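
The generateSemiJoinOperatorPlan changes above revert to the variant that builds a
min(key)/max(key)/bloom_filter(key) branch on the source side and registers its final
ReduceSink against the target TableScan (getRsOpToTsOpMap) so the values can be resolved at
run time. A conceptual sketch (not Hive code) of what those three runtime values buy the
big-table scan, with a simple predicate standing in for the bloom filter:

    import java.util.List;
    import java.util.function.Predicate;

    // Conceptual illustration of semijoin reduction at run time: the source side publishes
    // min(key), max(key) and a bloom filter over key; the big-table scan uses them to drop
    // non-matching rows before the join.
    public class SemiJoinReductionSketch {
      static long countSurvivors(List<Long> bigTableKeys, long min, long max,
                                 Predicate<Long> mightContain /* bloom_filter stand-in */) {
        return bigTableKeys.stream()
            .filter(k -> k >= min && k <= max)  // min/max runtime values
            .filter(mightContain)               // bloom filter runtime value
            .count();
      }

      public static void main(String[] args) {
        List<Long> bigTable = List.of(1L, 5L, 42L, 999L, 1000L, 7L);
        // Pretend the source side produced min=5, max=1000 and a filter admitting {5, 7, 42}.
        Predicate<Long> bloom = k -> k == 5L || k == 7L || k == 42L;
        System.out.println(countSurvivors(bigTable, 5L, 1000L, bloom));  // prints 3
      }
    }

A real bloom filter can return false positives, so all three checks can only discard rows
that cannot match the join key; they never change the join result.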


[16/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 1de9056..f547651 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -205,15 +205,6 @@ class Iface(fb303.FacebookService.Iface):
     """
     pass
 
-  def truncate_table(self, dbName, tableName, partNames):
-    """
-    Parameters:
-     - dbName
-     - tableName
-     - partNames
-    """
-    pass
-
   def get_tables(self, db_name, pattern):
     """
     Parameters:
@@ -2138,41 +2129,6 @@ class Client(fb303.FacebookService.Client, Iface):
       raise result.o3
     return
 
-  def truncate_table(self, dbName, tableName, partNames):
-    """
-    Parameters:
-     - dbName
-     - tableName
-     - partNames
-    """
-    self.send_truncate_table(dbName, tableName, partNames)
-    self.recv_truncate_table()
-
-  def send_truncate_table(self, dbName, tableName, partNames):
-    self._oprot.writeMessageBegin('truncate_table', TMessageType.CALL, self._seqid)
-    args = truncate_table_args()
-    args.dbName = dbName
-    args.tableName = tableName
-    args.partNames = partNames
-    args.write(self._oprot)
-    self._oprot.writeMessageEnd()
-    self._oprot.trans.flush()
-
-  def recv_truncate_table(self):
-    iprot = self._iprot
-    (fname, mtype, rseqid) = iprot.readMessageBegin()
-    if mtype == TMessageType.EXCEPTION:
-      x = TApplicationException()
-      x.read(iprot)
-      iprot.readMessageEnd()
-      raise x
-    result = truncate_table_result()
-    result.read(iprot)
-    iprot.readMessageEnd()
-    if result.o1 is not None:
-      raise result.o1
-    return
-
   def get_tables(self, db_name, pattern):
     """
     Parameters:
@@ -6984,7 +6940,6 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
     self._processMap["add_foreign_key"] = Processor.process_add_foreign_key
     self._processMap["drop_table"] = Processor.process_drop_table
     self._processMap["drop_table_with_environment_context"] = Processor.process_drop_table_with_environment_context
-    self._processMap["truncate_table"] = Processor.process_truncate_table
     self._processMap["get_tables"] = Processor.process_get_tables
     self._processMap["get_tables_by_type"] = Processor.process_get_tables_by_type
     self._processMap["get_table_meta"] = Processor.process_get_table_meta
@@ -7758,28 +7713,6 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
-  def process_truncate_table(self, seqid, iprot, oprot):
-    args = truncate_table_args()
-    args.read(iprot)
-    iprot.readMessageEnd()
-    result = truncate_table_result()
-    try:
-      self._handler.truncate_table(args.dbName, args.tableName, args.partNames)
-      msg_type = TMessageType.REPLY
-    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
-      raise
-    except MetaException as o1:
-      msg_type = TMessageType.REPLY
-      result.o1 = o1
-    except Exception as ex:
-      msg_type = TMessageType.EXCEPTION
-      logging.exception(ex)
-      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
-    oprot.writeMessageBegin("truncate_table", msg_type, seqid)
-    result.write(oprot)
-    oprot.writeMessageEnd()
-    oprot.trans.flush()
-
   def process_get_tables(self, seqid, iprot, oprot):
     args = get_tables_args()
     args.read(iprot)
@@ -15014,171 +14947,6 @@ class drop_table_with_environment_context_result:
   def __ne__(self, other):
     return not (self == other)
 
-class truncate_table_args:
-  """
-  Attributes:
-   - dbName
-   - tableName
-   - partNames
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'dbName', None, None, ), # 1
-    (2, TType.STRING, 'tableName', None, None, ), # 2
-    (3, TType.LIST, 'partNames', (TType.STRING,None), None, ), # 3
-  )
-
-  def __init__(self, dbName=None, tableName=None, partNames=None,):
-    self.dbName = dbName
-    self.tableName = tableName
-    self.partNames = partNames
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.dbName = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRING:
-          self.tableName = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.LIST:
-          self.partNames = []
-          (_etype669, _size666) = iprot.readListBegin()
-          for _i670 in xrange(_size666):
-            _elem671 = iprot.readString()
-            self.partNames.append(_elem671)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('truncate_table_args')
-    if self.dbName is not None:
-      oprot.writeFieldBegin('dbName', TType.STRING, 1)
-      oprot.writeString(self.dbName)
-      oprot.writeFieldEnd()
-    if self.tableName is not None:
-      oprot.writeFieldBegin('tableName', TType.STRING, 2)
-      oprot.writeString(self.tableName)
-      oprot.writeFieldEnd()
-    if self.partNames is not None:
-      oprot.writeFieldBegin('partNames', TType.LIST, 3)
-      oprot.writeListBegin(TType.STRING, len(self.partNames))
-      for iter672 in self.partNames:
-        oprot.writeString(iter672)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.dbName)
-    value = (value * 31) ^ hash(self.tableName)
-    value = (value * 31) ^ hash(self.partNames)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class truncate_table_result:
-  """
-  Attributes:
-   - o1
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
-  )
-
-  def __init__(self, o1=None,):
-    self.o1 = o1
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRUCT:
-          self.o1 = MetaException()
-          self.o1.read(iprot)
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('truncate_table_result')
-    if self.o1 is not None:
-      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
-      self.o1.write(oprot)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.o1)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
 class get_tables_args:
   """
   Attributes:
@@ -15285,10 +15053,10 @@ class get_tables_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype676, _size673) = iprot.readListBegin()
-          for _i677 in xrange(_size673):
-            _elem678 = iprot.readString()
-            self.success.append(_elem678)
+          (_etype669, _size666) = iprot.readListBegin()
+          for _i670 in xrange(_size666):
+            _elem671 = iprot.readString()
+            self.success.append(_elem671)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -15311,8 +15079,8 @@ class get_tables_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter679 in self.success:
-        oprot.writeString(iter679)
+      for iter672 in self.success:
+        oprot.writeString(iter672)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -15462,10 +15230,10 @@ class get_tables_by_type_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype683, _size680) = iprot.readListBegin()
-          for _i684 in xrange(_size680):
-            _elem685 = iprot.readString()
-            self.success.append(_elem685)
+          (_etype676, _size673) = iprot.readListBegin()
+          for _i677 in xrange(_size673):
+            _elem678 = iprot.readString()
+            self.success.append(_elem678)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -15488,8 +15256,8 @@ class get_tables_by_type_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter686 in self.success:
-        oprot.writeString(iter686)
+      for iter679 in self.success:
+        oprot.writeString(iter679)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -15562,10 +15330,10 @@ class get_table_meta_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.tbl_types = []
-          (_etype690, _size687) = iprot.readListBegin()
-          for _i691 in xrange(_size687):
-            _elem692 = iprot.readString()
-            self.tbl_types.append(_elem692)
+          (_etype683, _size680) = iprot.readListBegin()
+          for _i684 in xrange(_size680):
+            _elem685 = iprot.readString()
+            self.tbl_types.append(_elem685)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -15590,8 +15358,8 @@ class get_table_meta_args:
     if self.tbl_types is not None:
       oprot.writeFieldBegin('tbl_types', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.tbl_types))
-      for iter693 in self.tbl_types:
-        oprot.writeString(iter693)
+      for iter686 in self.tbl_types:
+        oprot.writeString(iter686)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -15647,11 +15415,11 @@ class get_table_meta_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype697, _size694) = iprot.readListBegin()
-          for _i698 in xrange(_size694):
-            _elem699 = TableMeta()
-            _elem699.read(iprot)
-            self.success.append(_elem699)
+          (_etype690, _size687) = iprot.readListBegin()
+          for _i691 in xrange(_size687):
+            _elem692 = TableMeta()
+            _elem692.read(iprot)
+            self.success.append(_elem692)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -15674,8 +15442,8 @@ class get_table_meta_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter700 in self.success:
-        iter700.write(oprot)
+      for iter693 in self.success:
+        iter693.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -15799,10 +15567,10 @@ class get_all_tables_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype704, _size701) = iprot.readListBegin()
-          for _i705 in xrange(_size701):
-            _elem706 = iprot.readString()
-            self.success.append(_elem706)
+          (_etype697, _size694) = iprot.readListBegin()
+          for _i698 in xrange(_size694):
+            _elem699 = iprot.readString()
+            self.success.append(_elem699)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -15825,8 +15593,8 @@ class get_all_tables_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter707 in self.success:
-        oprot.writeString(iter707)
+      for iter700 in self.success:
+        oprot.writeString(iter700)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -16062,10 +15830,10 @@ class get_table_objects_by_name_args:
       elif fid == 2:
         if ftype == TType.LIST:
           self.tbl_names = []
-          (_etype711, _size708) = iprot.readListBegin()
-          for _i712 in xrange(_size708):
-            _elem713 = iprot.readString()
-            self.tbl_names.append(_elem713)
+          (_etype704, _size701) = iprot.readListBegin()
+          for _i705 in xrange(_size701):
+            _elem706 = iprot.readString()
+            self.tbl_names.append(_elem706)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -16086,8 +15854,8 @@ class get_table_objects_by_name_args:
     if self.tbl_names is not None:
       oprot.writeFieldBegin('tbl_names', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.tbl_names))
-      for iter714 in self.tbl_names:
-        oprot.writeString(iter714)
+      for iter707 in self.tbl_names:
+        oprot.writeString(iter707)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -16139,11 +15907,11 @@ class get_table_objects_by_name_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype718, _size715) = iprot.readListBegin()
-          for _i719 in xrange(_size715):
-            _elem720 = Table()
-            _elem720.read(iprot)
-            self.success.append(_elem720)
+          (_etype711, _size708) = iprot.readListBegin()
+          for _i712 in xrange(_size708):
+            _elem713 = Table()
+            _elem713.read(iprot)
+            self.success.append(_elem713)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -16160,8 +15928,8 @@ class get_table_objects_by_name_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter721 in self.success:
-        iter721.write(oprot)
+      for iter714 in self.success:
+        iter714.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -16644,10 +16412,10 @@ class get_table_names_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype725, _size722) = iprot.readListBegin()
-          for _i726 in xrange(_size722):
-            _elem727 = iprot.readString()
-            self.success.append(_elem727)
+          (_etype718, _size715) = iprot.readListBegin()
+          for _i719 in xrange(_size715):
+            _elem720 = iprot.readString()
+            self.success.append(_elem720)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -16682,8 +16450,8 @@ class get_table_names_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter728 in self.success:
-        oprot.writeString(iter728)
+      for iter721 in self.success:
+        oprot.writeString(iter721)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -17653,11 +17421,11 @@ class add_partitions_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype732, _size729) = iprot.readListBegin()
-          for _i733 in xrange(_size729):
-            _elem734 = Partition()
-            _elem734.read(iprot)
-            self.new_parts.append(_elem734)
+          (_etype725, _size722) = iprot.readListBegin()
+          for _i726 in xrange(_size722):
+            _elem727 = Partition()
+            _elem727.read(iprot)
+            self.new_parts.append(_elem727)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17674,8 +17442,8 @@ class add_partitions_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter735 in self.new_parts:
-        iter735.write(oprot)
+      for iter728 in self.new_parts:
+        iter728.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -17833,11 +17601,11 @@ class add_partitions_pspec_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype739, _size736) = iprot.readListBegin()
-          for _i740 in xrange(_size736):
-            _elem741 = PartitionSpec()
-            _elem741.read(iprot)
-            self.new_parts.append(_elem741)
+          (_etype732, _size729) = iprot.readListBegin()
+          for _i733 in xrange(_size729):
+            _elem734 = PartitionSpec()
+            _elem734.read(iprot)
+            self.new_parts.append(_elem734)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17854,8 +17622,8 @@ class add_partitions_pspec_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter742 in self.new_parts:
-        iter742.write(oprot)
+      for iter735 in self.new_parts:
+        iter735.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -18029,10 +17797,10 @@ class append_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype746, _size743) = iprot.readListBegin()
-          for _i747 in xrange(_size743):
-            _elem748 = iprot.readString()
-            self.part_vals.append(_elem748)
+          (_etype739, _size736) = iprot.readListBegin()
+          for _i740 in xrange(_size736):
+            _elem741 = iprot.readString()
+            self.part_vals.append(_elem741)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18057,8 +17825,8 @@ class append_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter749 in self.part_vals:
-        oprot.writeString(iter749)
+      for iter742 in self.part_vals:
+        oprot.writeString(iter742)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -18411,10 +18179,10 @@ class append_partition_with_environment_context_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype753, _size750) = iprot.readListBegin()
-          for _i754 in xrange(_size750):
-            _elem755 = iprot.readString()
-            self.part_vals.append(_elem755)
+          (_etype746, _size743) = iprot.readListBegin()
+          for _i747 in xrange(_size743):
+            _elem748 = iprot.readString()
+            self.part_vals.append(_elem748)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18445,8 +18213,8 @@ class append_partition_with_environment_context_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter756 in self.part_vals:
-        oprot.writeString(iter756)
+      for iter749 in self.part_vals:
+        oprot.writeString(iter749)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.environment_context is not None:
@@ -19041,10 +18809,10 @@ class drop_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype760, _size757) = iprot.readListBegin()
-          for _i761 in xrange(_size757):
-            _elem762 = iprot.readString()
-            self.part_vals.append(_elem762)
+          (_etype753, _size750) = iprot.readListBegin()
+          for _i754 in xrange(_size750):
+            _elem755 = iprot.readString()
+            self.part_vals.append(_elem755)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19074,8 +18842,8 @@ class drop_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter763 in self.part_vals:
-        oprot.writeString(iter763)
+      for iter756 in self.part_vals:
+        oprot.writeString(iter756)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.deleteData is not None:
@@ -19248,10 +19016,10 @@ class drop_partition_with_environment_context_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype767, _size764) = iprot.readListBegin()
-          for _i768 in xrange(_size764):
-            _elem769 = iprot.readString()
-            self.part_vals.append(_elem769)
+          (_etype760, _size757) = iprot.readListBegin()
+          for _i761 in xrange(_size757):
+            _elem762 = iprot.readString()
+            self.part_vals.append(_elem762)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19287,8 +19055,8 @@ class drop_partition_with_environment_context_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter770 in self.part_vals:
-        oprot.writeString(iter770)
+      for iter763 in self.part_vals:
+        oprot.writeString(iter763)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.deleteData is not None:
@@ -20025,10 +19793,10 @@ class get_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype774, _size771) = iprot.readListBegin()
-          for _i775 in xrange(_size771):
-            _elem776 = iprot.readString()
-            self.part_vals.append(_elem776)
+          (_etype767, _size764) = iprot.readListBegin()
+          for _i768 in xrange(_size764):
+            _elem769 = iprot.readString()
+            self.part_vals.append(_elem769)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20053,8 +19821,8 @@ class get_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter777 in self.part_vals:
-        oprot.writeString(iter777)
+      for iter770 in self.part_vals:
+        oprot.writeString(iter770)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -20213,11 +19981,11 @@ class exchange_partition_args:
       if fid == 1:
         if ftype == TType.MAP:
           self.partitionSpecs = {}
-          (_ktype779, _vtype780, _size778 ) = iprot.readMapBegin()
-          for _i782 in xrange(_size778):
-            _key783 = iprot.readString()
-            _val784 = iprot.readString()
-            self.partitionSpecs[_key783] = _val784
+          (_ktype772, _vtype773, _size771 ) = iprot.readMapBegin()
+          for _i775 in xrange(_size771):
+            _key776 = iprot.readString()
+            _val777 = iprot.readString()
+            self.partitionSpecs[_key776] = _val777
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -20254,9 +20022,9 @@ class exchange_partition_args:
     if self.partitionSpecs is not None:
       oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
-      for kiter785,viter786 in self.partitionSpecs.items():
-        oprot.writeString(kiter785)
-        oprot.writeString(viter786)
+      for kiter778,viter779 in self.partitionSpecs.items():
+        oprot.writeString(kiter778)
+        oprot.writeString(viter779)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.source_db is not None:
@@ -20461,11 +20229,11 @@ class exchange_partitions_args:
       if fid == 1:
         if ftype == TType.MAP:
           self.partitionSpecs = {}
-          (_ktype788, _vtype789, _size787 ) = iprot.readMapBegin()
-          for _i791 in xrange(_size787):
-            _key792 = iprot.readString()
-            _val793 = iprot.readString()
-            self.partitionSpecs[_key792] = _val793
+          (_ktype781, _vtype782, _size780 ) = iprot.readMapBegin()
+          for _i784 in xrange(_size780):
+            _key785 = iprot.readString()
+            _val786 = iprot.readString()
+            self.partitionSpecs[_key785] = _val786
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -20502,9 +20270,9 @@ class exchange_partitions_args:
     if self.partitionSpecs is not None:
       oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
-      for kiter794,viter795 in self.partitionSpecs.items():
-        oprot.writeString(kiter794)
-        oprot.writeString(viter795)
+      for kiter787,viter788 in self.partitionSpecs.items():
+        oprot.writeString(kiter787)
+        oprot.writeString(viter788)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.source_db is not None:
@@ -20587,11 +20355,11 @@ class exchange_partitions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype799, _size796) = iprot.readListBegin()
-          for _i800 in xrange(_size796):
-            _elem801 = Partition()
-            _elem801.read(iprot)
-            self.success.append(_elem801)
+          (_etype792, _size789) = iprot.readListBegin()
+          for _i793 in xrange(_size789):
+            _elem794 = Partition()
+            _elem794.read(iprot)
+            self.success.append(_elem794)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20632,8 +20400,8 @@ class exchange_partitions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter802 in self.success:
-        iter802.write(oprot)
+      for iter795 in self.success:
+        iter795.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -20727,10 +20495,10 @@ class get_partition_with_auth_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype806, _size803) = iprot.readListBegin()
-          for _i807 in xrange(_size803):
-            _elem808 = iprot.readString()
-            self.part_vals.append(_elem808)
+          (_etype799, _size796) = iprot.readListBegin()
+          for _i800 in xrange(_size796):
+            _elem801 = iprot.readString()
+            self.part_vals.append(_elem801)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20742,10 +20510,10 @@ class get_partition_with_auth_args:
       elif fid == 5:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype812, _size809) = iprot.readListBegin()
-          for _i813 in xrange(_size809):
-            _elem814 = iprot.readString()
-            self.group_names.append(_elem814)
+          (_etype805, _size802) = iprot.readListBegin()
+          for _i806 in xrange(_size802):
+            _elem807 = iprot.readString()
+            self.group_names.append(_elem807)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20770,8 +20538,8 @@ class get_partition_with_auth_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter815 in self.part_vals:
-        oprot.writeString(iter815)
+      for iter808 in self.part_vals:
+        oprot.writeString(iter808)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.user_name is not None:
@@ -20781,8 +20549,8 @@ class get_partition_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 5)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter816 in self.group_names:
-        oprot.writeString(iter816)
+      for iter809 in self.group_names:
+        oprot.writeString(iter809)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -21211,11 +20979,11 @@ class get_partitions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype820, _size817) = iprot.readListBegin()
-          for _i821 in xrange(_size817):
-            _elem822 = Partition()
-            _elem822.read(iprot)
-            self.success.append(_elem822)
+          (_etype813, _size810) = iprot.readListBegin()
+          for _i814 in xrange(_size810):
+            _elem815 = Partition()
+            _elem815.read(iprot)
+            self.success.append(_elem815)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21244,8 +21012,8 @@ class get_partitions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter823 in self.success:
-        iter823.write(oprot)
+      for iter816 in self.success:
+        iter816.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -21339,10 +21107,10 @@ class get_partitions_with_auth_args:
       elif fid == 5:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype827, _size824) = iprot.readListBegin()
-          for _i828 in xrange(_size824):
-            _elem829 = iprot.readString()
-            self.group_names.append(_elem829)
+          (_etype820, _size817) = iprot.readListBegin()
+          for _i821 in xrange(_size817):
+            _elem822 = iprot.readString()
+            self.group_names.append(_elem822)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21375,8 +21143,8 @@ class get_partitions_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 5)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter830 in self.group_names:
-        oprot.writeString(iter830)
+      for iter823 in self.group_names:
+        oprot.writeString(iter823)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -21437,11 +21205,11 @@ class get_partitions_with_auth_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype834, _size831) = iprot.readListBegin()
-          for _i835 in xrange(_size831):
-            _elem836 = Partition()
-            _elem836.read(iprot)
-            self.success.append(_elem836)
+          (_etype827, _size824) = iprot.readListBegin()
+          for _i828 in xrange(_size824):
+            _elem829 = Partition()
+            _elem829.read(iprot)
+            self.success.append(_elem829)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21470,8 +21238,8 @@ class get_partitions_with_auth_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter837 in self.success:
-        iter837.write(oprot)
+      for iter830 in self.success:
+        iter830.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -21629,11 +21397,11 @@ class get_partitions_pspec_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype841, _size838) = iprot.readListBegin()
-          for _i842 in xrange(_size838):
-            _elem843 = PartitionSpec()
-            _elem843.read(iprot)
-            self.success.append(_elem843)
+          (_etype834, _size831) = iprot.readListBegin()
+          for _i835 in xrange(_size831):
+            _elem836 = PartitionSpec()
+            _elem836.read(iprot)
+            self.success.append(_elem836)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21662,8 +21430,8 @@ class get_partitions_pspec_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter844 in self.success:
-        iter844.write(oprot)
+      for iter837 in self.success:
+        iter837.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -21818,10 +21586,10 @@ class get_partition_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype848, _size845) = iprot.readListBegin()
-          for _i849 in xrange(_size845):
-            _elem850 = iprot.readString()
-            self.success.append(_elem850)
+          (_etype841, _size838) = iprot.readListBegin()
+          for _i842 in xrange(_size838):
+            _elem843 = iprot.readString()
+            self.success.append(_elem843)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21844,8 +21612,8 @@ class get_partition_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter851 in self.success:
-        oprot.writeString(iter851)
+      for iter844 in self.success:
+        oprot.writeString(iter844)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o2 is not None:
@@ -21921,10 +21689,10 @@ class get_partitions_ps_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype855, _size852) = iprot.readListBegin()
-          for _i856 in xrange(_size852):
-            _elem857 = iprot.readString()
-            self.part_vals.append(_elem857)
+          (_etype848, _size845) = iprot.readListBegin()
+          for _i849 in xrange(_size845):
+            _elem850 = iprot.readString()
+            self.part_vals.append(_elem850)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21954,8 +21722,8 @@ class get_partitions_ps_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter858 in self.part_vals:
-        oprot.writeString(iter858)
+      for iter851 in self.part_vals:
+        oprot.writeString(iter851)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -22019,11 +21787,11 @@ class get_partitions_ps_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype862, _size859) = iprot.readListBegin()
-          for _i863 in xrange(_size859):
-            _elem864 = Partition()
-            _elem864.read(iprot)
-            self.success.append(_elem864)
+          (_etype855, _size852) = iprot.readListBegin()
+          for _i856 in xrange(_size852):
+            _elem857 = Partition()
+            _elem857.read(iprot)
+            self.success.append(_elem857)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22052,8 +21820,8 @@ class get_partitions_ps_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter865 in self.success:
-        iter865.write(oprot)
+      for iter858 in self.success:
+        iter858.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -22140,10 +21908,10 @@ class get_partitions_ps_with_auth_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype869, _size866) = iprot.readListBegin()
-          for _i870 in xrange(_size866):
-            _elem871 = iprot.readString()
-            self.part_vals.append(_elem871)
+          (_etype862, _size859) = iprot.readListBegin()
+          for _i863 in xrange(_size859):
+            _elem864 = iprot.readString()
+            self.part_vals.append(_elem864)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22160,10 +21928,10 @@ class get_partitions_ps_with_auth_args:
       elif fid == 6:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype875, _size872) = iprot.readListBegin()
-          for _i876 in xrange(_size872):
-            _elem877 = iprot.readString()
-            self.group_names.append(_elem877)
+          (_etype868, _size865) = iprot.readListBegin()
+          for _i869 in xrange(_size865):
+            _elem870 = iprot.readString()
+            self.group_names.append(_elem870)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22188,8 +21956,8 @@ class get_partitions_ps_with_auth_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter878 in self.part_vals:
-        oprot.writeString(iter878)
+      for iter871 in self.part_vals:
+        oprot.writeString(iter871)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -22203,8 +21971,8 @@ class get_partitions_ps_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 6)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter879 in self.group_names:
-        oprot.writeString(iter879)
+      for iter872 in self.group_names:
+        oprot.writeString(iter872)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -22266,11 +22034,11 @@ class get_partitions_ps_with_auth_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype883, _size880) = iprot.readListBegin()
-          for _i884 in xrange(_size880):
-            _elem885 = Partition()
-            _elem885.read(iprot)
-            self.success.append(_elem885)
+          (_etype876, _size873) = iprot.readListBegin()
+          for _i877 in xrange(_size873):
+            _elem878 = Partition()
+            _elem878.read(iprot)
+            self.success.append(_elem878)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22299,8 +22067,8 @@ class get_partitions_ps_with_auth_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter886 in self.success:
-        iter886.write(oprot)
+      for iter879 in self.success:
+        iter879.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -22381,10 +22149,10 @@ class get_partition_names_ps_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype890, _size887) = iprot.readListBegin()
-          for _i891 in xrange(_size887):
-            _elem892 = iprot.readString()
-            self.part_vals.append(_elem892)
+          (_etype883, _size880) = iprot.readListBegin()
+          for _i884 in xrange(_size880):
+            _elem885 = iprot.readString()
+            self.part_vals.append(_elem885)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22414,8 +22182,8 @@ class get_partition_names_ps_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter893 in self.part_vals:
-        oprot.writeString(iter893)
+      for iter886 in self.part_vals:
+        oprot.writeString(iter886)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -22479,10 +22247,10 @@ class get_partition_names_ps_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype897, _size894) = iprot.readListBegin()
-          for _i898 in xrange(_size894):
-            _elem899 = iprot.readString()
-            self.success.append(_elem899)
+          (_etype890, _size887) = iprot.readListBegin()
+          for _i891 in xrange(_size887):
+            _elem892 = iprot.readString()
+            self.success.append(_elem892)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22511,8 +22279,8 @@ class get_partition_names_ps_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter900 in self.success:
-        oprot.writeString(iter900)
+      for iter893 in self.success:
+        oprot.writeString(iter893)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -22683,11 +22451,11 @@ class get_partitions_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype904, _size901) = iprot.readListBegin()
-          for _i905 in xrange(_size901):
-            _elem906 = Partition()
-            _elem906.read(iprot)
-            self.success.append(_elem906)
+          (_etype897, _size894) = iprot.readListBegin()
+          for _i898 in xrange(_size894):
+            _elem899 = Partition()
+            _elem899.read(iprot)
+            self.success.append(_elem899)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22716,8 +22484,8 @@ class get_partitions_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter907 in self.success:
-        iter907.write(oprot)
+      for iter900 in self.success:
+        iter900.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -22888,11 +22656,11 @@ class get_part_specs_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype911, _size908) = iprot.readListBegin()
-          for _i912 in xrange(_size908):
-            _elem913 = PartitionSpec()
-            _elem913.read(iprot)
-            self.success.append(_elem913)
+          (_etype904, _size901) = iprot.readListBegin()
+          for _i905 in xrange(_size901):
+            _elem906 = PartitionSpec()
+            _elem906.read(iprot)
+            self.success.append(_elem906)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22921,8 +22689,8 @@ class get_part_specs_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter914 in self.success:
-        iter914.write(oprot)
+      for iter907 in self.success:
+        iter907.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -23342,10 +23110,10 @@ class get_partitions_by_names_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.names = []
-          (_etype918, _size915) = iprot.readListBegin()
-          for _i919 in xrange(_size915):
-            _elem920 = iprot.readString()
-            self.names.append(_elem920)
+          (_etype911, _size908) = iprot.readListBegin()
+          for _i912 in xrange(_size908):
+            _elem913 = iprot.readString()
+            self.names.append(_elem913)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23370,8 +23138,8 @@ class get_partitions_by_names_args:
     if self.names is not None:
       oprot.writeFieldBegin('names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.names))
-      for iter921 in self.names:
-        oprot.writeString(iter921)
+      for iter914 in self.names:
+        oprot.writeString(iter914)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -23430,11 +23198,11 @@ class get_partitions_by_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype925, _size922) = iprot.readListBegin()
-          for _i926 in xrange(_size922):
-            _elem927 = Partition()
-            _elem927.read(iprot)
-            self.success.append(_elem927)
+          (_etype918, _size915) = iprot.readListBegin()
+          for _i919 in xrange(_size915):
+            _elem920 = Partition()
+            _elem920.read(iprot)
+            self.success.append(_elem920)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23463,8 +23231,8 @@ class get_partitions_by_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter928 in self.success:
-        iter928.write(oprot)
+      for iter921 in self.success:
+        iter921.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -23714,11 +23482,11 @@ class alter_partitions_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype932, _size929) = iprot.readListBegin()
-          for _i933 in xrange(_size929):
-            _elem934 = Partition()
-            _elem934.read(iprot)
-            self.new_parts.append(_elem934)
+          (_etype925, _size922) = iprot.readListBegin()
+          for _i926 in xrange(_size922):
+            _elem927 = Partition()
+            _elem927.read(iprot)
+            self.new_parts.append(_elem927)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23743,8 +23511,8 @@ class alter_partitions_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter935 in self.new_parts:
-        iter935.write(oprot)
+      for iter928 in self.new_parts:
+        iter928.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -23897,11 +23665,11 @@ class alter_partitions_with_environment_context_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype939, _size936) = iprot.readListBegin()
-          for _i940 in xrange(_size936):
-            _elem941 = Partition()
-            _elem941.read(iprot)
-            self.new_parts.append(_elem941)
+          (_etype932, _size929) = iprot.readListBegin()
+          for _i933 in xrange(_size929):
+            _elem934 = Partition()
+            _elem934.read(iprot)
+            self.new_parts.append(_elem934)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23932,8 +23700,8 @@ class alter_partitions_with_environment_context_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter942 in self.new_parts:
-        iter942.write(oprot)
+      for iter935 in self.new_parts:
+        iter935.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.environment_context is not None:
@@ -24277,10 +24045,10 @@ class rename_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype946, _size943) = iprot.readListBegin()
-          for _i947 in xrange(_size943):
-            _elem948 = iprot.readString()
-            self.part_vals.append(_elem948)
+          (_etype939, _size936) = iprot.readListBegin()
+          for _i940 in xrange(_size936):
+            _elem941 = iprot.readString()
+            self.part_vals.append(_elem941)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24311,8 +24079,8 @@ class rename_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter949 in self.part_vals:
-        oprot.writeString(iter949)
+      for iter942 in self.part_vals:
+        oprot.writeString(iter942)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.new_part is not None:
@@ -24454,10 +24222,10 @@ class partition_name_has_valid_characters_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype953, _size950) = iprot.readListBegin()
-          for _i954 in xrange(_size950):
-            _elem955 = iprot.readString()
-            self.part_vals.append(_elem955)
+          (_etype946, _size943) = iprot.readListBegin()
+          for _i947 in xrange(_size943):
+            _elem948 = iprot.readString()
+            self.part_vals.append(_elem948)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24479,8 +24247,8 @@ class partition_name_has_valid_characters_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter956 in self.part_vals:
-        oprot.writeString(iter956)
+      for iter949 in self.part_vals:
+        oprot.writeString(iter949)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.throw_exception is not None:
@@ -24838,10 +24606,10 @@ class partition_name_to_vals_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype960, _size957) = iprot.readListBegin()
-          for _i961 in xrange(_size957):
-            _elem962 = iprot.readString()
-            self.success.append(_elem962)
+          (_etype953, _size950) = iprot.readListBegin()
+          for _i954 in xrange(_size950):
+            _elem955 = iprot.readString()
+            self.success.append(_elem955)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24864,8 +24632,8 @@ class partition_name_to_vals_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter963 in self.success:
-        oprot.writeString(iter963)
+      for iter956 in self.success:
+        oprot.writeString(iter956)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -24989,11 +24757,11 @@ class partition_name_to_spec_result:
       if fid == 0:
         if ftype == TType.MAP:
           self.success = {}
-          (_ktype965, _vtype966, _size964 ) = iprot.readMapBegin()
-          for _i968 in xrange(_size964):
-            _key969 = iprot.readString()
-            _val970 = iprot.readString()
-            self.success[_key969] = _val970
+          (_ktype958, _vtype959, _size957 ) = iprot.readMapBegin()
+          for _i961 in xrange(_size957):
+            _key962 = iprot.readString()
+            _val963 = iprot.readString()
+            self.success[_key962] = _val963
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -25016,9 +24784,9 @@ class partition_name_to_spec_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.MAP, 0)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
-      for kiter971,viter972 in self.success.items():
-        oprot.writeString(kiter971)
-        oprot.writeString(viter972)
+      for kiter964,viter965 in self.success.items():
+        oprot.writeString(kiter964)
+        oprot.writeString(viter965)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -25094,11 +24862,11 @@ class markPartitionForEvent_args:
       elif fid == 3:
         if ftype == TType.MAP:
           self.part_vals = {}
-          (_ktype974, _vtype975, _size973 ) = iprot.readMapBegin()
-          for _i977 in xrange(_size973):
-            _key978 = iprot.readString()
-            _val979 = iprot.readString()
-            self.part_vals[_key978] = _val979
+          (_ktype967, _vtype968, _size966 ) = iprot.readMapBegin()
+          for _i970 in xrange(_size966):
+            _key971 = iprot.readString()
+            _val972 = iprot.readString()
+            self.part_vals[_key971] = _val972
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -25128,9 +24896,9 @@ class markPartitionForEvent_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter980,viter981 in self.part_vals.items():
-        oprot.writeString(kiter980)
-        oprot.writeString(viter981)
+      for kiter973,viter974 in self.part_vals.items():
+        oprot.writeString(kiter973)
+        oprot.writeString(viter974)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -25344,11 +25112,11 @@ class isPartitionMarkedForEvent_args:
       elif fid == 3:
         if ftype == TType.MAP:
           self.part_vals = {}
-          (_ktype983, _vtype984, _size982 ) = iprot.readMapBegin()
-          for _i986 in xrange(_size982):
-            _key987 = iprot.readString()
-            _val988 = iprot.readString()
-            self.part_vals[_key987] = _val988
+          (_ktype976, _vtype977, _size975 ) = iprot.readMapBegin()
+          for _i979 in xrange(_size975):
+            _key980 = iprot.readString()
+            _val981 = iprot.readString()
+            self.part_vals[_key980] = _val981
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -25378,9 +25146,9 @@ class isPartitionMarkedForEvent_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter989,viter990 in self.part_vals.items():
-        oprot.writeString(kiter989)
-        oprot.writeString(viter990)
+      for kiter982,viter983 in self.part_vals.items():
+        oprot.writeString(kiter982)
+        oprot.writeString(viter983)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -26435,11 +26203,11 @@ class get_indexes_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype994, _size991) = iprot.readListBegin()
-          for _i995 in xrange(_size991):
-            _elem996 = Index()
-            _elem996.read(iprot)
-            self.success.append(_elem996)
+          (_etype987, _size984) = iprot.readListBegin()
+          for _i988 in xrange(_size984):
+            _elem989 = Index()
+            _elem989.read(iprot)
+            self.success.append(_elem989)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26468,8 +26236,8 @@ class get_indexes_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter997 in self.success:
-        iter997.write(oprot)
+      for iter990 in self.success:
+        iter990.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -26624,10 +26392,10 @@ class get_index_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1001, _size998) = iprot.readListBegin()
-          for _i1002 in xrange(_size998):
-            _elem1003 = iprot.readString()
-            self.success.append(_elem1003)
+          (_etype994, _size991) = iprot.readListBegin()
+          for _i995 in xrange(_size991):
+            _elem996 = iprot.readString()
+            self.success.append(_elem996)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26650,8 +26418,8 @@ class get_index_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1004 in self.success:
-        oprot.writeString(iter1004)
+      for iter997 in self.success:
+        oprot.writeString(iter997)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o2 is not None:
@@ -29517,10 +29285,10 @@ class get_functions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1008, _size1005) = iprot.readListBegin()
-          for _i1009 in xrange(_size1005):
-            _elem1010 = iprot.readString()
-            self.success.append(_elem1010)
+          (_etype1001, _size998) = iprot.readListBegin()
+          for _i1002 in xrange(_size998):
+            _elem1003 = iprot.readString()
+            self.success.append(_elem1003)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29543,8 +29311,8 @@ class get_functions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1011 in self.success:
-        oprot.writeString(iter1011)
+      for iter1004 in self.success:
+        oprot.writeString(iter1004)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -30232,10 +30000,10 @@ class get_role_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1015, _size1012) = iprot.readListBegin()
-          for _i1016 in xrange(_size1012):
-            _elem1017 = iprot.readString()
-            self.success.append(_elem1017)
+          (_etype1008, _size1005) = iprot.readListBegin()
+          for _i1009 in xrange(_size1005):
+            _elem1010 = iprot.readString()
+            self.success.append(_elem1010)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30258,8 +30026,8 @@ class get_role_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1018 in self.success:
-        oprot.writeString(iter1018)
+      for iter1011 in self.success:
+        oprot.writeString(iter1011)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -30773,11 +30541,11 @@ class list_roles_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1022, _size1019) = iprot.readListBegin()
-          for _i1023 in xrange(_size1019):
-            _elem1024 = Role()
-            _elem1024.read(iprot)
-            self.success.append(_elem1024)
+          (_etype1015, _size1012) = iprot.readListBegin()
+          for _i1016 in xrange(_size1012):
+            _elem1017 = Role()
+            _elem1017.read(iprot)
+            self.success.append(_elem1017)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30800,8 +30568,8 @@ class list_roles_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1025 in self.success:
-        iter1025.write(oprot)
+      for iter1018 in self.success:
+        iter1018.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -31310,10 +31078,10 @@ class get_privilege_set_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype1029, _size1026) = iprot.readListBegin()
-          for _i1030 in xrange(_size1026):
-            _elem1031 = iprot.readString()
-            self.group_names.append(_elem1031)
+          (_etype1022, _size1019) = iprot.readListBegin()
+          for _i1023 in xrange(_size1019):
+            _elem1024 = iprot.readString()
+            self.group_names.append(_elem1024)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -31338,8 +31106,8 @@ class get_privilege_set_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1032 in self.group_names:
-        oprot.writeString(iter1032)
+      for iter1025 in self.group_names:
+        oprot.writeString(iter1025)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -31566,11 +31334,11 @@ class list_privileges_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1036, _size1033) = iprot.readListBegin()
-          for _i1037 in xrange(_size1033):
-            _elem1038 = HiveObjectPrivilege()
-            _elem1038.read(iprot)
-            self.success.append(_elem1038)
+          (_etype1029, _size1026) = iprot.readListBegin()
+          for _i1030 in xrange(_size1026):
+            _elem1031 = HiveObjectPrivilege()
+            _elem1031.read(iprot)
+            self.success.append(_elem1031)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -31593,8 +31361,8 @@ class list_privileges_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1039 in self.success:
-        iter1039.write(oprot)
+      for iter1032 in self.success:
+        iter1032.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -32092,10 +31860,10 @@ class set_ugi_args:
       elif fid == 2:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype1043, _size1040) = iprot.readListBegin()
-          for _i1044 in xrange(_size1040):
-            _elem1045 = iprot.readString()
-            self.group_names.append(_elem1045)
+          (_etype1036, _size1033) = iprot.readListBegin()
+          for _i1037 in xrange(_size1033):
+            _elem1038 = iprot.readString()
+            self.group_names.append(_elem1038)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -32116,8 +31884,8 @@ class set_ugi_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1046 in self.group_names:
-        oprot.writeString(iter1046)
+      for iter1039 in self.group_names:
+        oprot.writeString(iter1039)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -32172,10 +31940,10 @@ class set_ugi_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1050, _size1047) = iprot.readListBegin()
-          for _i1051 in xrange(_size1047):
-            _elem1052 = iprot.readString()
-            self.success.append(_elem1052)
+          (_etype1043, _size1040) = iprot.readListBegin()
+          for _i1044 in xrange(_size1040):
+            _elem1045 = iprot.readString()
+            self.success.append(_elem1045)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -32198,8 +31966,8 @@ class set_ugi_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1053 in self.success:
-        oprot.writeString(iter1053)
+      for iter1046 in self.success:
+        oprot.writeString(iter1046)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -33131,10 +32899,10 @@ class get_all_token_identifiers_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1057, _size1054) = iprot.readListBegin()
-          for _i1058 in xrange(_size1054):
-            _elem1059 = iprot.readString()
-            self.success.append(_elem1059)
+          (_etype1050, _size1047) = iprot.readListBegin()
+          for _i1051 in xrange(_size1047):
+            _elem1052 = iprot.readString()
+            self.success.append(_elem1052)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -33151,8 +32919,8 @@ class get_all_token_identifiers_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1060 in self.success:
-        oprot.writeString(iter1060)
+      for iter1053 in self.success:
+        oprot.writeString(iter1053)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -33679,10 +33447,10 @@ class get_master_keys_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1064, _size1061) = iprot.readListBegin()
-          for _i1065 in xrange(_size1061):
-            _elem1066 = iprot.readString()
-            self.success.append(_elem1066)
+          (_etype1057, _size1054) = iprot.readListBegin()
+          for _i1058 in xrange(_size1054):
+            _elem1059 = iprot.readString()
+            self.success.append(_elem1059)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -33699,8 +33467,8 @@ class get_master_keys_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1067 in self.success:
-        oprot.writeString(iter1067)
+      for iter1060 in self.success:
+        oprot.writeString(iter1060)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
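
As an aside (not part of the patch above): the renumbered _elemN / _iterN / _sizeN temporaries in the ThriftHiveMetastore.py hunks all follow the same generated list read/write loop; only the counters shift downward, apparently because the revert removes generated code earlier in the file. A rough, self-contained sketch of that loop shape, using a made-up ToyListProtocol stand-in rather than the real Thrift TBinaryProtocol:

    class ToyListProtocol(object):
        # Fake protocol exposing only the calls the generated list readers use.
        def __init__(self, values):
            self._values = list(values)
            self._pos = 0

        def readListBegin(self):
            # Returns (element type, element count); the type code is unused here.
            return (None, len(self._values))

        def readString(self):
            value = self._values[self._pos]
            self._pos += 1
            return value

        def readListEnd(self):
            pass

    def read_string_list(iprot):
        # Same pattern as the generated *_args/*_result readers above.
        part_vals = []
        (_etype, _size) = iprot.readListBegin()
        for _i in range(_size):
            part_vals.append(iprot.readString())
        iprot.readListEnd()
        return part_vals

    print(read_string_list(ToyListProtocol(['ds=2017-05-08', 'hr=00'])))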

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 3d20125..9ac1974 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -8344,22 +8344,19 @@ class GetOpenTxnsResponse:
    - txn_high_water_mark
    - open_txns
    - min_open_txn
-   - abortedBits
   """
 
   thrift_spec = (
     None, # 0
     (1, TType.I64, 'txn_high_water_mark', None, None, ), # 1
-    (2, TType.LIST, 'open_txns', (TType.I64,None), None, ), # 2
+    (2, TType.SET, 'open_txns', (TType.I64,None), None, ), # 2
     (3, TType.I64, 'min_open_txn', None, None, ), # 3
-    (4, TType.STRING, 'abortedBits', None, None, ), # 4
   )
 
-  def __init__(self, txn_high_water_mark=None, open_txns=None, min_open_txn=None, abortedBits=None,):
+  def __init__(self, txn_high_water_mark=None, open_txns=None, min_open_txn=None,):
     self.txn_high_water_mark = txn_high_water_mark
     self.open_txns = open_txns
     self.min_open_txn = min_open_txn
-    self.abortedBits = abortedBits
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -8376,13 +8373,13 @@ class GetOpenTxnsResponse:
         else:
           iprot.skip(ftype)
       elif fid == 2:
-        if ftype == TType.LIST:
-          self.open_txns = []
-          (_etype416, _size413) = iprot.readListBegin()
+        if ftype == TType.SET:
+          self.open_txns = set()
+          (_etype416, _size413) = iprot.readSetBegin()
           for _i417 in xrange(_size413):
             _elem418 = iprot.readI64()
-            self.open_txns.append(_elem418)
-          iprot.readListEnd()
+            self.open_txns.add(_elem418)
+          iprot.readSetEnd()
         else:
           iprot.skip(ftype)
       elif fid == 3:
@@ -8390,11 +8387,6 @@ class GetOpenTxnsResponse:
           self.min_open_txn = iprot.readI64()
         else:
           iprot.skip(ftype)
-      elif fid == 4:
-        if ftype == TType.STRING:
-          self.abortedBits = iprot.readString()
-        else:
-          iprot.skip(ftype)
       else:
         iprot.skip(ftype)
       iprot.readFieldEnd()
@@ -8410,20 +8402,16 @@ class GetOpenTxnsResponse:
       oprot.writeI64(self.txn_high_water_mark)
       oprot.writeFieldEnd()
     if self.open_txns is not None:
-      oprot.writeFieldBegin('open_txns', TType.LIST, 2)
-      oprot.writeListBegin(TType.I64, len(self.open_txns))
+      oprot.writeFieldBegin('open_txns', TType.SET, 2)
+      oprot.writeSetBegin(TType.I64, len(self.open_txns))
       for iter419 in self.open_txns:
         oprot.writeI64(iter419)
-      oprot.writeListEnd()
+      oprot.writeSetEnd()
       oprot.writeFieldEnd()
     if self.min_open_txn is not None:
       oprot.writeFieldBegin('min_open_txn', TType.I64, 3)
       oprot.writeI64(self.min_open_txn)
       oprot.writeFieldEnd()
-    if self.abortedBits is not None:
-      oprot.writeFieldBegin('abortedBits', TType.STRING, 4)
-      oprot.writeString(self.abortedBits)
-      oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
 
@@ -8432,8 +8420,6 @@ class GetOpenTxnsResponse:
       raise TProtocol.TProtocolException(message='Required field txn_high_water_mark is unset!')
     if self.open_txns is None:
       raise TProtocol.TProtocolException(message='Required field open_txns is unset!')
-    if self.abortedBits is None:
-      raise TProtocol.TProtocolException(message='Required field abortedBits is unset!')
     return
 
 
@@ -8442,7 +8428,6 @@ class GetOpenTxnsResponse:
     value = (value * 31) ^ hash(self.txn_high_water_mark)
     value = (value * 31) ^ hash(self.open_txns)
     value = (value * 31) ^ hash(self.min_open_txn)
-    value = (value * 31) ^ hash(self.abortedBits)
     return value
 
   def __repr__(self):
@@ -11200,20 +11185,17 @@ class CurrentNotificationEventId:
 class InsertEventRequestData:
   """
   Attributes:
-   - replace
    - filesAdded
    - filesAddedChecksum
   """
 
   thrift_spec = (
     None, # 0
-    (1, TType.BOOL, 'replace', None, None, ), # 1
-    (2, TType.LIST, 'filesAdded', (TType.STRING,None), None, ), # 2
-    (3, TType.LIST, 'filesAddedChecksum', (TType.STRING,None), None, ), # 3
+    (1, TType.LIST, 'filesAdded', (TType.STRING,None), None, ), # 1
+    (2, TType.LIST, 'filesAddedChecksum', (TType.STRING,None), None, ), # 2
   )
 
-  def __init__(self, replace=None, filesAdded=None, filesAddedChecksum=None,):
-    self.replace = replace
+  def __init__(self, filesAdded=None, filesAddedChecksum=None,):
     self.filesAdded = filesAdded
     self.filesAddedChecksum = filesAddedChecksum
 
@@ -11227,11 +11209,6 @@ class InsertEventRequestData:
       if ftype == TType.STOP:
         break
       if fid == 1:
-        if ftype == TType.BOOL:
-          self.replace = iprot.readBool()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
         if ftype == TType.LIST:
           self.filesAdded = []
           (_etype495, _size492) = iprot.readListBegin()
@@ -11241,7 +11218,7 @@ class InsertEventRequestData:
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
-      elif fid == 3:
+      elif fid == 2:
         if ftype == TType.LIST:
           self.filesAddedChecksum = []
           (_etype501, _size498) = iprot.readListBegin()
@@ -11261,19 +11238,15 @@ class InsertEventRequestData:
       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
       return
     oprot.writeStructBegin('InsertEventRequestData')
-    if self.replace is not None:
-      oprot.writeFieldBegin('replace', TType.BOOL, 1)
-      oprot.writeBool(self.replace)
-      oprot.writeFieldEnd()
     if self.filesAdded is not None:
-      oprot.writeFieldBegin('filesAdded', TType.LIST, 2)
+      oprot.writeFieldBegin('filesAdded', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.filesAdded))
       for iter504 in self.filesAdded:
         oprot.writeString(iter504)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.filesAddedChecksum is not None:
-      oprot.writeFieldBegin('filesAddedChecksum', TType.LIST, 3)
+      oprot.writeFieldBegin('filesAddedChecksum', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.filesAddedChecksum))
       for iter505 in self.filesAddedChecksum:
         oprot.writeString(iter505)
@@ -11290,7 +11263,6 @@ class InsertEventRequestData:
 
   def __hash__(self):
     value = 17
-    value = (value * 31) ^ hash(self.replace)
     value = (value * 31) ^ hash(self.filesAdded)
     value = (value * 31) ^ hash(self.filesAddedChecksum)
     return value
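
For readers skimming the ttypes.py hunks above: after the revert, GetOpenTxnsResponse carries open_txns as a Thrift SET of i64 transaction ids (readSetBegin/add/readSetEnd) and no longer has the abortedBits field, while InsertEventRequestData loses the replace flag and filesAdded/filesAddedChecksum move back to field ids 1 and 2. A minimal stand-in (placeholder names, not the generated classes, and not part of the patch) showing how the reverted open_txns shape is consumed:

    from collections import namedtuple

    # Placeholder mirroring the reverted field layout; not the generated class.
    FakeGetOpenTxnsResponse = namedtuple(
        'FakeGetOpenTxnsResponse',
        ['txn_high_water_mark', 'open_txns', 'min_open_txn'])

    resp = FakeGetOpenTxnsResponse(
        txn_high_water_mark=100,
        open_txns={7, 42, 99},   # a set of i64 txn ids after the revert
        min_open_txn=7)

    # Membership checks go straight against the set; there is no separate
    # abortedBits bit vector in the reverted shape.
    print(42 in resp.open_txns)    # True
    print(101 in resp.open_txns)   # False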

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 5e18f9b..da24113 100644
--- a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -1861,13 +1861,11 @@ class GetOpenTxnsResponse
   TXN_HIGH_WATER_MARK = 1
   OPEN_TXNS = 2
   MIN_OPEN_TXN = 3
-  ABORTEDBITS = 4
 
   FIELDS = {
     TXN_HIGH_WATER_MARK => {:type => ::Thrift::Types::I64, :name => 'txn_high_water_mark'},
-    OPEN_TXNS => {:type => ::Thrift::Types::LIST, :name => 'open_txns', :element => {:type => ::Thrift::Types::I64}},
-    MIN_OPEN_TXN => {:type => ::Thrift::Types::I64, :name => 'min_open_txn', :optional => true},
-    ABORTEDBITS => {:type => ::Thrift::Types::STRING, :name => 'abortedBits', :binary => true}
+    OPEN_TXNS => {:type => ::Thrift::Types::SET, :name => 'open_txns', :element => {:type => ::Thrift::Types::I64}},
+    MIN_OPEN_TXN => {:type => ::Thrift::Types::I64, :name => 'min_open_txn', :optional => true}
   }
 
   def struct_fields; FIELDS; end
@@ -1875,7 +1873,6 @@ class GetOpenTxnsResponse
   def validate
     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field txn_high_water_mark is unset!') unless @txn_high_water_mark
     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field open_txns is unset!') unless @open_txns
-    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field abortedBits is unset!') unless @abortedBits
   end
 
   ::Thrift::Struct.generate_accessors self
@@ -2511,12 +2508,10 @@ end
 
 class InsertEventRequestData
   include ::Thrift::Struct, ::Thrift::Struct_Union
-  REPLACE = 1
-  FILESADDED = 2
-  FILESADDEDCHECKSUM = 3
+  FILESADDED = 1
+  FILESADDEDCHECKSUM = 2
 
   FIELDS = {
-    REPLACE => {:type => ::Thrift::Types::BOOL, :name => 'replace', :optional => true},
     FILESADDED => {:type => ::Thrift::Types::LIST, :name => 'filesAdded', :element => {:type => ::Thrift::Types::STRING}},
     FILESADDEDCHECKSUM => {:type => ::Thrift::Types::LIST, :name => 'filesAddedChecksum', :element => {:type => ::Thrift::Types::STRING}, :optional => true}
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index 36be2e8..97f7fc0 100644
--- a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -416,21 +416,6 @@ module ThriftHiveMetastore
       return
     end
 
-    def truncate_table(dbName, tableName, partNames)
-      send_truncate_table(dbName, tableName, partNames)
-      recv_truncate_table()
-    end
-
-    def send_truncate_table(dbName, tableName, partNames)
-      send_message('truncate_table', Truncate_table_args, :dbName => dbName, :tableName => tableName, :partNames => partNames)
-    end
-
-    def recv_truncate_table()
-      result = receive_message(Truncate_table_result)
-      raise result.o1 unless result.o1.nil?
-      return
-    end
-
     def get_tables(db_name, pattern)
       send_get_tables(db_name, pattern)
       return recv_get_tables()
@@ -2955,17 +2940,6 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'drop_table_with_environment_context', seqid)
     end
 
-    def process_truncate_table(seqid, iprot, oprot)
-      args = read_args(iprot, Truncate_table_args)
-      result = Truncate_table_result.new()
-      begin
-        @handler.truncate_table(args.dbName, args.tableName, args.partNames)
-      rescue ::MetaException => o1
-        result.o1 = o1
-      end
-      write_result(result, oprot, 'truncate_table', seqid)
-    end
-
     def process_get_tables(seqid, iprot, oprot)
       args = read_args(iprot, Get_tables_args)
       result = Get_tables_result.new()
@@ -5440,42 +5414,6 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
-  class Truncate_table_args
-    include ::Thrift::Struct, ::Thrift::Struct_Union
-    DBNAME = 1
-    TABLENAME = 2
-    PARTNAMES = 3
-
-    FIELDS = {
-      DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
-      TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
-      PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}}
-    }
-
-    def struct_fields; FIELDS; end
-
-    def validate
-    end
-
-    ::Thrift::Struct.generate_accessors self
-  end
-
-  class Truncate_table_result
-    include ::Thrift::Struct, ::Thrift::Struct_Union
-    O1 = 1
-
-    FIELDS = {
-      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException}
-    }
-
-    def struct_fields; FIELDS; end
-
-    def validate
-    end
-
-    ::Thrift::Struct.generate_accessors self
-  end
-
   class Get_tables_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
     DB_NAME = 1